diff --git a/airbyte_cdk/connector_builder/connector_builder_handler.py b/airbyte_cdk/connector_builder/connector_builder_handler.py
index a7d2163a9..513546737 100644
--- a/airbyte_cdk/connector_builder/connector_builder_handler.py
+++ b/airbyte_cdk/connector_builder/connector_builder_handler.py
@@ -3,8 +3,8 @@
 #
 
 
-from dataclasses import asdict
-from typing import Any, Dict, List, Mapping, Optional
+from dataclasses import asdict, dataclass, field
+from typing import Any, ClassVar, Dict, List, Mapping
 
 from airbyte_cdk.connector_builder.test_reader import TestReader
 from airbyte_cdk.models import (
@@ -15,32 +15,45 @@
     Type,
 )
 from airbyte_cdk.models import Type as MessageType
-from airbyte_cdk.sources.declarative.concurrent_declarative_source import (
-    ConcurrentDeclarativeSource,
-    TestLimits,
-)
 from airbyte_cdk.sources.declarative.declarative_source import DeclarativeSource
 from airbyte_cdk.sources.declarative.manifest_declarative_source import ManifestDeclarativeSource
+from airbyte_cdk.sources.declarative.parsers.model_to_component_factory import (
+    ModelToComponentFactory,
+)
 from airbyte_cdk.utils.airbyte_secrets_utils import filter_secrets
 from airbyte_cdk.utils.datetime_helpers import ab_datetime_now
 from airbyte_cdk.utils.traced_exception import AirbyteTracedException
 
+DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE = 5
+DEFAULT_MAXIMUM_NUMBER_OF_SLICES = 5
+DEFAULT_MAXIMUM_RECORDS = 100
+DEFAULT_MAXIMUM_STREAMS = 100
+
 MAX_PAGES_PER_SLICE_KEY = "max_pages_per_slice"
 MAX_SLICES_KEY = "max_slices"
 MAX_RECORDS_KEY = "max_records"
 MAX_STREAMS_KEY = "max_streams"
 
 
+@dataclass
+class TestLimits:
+    __test__: ClassVar[bool] = False  # Tell Pytest this is not a Pytest class, despite its name
+
+    max_records: int = field(default=DEFAULT_MAXIMUM_RECORDS)
+    max_pages_per_slice: int = field(default=DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE)
+    max_slices: int = field(default=DEFAULT_MAXIMUM_NUMBER_OF_SLICES)
+    max_streams: int = field(default=DEFAULT_MAXIMUM_STREAMS)
+
+
 def get_limits(config: Mapping[str, Any]) -> TestLimits:
     command_config = config.get("__test_read_config", {})
-    return TestLimits(
-        max_records=command_config.get(MAX_RECORDS_KEY, TestLimits.DEFAULT_MAX_RECORDS),
-        max_pages_per_slice=command_config.get(
-            MAX_PAGES_PER_SLICE_KEY, TestLimits.DEFAULT_MAX_PAGES_PER_SLICE
-        ),
-        max_slices=command_config.get(MAX_SLICES_KEY, TestLimits.DEFAULT_MAX_SLICES),
-        max_streams=command_config.get(MAX_STREAMS_KEY, TestLimits.DEFAULT_MAX_STREAMS),
+    max_pages_per_slice = (
+        command_config.get(MAX_PAGES_PER_SLICE_KEY) or DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE
     )
+    max_slices = command_config.get(MAX_SLICES_KEY) or DEFAULT_MAXIMUM_NUMBER_OF_SLICES
+    max_records = command_config.get(MAX_RECORDS_KEY) or DEFAULT_MAXIMUM_RECORDS
+    max_streams = command_config.get(MAX_STREAMS_KEY) or DEFAULT_MAXIMUM_STREAMS
+    return TestLimits(max_records, max_pages_per_slice, max_slices, max_streams)
 
 
 def should_migrate_manifest(config: Mapping[str, Any]) -> bool:
@@ -62,30 +75,21 @@ def should_normalize_manifest(config: Mapping[str, Any]) -> bool:
     return config.get("__should_normalize", False)
 
 
-def create_source(
-    config: Mapping[str, Any],
-    limits: TestLimits,
-    catalog: Optional[ConfiguredAirbyteCatalog],
-    state: Optional[List[AirbyteStateMessage]],
-) -> ConcurrentDeclarativeSource[Optional[List[AirbyteStateMessage]]]:
+def create_source(config: Mapping[str, Any], limits: TestLimits) -> ManifestDeclarativeSource:
     manifest = config["__injected_declarative_manifest"]
-
-    # We enforce a concurrency level of 1
so that the stream is processed on a single thread - # to retain ordering for the grouping of the builder message responses. - if "concurrency_level" in manifest: - manifest["concurrency_level"]["default_concurrency"] = 1 - else: - manifest["concurrency_level"] = {"type": "ConcurrencyLevel", "default_concurrency": 1} - - return ConcurrentDeclarativeSource( - catalog=catalog, + return ManifestDeclarativeSource( config=config, - state=state, - source_config=manifest, emit_connector_builder_messages=True, + source_config=manifest, migrate_manifest=should_migrate_manifest(config), normalize_manifest=should_normalize_manifest(config), - limits=limits, + component_factory=ModelToComponentFactory( + emit_connector_builder_messages=True, + limit_pages_fetched_per_slice=limits.max_pages_per_slice, + limit_slices_fetched=limits.max_slices, + disable_retries=True, + disable_cache=True, + ), ) diff --git a/airbyte_cdk/connector_builder/main.py b/airbyte_cdk/connector_builder/main.py index 22be81c82..80cf4afa9 100644 --- a/airbyte_cdk/connector_builder/main.py +++ b/airbyte_cdk/connector_builder/main.py @@ -91,12 +91,12 @@ def handle_connector_builder_request( def handle_request(args: List[str]) -> str: command, config, catalog, state = get_config_and_catalog_from_args(args) limits = get_limits(config) - source = create_source(config=config, limits=limits, catalog=catalog, state=state) - return orjson.dumps( # type: ignore[no-any-return] # Serializer.dump() always returns AirbyteMessage + source = create_source(config, limits) + return orjson.dumps( AirbyteMessageSerializer.dump( handle_connector_builder_request(source, command, config, catalog, state, limits) ) - ).decode() + ).decode() # type: ignore[no-any-return] # Serializer.dump() always returns AirbyteMessage if __name__ == "__main__": diff --git a/airbyte_cdk/connector_builder/test_reader/helpers.py b/airbyte_cdk/connector_builder/test_reader/helpers.py index 3cc634ccb..9154610cc 100644 --- a/airbyte_cdk/connector_builder/test_reader/helpers.py +++ b/airbyte_cdk/connector_builder/test_reader/helpers.py @@ -5,7 +5,7 @@ import json from copy import deepcopy from json import JSONDecodeError -from typing import Any, Dict, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional from airbyte_cdk.connector_builder.models import ( AuxiliaryRequest, @@ -17,8 +17,6 @@ from airbyte_cdk.models import ( AirbyteLogMessage, AirbyteMessage, - AirbyteStateBlob, - AirbyteStateMessage, OrchestratorType, TraceType, ) @@ -468,7 +466,7 @@ def handle_current_slice( return StreamReadSlices( pages=current_slice_pages, slice_descriptor=current_slice_descriptor, - state=[convert_state_blob_to_mapping(latest_state_message)] if latest_state_message else [], + state=[latest_state_message] if latest_state_message else [], auxiliary_requests=auxiliary_requests if auxiliary_requests else [], ) @@ -720,23 +718,3 @@ def get_auxiliary_request_type(stream: dict, http: dict) -> str: # type: ignore Determines the type of the auxiliary request based on the stream and HTTP properties. """ return "PARENT_STREAM" if stream.get("is_substream", False) else str(http.get("type", None)) - - -def convert_state_blob_to_mapping( - state_message: Union[AirbyteStateMessage, Dict[str, Any]], -) -> Dict[str, Any]: - """ - The AirbyteStreamState stores state as an AirbyteStateBlob which deceivingly is not - a dictionary, but rather a list of kwargs fields. 
This in turn causes it to not be - properly turned into a dictionary when translating this back into response output - by the connector_builder_handler using asdict() - """ - - if isinstance(state_message, AirbyteStateMessage) and state_message.stream: - state_value = state_message.stream.stream_state - if isinstance(state_value, AirbyteStateBlob): - state_value_mapping = {k: v for k, v in state_value.__dict__.items()} - state_message.stream.stream_state = state_value_mapping # type: ignore # we intentionally set this as a Dict so that StreamReadSlices is translated properly in the resulting HTTP response - return state_message # type: ignore # See above, but when this is an AirbyteStateMessage we must convert AirbyteStateBlob to a Dict - else: - return state_message # type: ignore # This is guaranteed to be a Dict since we check isinstance AirbyteStateMessage above diff --git a/airbyte_cdk/connector_builder/test_reader/message_grouper.py b/airbyte_cdk/connector_builder/test_reader/message_grouper.py index 999b54b72..33b594451 100644 --- a/airbyte_cdk/connector_builder/test_reader/message_grouper.py +++ b/airbyte_cdk/connector_builder/test_reader/message_grouper.py @@ -95,7 +95,7 @@ def get_message_groups( latest_state_message: Optional[Dict[str, Any]] = None slice_auxiliary_requests: List[AuxiliaryRequest] = [] - while message := next(messages, None): + while records_count < limit and (message := next(messages, None)): json_message = airbyte_message_to_json(message) if is_page_http_request_for_different_stream(json_message, stream_name): diff --git a/airbyte_cdk/sources/concurrent_source/concurrent_read_processor.py b/airbyte_cdk/sources/concurrent_source/concurrent_read_processor.py index 905999a4d..09bd921e1 100644 --- a/airbyte_cdk/sources/concurrent_source/concurrent_read_processor.py +++ b/airbyte_cdk/sources/concurrent_source/concurrent_read_processor.py @@ -2,7 +2,6 @@ # Copyright (c) 2023 Airbyte, Inc., all rights reserved. 
# import logging -import os from typing import Dict, Iterable, List, Optional, Set from airbyte_cdk.exception_handler import generate_failed_streams_error_message @@ -96,14 +95,11 @@ def on_partition(self, partition: Partition) -> None: """ stream_name = partition.stream_name() self._streams_to_running_partitions[stream_name].add(partition) - cursor = self._stream_name_to_instance[stream_name].cursor if self._slice_logger.should_log_slice_message(self._logger): self._message_repository.emit_message( self._slice_logger.create_slice_log_message(partition.to_slice()) ) - self._thread_pool_manager.submit( - self._partition_reader.process_partition, partition, cursor - ) + self._thread_pool_manager.submit(self._partition_reader.process_partition, partition) def on_partition_complete_sentinel( self, sentinel: PartitionCompleteSentinel @@ -116,16 +112,26 @@ def on_partition_complete_sentinel( """ partition = sentinel.partition - partitions_running = self._streams_to_running_partitions[partition.stream_name()] - if partition in partitions_running: - partitions_running.remove(partition) - # If all partitions were generated and this was the last one, the stream is done - if ( - partition.stream_name() not in self._streams_currently_generating_partitions - and len(partitions_running) == 0 - ): - yield from self._on_stream_is_done(partition.stream_name()) - yield from self._message_repository.consume_queue() + try: + if sentinel.is_successful: + stream = self._stream_name_to_instance[partition.stream_name()] + stream.cursor.close_partition(partition) + except Exception as exception: + self._flag_exception(partition.stream_name(), exception) + yield AirbyteTracedException.from_exception( + exception, stream_descriptor=StreamDescriptor(name=partition.stream_name()) + ).as_sanitized_airbyte_message() + finally: + partitions_running = self._streams_to_running_partitions[partition.stream_name()] + if partition in partitions_running: + partitions_running.remove(partition) + # If all partitions were generated and this was the last one, the stream is done + if ( + partition.stream_name() not in self._streams_currently_generating_partitions + and len(partitions_running) == 0 + ): + yield from self._on_stream_is_done(partition.stream_name()) + yield from self._message_repository.consume_queue() def on_record(self, record: Record) -> Iterable[AirbyteMessage]: """ @@ -154,6 +160,7 @@ def on_record(self, record: Record) -> Iterable[AirbyteMessage]: stream.as_airbyte_stream(), AirbyteStreamStatus.RUNNING ) self._record_counter[stream.name] += 1 + stream.cursor.observe(record) yield message yield from self._message_repository.consume_queue() diff --git a/airbyte_cdk/sources/concurrent_source/concurrent_source.py b/airbyte_cdk/sources/concurrent_source/concurrent_source.py index de2d93523..ffdee2dc1 100644 --- a/airbyte_cdk/sources/concurrent_source/concurrent_source.py +++ b/airbyte_cdk/sources/concurrent_source/concurrent_source.py @@ -1,11 +1,10 @@ # # Copyright (c) 2023 Airbyte, Inc., all rights reserved. 
# - import concurrent import logging from queue import Queue -from typing import Iterable, Iterator, List, Optional +from typing import Iterable, Iterator, List from airbyte_cdk.models import AirbyteMessage from airbyte_cdk.sources.concurrent_source.concurrent_read_processor import ConcurrentReadProcessor @@ -17,7 +16,7 @@ from airbyte_cdk.sources.message import InMemoryMessageRepository, MessageRepository from airbyte_cdk.sources.streams.concurrent.abstract_stream import AbstractStream from airbyte_cdk.sources.streams.concurrent.partition_enqueuer import PartitionEnqueuer -from airbyte_cdk.sources.streams.concurrent.partition_reader import PartitionLogger, PartitionReader +from airbyte_cdk.sources.streams.concurrent.partition_reader import PartitionReader from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition from airbyte_cdk.sources.streams.concurrent.partitions.types import ( PartitionCompleteSentinel, @@ -44,7 +43,6 @@ def create( logger: logging.Logger, slice_logger: SliceLogger, message_repository: MessageRepository, - queue: Optional[Queue[QueueItem]] = None, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS, ) -> "ConcurrentSource": is_single_threaded = initial_number_of_partitions_to_generate == 1 and num_workers == 1 @@ -61,13 +59,12 @@ def create( logger, ) return ConcurrentSource( - threadpool=threadpool, - logger=logger, - slice_logger=slice_logger, - queue=queue, - message_repository=message_repository, - initial_number_partitions_to_generate=initial_number_of_partitions_to_generate, - timeout_seconds=timeout_seconds, + threadpool, + logger, + slice_logger, + message_repository, + initial_number_of_partitions_to_generate, + timeout_seconds, ) def __init__( @@ -75,7 +72,6 @@ def __init__( threadpool: ThreadPoolManager, logger: logging.Logger, slice_logger: SliceLogger = DebugSliceLogger(), - queue: Optional[Queue[QueueItem]] = None, message_repository: MessageRepository = InMemoryMessageRepository(), initial_number_partitions_to_generate: int = 1, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS, @@ -95,28 +91,25 @@ def __init__( self._initial_number_partitions_to_generate = initial_number_partitions_to_generate self._timeout_seconds = timeout_seconds - # We set a maxsize to for the main thread to process record items when the queue size grows. This assumes that there are less - # threads generating partitions that than are max number of workers. If it weren't the case, we could have threads only generating - # partitions which would fill the queue. This number is arbitrarily set to 10_000 but will probably need to be changed given more - # information and might even need to be configurable depending on the source - self._queue = queue or Queue(maxsize=10_000) - def read( self, streams: List[AbstractStream], ) -> Iterator[AirbyteMessage]: self._logger.info("Starting syncing") + + # We set a maxsize to for the main thread to process record items when the queue size grows. This assumes that there are less + # threads generating partitions that than are max number of workers. If it weren't the case, we could have threads only generating + # partitions which would fill the queue. 
This number is arbitrarily set to 10_000 but will probably need to be changed given more + # information and might even need to be configurable depending on the source + queue: Queue[QueueItem] = Queue(maxsize=10_000) concurrent_stream_processor = ConcurrentReadProcessor( streams, - PartitionEnqueuer(self._queue, self._threadpool), + PartitionEnqueuer(queue, self._threadpool), self._threadpool, self._logger, self._slice_logger, self._message_repository, - PartitionReader( - self._queue, - PartitionLogger(self._slice_logger, self._logger, self._message_repository), - ), + PartitionReader(queue), ) # Enqueue initial partition generation tasks @@ -124,7 +117,7 @@ def read( # Read from the queue until all partitions were generated and read yield from self._consume_from_queue( - self._queue, + queue, concurrent_stream_processor, ) self._threadpool.check_for_errors_and_shutdown() @@ -148,10 +141,7 @@ def _consume_from_queue( airbyte_message_or_record_or_exception, concurrent_stream_processor, ) - # In the event that a partition raises an exception, anything remaining in - # the queue will be missed because is_done() can raise an exception and exit - # out of this loop before remaining items are consumed - if queue.empty() and concurrent_stream_processor.is_done(): + if concurrent_stream_processor.is_done() and queue.empty(): # all partitions were generated and processed. we're done here break @@ -171,7 +161,5 @@ def _handle_item( yield from concurrent_stream_processor.on_partition_complete_sentinel(queue_item) elif isinstance(queue_item, Record): yield from concurrent_stream_processor.on_record(queue_item) - elif isinstance(queue_item, AirbyteMessage): - yield queue_item else: raise ValueError(f"Unknown queue item type: {type(queue_item)}") diff --git a/airbyte_cdk/sources/declarative/concurrent_declarative_source.py b/airbyte_cdk/sources/declarative/concurrent_declarative_source.py index 9a651514b..720934a11 100644 --- a/airbyte_cdk/sources/declarative/concurrent_declarative_source.py +++ b/airbyte_cdk/sources/declarative/concurrent_declarative_source.py @@ -3,22 +3,7 @@ # import logging -from dataclasses import dataclass, field -from queue import Queue -from typing import ( - Any, - ClassVar, - Generic, - Iterator, - List, - Mapping, - MutableMapping, - Optional, - Tuple, - Union, -) - -from airbyte_protocol_dataclasses.models import Level +from typing import Any, Generic, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union from airbyte_cdk.models import ( AirbyteCatalog, @@ -58,8 +43,6 @@ StreamSlicerPartitionGenerator, ) from airbyte_cdk.sources.declarative.types import ConnectionDefinition -from airbyte_cdk.sources.message.concurrent_repository import ConcurrentMessageRepository -from airbyte_cdk.sources.message.repository import InMemoryMessageRepository, MessageRepository from airbyte_cdk.sources.source import TState from airbyte_cdk.sources.streams import Stream from airbyte_cdk.sources.streams.concurrent.abstract_stream import AbstractStream @@ -67,22 +50,6 @@ from airbyte_cdk.sources.streams.concurrent.cursor import ConcurrentCursor, FinalStateCursor from airbyte_cdk.sources.streams.concurrent.default_stream import DefaultStream from airbyte_cdk.sources.streams.concurrent.helpers import get_primary_key_from_stream -from airbyte_cdk.sources.streams.concurrent.partitions.types import QueueItem - - -@dataclass -class TestLimits: - __test__: ClassVar[bool] = False # Tell Pytest this is not a Pytest class, despite its name - - DEFAULT_MAX_PAGES_PER_SLICE: ClassVar[int] = 5 - 
DEFAULT_MAX_SLICES: ClassVar[int] = 5 - DEFAULT_MAX_RECORDS: ClassVar[int] = 100 - DEFAULT_MAX_STREAMS: ClassVar[int] = 100 - - max_records: int = field(default=DEFAULT_MAX_RECORDS) - max_pages_per_slice: int = field(default=DEFAULT_MAX_PAGES_PER_SLICE) - max_slices: int = field(default=DEFAULT_MAX_SLICES) - max_streams: int = field(default=DEFAULT_MAX_STREAMS) class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]): @@ -98,9 +65,7 @@ def __init__( source_config: ConnectionDefinition, debug: bool = False, emit_connector_builder_messages: bool = False, - migrate_manifest: bool = False, - normalize_manifest: bool = False, - limits: Optional[TestLimits] = None, + component_factory: Optional[ModelToComponentFactory] = None, config_path: Optional[str] = None, **kwargs: Any, ) -> None: @@ -108,39 +73,21 @@ def __init__( # no longer needs to store the original incoming state. But maybe there's an edge case? self._connector_state_manager = ConnectorStateManager(state=state) # type: ignore # state is always in the form of List[AirbyteStateMessage]. The ConnectorStateManager should use generics, but this can be done later - # We set a maxsize to for the main thread to process record items when the queue size grows. This assumes that there are less - # threads generating partitions that than are max number of workers. If it weren't the case, we could have threads only generating - # partitions which would fill the queue. This number is arbitrarily set to 10_000 but will probably need to be changed given more - # information and might even need to be configurable depending on the source - queue: Queue[QueueItem] = Queue(maxsize=10_000) - message_repository = InMemoryMessageRepository( - Level.DEBUG if emit_connector_builder_messages else Level.INFO - ) - # To reduce the complexity of the concurrent framework, we are not enabling RFR with synthetic # cursors. We do this by no longer automatically instantiating RFR cursors when converting # the declarative models into runtime components. Concurrent sources will continue to checkpoint # incremental streams running in full refresh. 
- component_factory = ModelToComponentFactory( + component_factory = component_factory or ModelToComponentFactory( emit_connector_builder_messages=emit_connector_builder_messages, - message_repository=ConcurrentMessageRepository(queue, message_repository), connector_state_manager=self._connector_state_manager, max_concurrent_async_job_count=source_config.get("max_concurrent_async_job_count"), - limit_pages_fetched_per_slice=limits.max_pages_per_slice if limits else None, - limit_slices_fetched=limits.max_slices if limits else None, - disable_retries=True if limits else False, - disable_cache=True if limits else False, ) - self._limits = limits - super().__init__( source_config=source_config, config=config, debug=debug, emit_connector_builder_messages=emit_connector_builder_messages, - migrate_manifest=migrate_manifest, - normalize_manifest=normalize_manifest, component_factory=component_factory, config_path=config_path, ) @@ -170,7 +117,6 @@ def __init__( initial_number_of_partitions_to_generate=initial_number_of_partitions_to_generate, logger=self.logger, slice_logger=self._slice_logger, - queue=queue, message_repository=self.message_repository, ) @@ -334,14 +280,8 @@ def _group_streams( schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish retriever=retriever, message_repository=self.message_repository, - max_records_limit=self._limits.max_records - if self._limits - else None, ), stream_slicer=declarative_stream.retriever.stream_slicer, - slice_limit=self._limits.max_slices - if self._limits - else None, # technically not needed because create_declarative_stream() -> create_simple_retriever() will apply the decorator. But for consistency and depending how we build create_default_stream, this may be needed later ) else: if ( @@ -371,12 +311,8 @@ def _group_streams( schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish retriever=retriever, message_repository=self.message_repository, - max_records_limit=self._limits.max_records - if self._limits - else None, ), stream_slicer=cursor, - slice_limit=self._limits.max_slices if self._limits else None, ) concurrent_streams.append( @@ -405,12 +341,8 @@ def _group_streams( schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish retriever=declarative_stream.retriever, message_repository=self.message_repository, - max_records_limit=self._limits.max_records if self._limits else None, ), declarative_stream.retriever.stream_slicer, - slice_limit=self._limits.max_slices - if self._limits - else None, # technically not needed because create_declarative_stream() -> create_simple_retriever() will apply the decorator. 
But for consistency and depending how we build create_default_stream, this may be needed later ) final_state_cursor = FinalStateCursor( @@ -469,10 +401,8 @@ def _group_streams( schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish retriever=retriever, message_repository=self.message_repository, - max_records_limit=self._limits.max_records if self._limits else None, ), perpartition_cursor, - slice_limit=self._limits.max_slices if self._limits else None, ) concurrent_streams.append( diff --git a/airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py b/airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py index 34326f2c7..e75891584 100644 --- a/airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py +++ b/airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py @@ -631,10 +631,6 @@ SchemaNormalizationModel.Default: TransformConfig.DefaultSchemaNormalization, } -# Ideally this should use the value defined in ConcurrentDeclarativeSource, but -# this would be a circular import -MAX_SLICES = 5 - class ModelToComponentFactory: EPOCH_DATETIME_FORMAT = "%s" @@ -2090,6 +2086,12 @@ def create_declarative_stream( elif concurrent_cursor: cursor = concurrent_cursor + # FIXME to be removed once we migrate everything to DefaultStream + if isinstance(retriever, SimpleRetriever): + # We zero it out here, but since this is a cursor reference, the state is still properly + # instantiated for the other components that reference it + retriever.cursor = None + partition_generator = StreamSlicerPartitionGenerator( DeclarativePartitionFactory( stream_name, diff --git a/airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py b/airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py index 809936ae0..a7ce26143 100644 --- a/airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py +++ b/airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py @@ -1,12 +1,9 @@ -# Copyright (c) 2025 Airbyte, Inc., all rights reserved. +# Copyright (c) 2024 Airbyte, Inc., all rights reserved. 
-from typing import Any, Iterable, Mapping, Optional, cast +from typing import Any, Iterable, Mapping, Optional from airbyte_cdk.sources.declarative.retrievers import Retriever from airbyte_cdk.sources.declarative.schema import SchemaLoader -from airbyte_cdk.sources.declarative.stream_slicers.stream_slicer_test_read_decorator import ( - StreamSlicerTestReadDecorator, -) from airbyte_cdk.sources.message import MessageRepository from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition from airbyte_cdk.sources.streams.concurrent.partitions.partition_generator import PartitionGenerator @@ -14,11 +11,6 @@ from airbyte_cdk.sources.types import Record, StreamSlice from airbyte_cdk.utils.slice_hasher import SliceHasher -# For Connector Builder test read operations, we track the total number of records -# read for the stream at the global level so that we can stop reading early if we -# exceed the record limit -total_record_counter = 0 - class SchemaLoaderCachingDecorator(SchemaLoader): def __init__(self, schema_loader: SchemaLoader): @@ -39,7 +31,6 @@ def __init__( schema_loader: SchemaLoader, retriever: Retriever, message_repository: MessageRepository, - max_records_limit: Optional[int] = None, ) -> None: """ The DeclarativePartitionFactory takes a retriever_factory and not a retriever directly. The reason is that our components are not @@ -50,7 +41,6 @@ def __init__( self._schema_loader = SchemaLoaderCachingDecorator(schema_loader) self._retriever = retriever self._message_repository = message_repository - self._max_records_limit = max_records_limit def create(self, stream_slice: StreamSlice) -> Partition: return DeclarativePartition( @@ -58,7 +48,6 @@ def create(self, stream_slice: StreamSlice) -> Partition: schema_loader=self._schema_loader, retriever=self._retriever, message_repository=self._message_repository, - max_records_limit=self._max_records_limit, stream_slice=stream_slice, ) @@ -70,29 +59,19 @@ def __init__( schema_loader: SchemaLoader, retriever: Retriever, message_repository: MessageRepository, - max_records_limit: Optional[int], stream_slice: StreamSlice, ): self._stream_name = stream_name self._schema_loader = schema_loader self._retriever = retriever self._message_repository = message_repository - self._max_records_limit = max_records_limit self._stream_slice = stream_slice self._hash = SliceHasher.hash(self._stream_name, self._stream_slice) def read(self) -> Iterable[Record]: - if self._max_records_limit is not None: - global total_record_counter - if total_record_counter >= self._max_records_limit: - return for stream_data in self._retriever.read_records( self._schema_loader.get_json_schema(), self._stream_slice ): - if self._max_records_limit is not None: - if total_record_counter >= self._max_records_limit: - break - if isinstance(stream_data, Mapping): record = ( stream_data @@ -107,9 +86,6 @@ def read(self) -> Iterable[Record]: else: self._message_repository.emit_message(stream_data) - if self._max_records_limit is not None: - total_record_counter += 1 - def to_slice(self) -> Optional[Mapping[str, Any]]: return self._stream_slice @@ -122,24 +98,10 @@ def __hash__(self) -> int: class StreamSlicerPartitionGenerator(PartitionGenerator): def __init__( - self, - partition_factory: DeclarativePartitionFactory, - stream_slicer: StreamSlicer, - slice_limit: Optional[int] = None, - max_records_limit: Optional[int] = None, + self, partition_factory: DeclarativePartitionFactory, stream_slicer: StreamSlicer ) -> None: self._partition_factory = 
partition_factory - - if slice_limit: - self._stream_slicer = cast( - StreamSlicer, - StreamSlicerTestReadDecorator( - wrapped_slicer=stream_slicer, - maximum_number_of_slices=slice_limit, - ), - ) - else: - self._stream_slicer = stream_slicer + self._stream_slicer = stream_slicer def generate(self) -> Iterable[Partition]: for stream_slice in self._stream_slicer.stream_slices(): diff --git a/airbyte_cdk/sources/declarative/stream_slicers/stream_slicer_test_read_decorator.py b/airbyte_cdk/sources/declarative/stream_slicers/stream_slicer_test_read_decorator.py index d261c27e8..323c89196 100644 --- a/airbyte_cdk/sources/declarative/stream_slicers/stream_slicer_test_read_decorator.py +++ b/airbyte_cdk/sources/declarative/stream_slicers/stream_slicer_test_read_decorator.py @@ -4,10 +4,10 @@ from dataclasses import dataclass from itertools import islice -from typing import Any, Iterable +from typing import Any, Iterable, Mapping, Optional, Union from airbyte_cdk.sources.streams.concurrent.partitions.stream_slicer import StreamSlicer -from airbyte_cdk.sources.types import StreamSlice +from airbyte_cdk.sources.types import StreamSlice, StreamState @dataclass diff --git a/airbyte_cdk/sources/message/concurrent_repository.py b/airbyte_cdk/sources/message/concurrent_repository.py deleted file mode 100644 index e3bc7116a..000000000 --- a/airbyte_cdk/sources/message/concurrent_repository.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2025 Airbyte, Inc., all rights reserved. -import logging -import os -from queue import Queue -from typing import Callable, Iterable - -from airbyte_cdk.models import AirbyteMessage, Level -from airbyte_cdk.models import Type as MessageType -from airbyte_cdk.sources.message.repository import LogMessage, MessageRepository -from airbyte_cdk.sources.streams.concurrent.partitions.types import QueueItem - -logger = logging.getLogger("airbyte") - - -class ConcurrentMessageRepository(MessageRepository): - """ - Message repository that immediately loads messages onto the queue processed on the - main thread. This ensures that messages are processed in the correct order they are - received. The InMemoryMessageRepository implementation does not have guaranteed - ordering since whether to process the main thread vs. partitions is non-deterministic - and there can be a lag between reading the main-thread and consuming messages on the - MessageRepository. - - This is particularly important for the connector builder which relies on grouping - of messages to organize request/response, pages, and partitions. - """ - - def __init__(self, queue: Queue[QueueItem], message_repository: MessageRepository): - self._queue = queue - self._decorated_message_repository = message_repository - - def emit_message(self, message: AirbyteMessage) -> None: - self._decorated_message_repository.emit_message(message) - for message in self._decorated_message_repository.consume_queue(): - self._queue.put(message) - - def log_message(self, level: Level, message_provider: Callable[[], LogMessage]) -> None: - self._decorated_message_repository.log_message(level, message_provider) - for message in self._decorated_message_repository.consume_queue(): - self._queue.put(message) - - def consume_queue(self) -> Iterable[AirbyteMessage]: - """ - This method shouldn't need to be called because as part of emit_message() we are already - loading messages onto the queue processed on the main thread. 
- """ - yield from [] diff --git a/airbyte_cdk/sources/streams/concurrent/cursor.py b/airbyte_cdk/sources/streams/concurrent/cursor.py index ca63a6901..318076835 100644 --- a/airbyte_cdk/sources/streams/concurrent/cursor.py +++ b/airbyte_cdk/sources/streams/concurrent/cursor.py @@ -4,7 +4,6 @@ import functools import logging -import threading from abc import ABC, abstractmethod from typing import ( Any, @@ -175,12 +174,6 @@ def __init__( self._should_be_synced_logger_triggered = False self._clamping_strategy = clamping_strategy - # A lock is required when closing a partition because updating the cursor's concurrent_state is - # not thread safe. When multiple partitions are being closed by the cursor at the same time, it is - # possible for one partition to update concurrent_state after a second partition has already read - # the previous state. This can lead to the second partition overwriting the previous one's state. - self._lock = threading.Lock() - @property def state(self) -> MutableMapping[str, Any]: return self._connector_state_converter.convert_to_state_message( @@ -229,14 +222,6 @@ def _get_concurrent_state( ) def observe(self, record: Record) -> None: - # Because observe writes to the most_recent_cursor_value_per_partition mapping, - # it is not thread-safe. However, this shouldn't lead to concurrency issues because - # observe() is only invoked by PartitionReader.process_partition(). Since the map is - # broken down according to partition, concurrent threads processing only read/write - # from different keys which avoids any conflicts. - # - # If we were to add thread safety, we should implement a lock per-partition - # which is instantiated during stream_slices() most_recent_cursor_value = self._most_recent_cursor_value_per_partition.get( record.associated_slice ) @@ -252,14 +237,13 @@ def _extract_cursor_value(self, record: Record) -> Any: return self._connector_state_converter.parse_value(self._cursor_field.extract_value(record)) def close_partition(self, partition: Partition) -> None: - with self._lock: - slice_count_before = len(self._concurrent_state.get("slices", [])) - self._add_slice_to_state(partition) - if slice_count_before < len( - self._concurrent_state["slices"] - ): # only emit if at least one slice has been processed - self._merge_partitions() - self._emit_state_message() + slice_count_before = len(self._concurrent_state.get("slices", [])) + self._add_slice_to_state(partition) + if slice_count_before < len( + self._concurrent_state["slices"] + ): # only emit if at least one slice has been processed + self._merge_partitions() + self._emit_state_message() self._has_closed_at_least_one_slice = True def _add_slice_to_state(self, partition: Partition) -> None: diff --git a/airbyte_cdk/sources/streams/concurrent/partition_reader.py b/airbyte_cdk/sources/streams/concurrent/partition_reader.py index 0edc5056a..3d23fd9cf 100644 --- a/airbyte_cdk/sources/streams/concurrent/partition_reader.py +++ b/airbyte_cdk/sources/streams/concurrent/partition_reader.py @@ -1,45 +1,14 @@ -# Copyright (c) 2025 Airbyte, Inc., all rights reserved. - -import logging +# +# Copyright (c) 2023 Airbyte, Inc., all rights reserved. 
+# from queue import Queue -from typing import Optional from airbyte_cdk.sources.concurrent_source.stream_thread_exception import StreamThreadException -from airbyte_cdk.sources.message.repository import MessageRepository -from airbyte_cdk.sources.streams.concurrent.cursor import Cursor from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition from airbyte_cdk.sources.streams.concurrent.partitions.types import ( PartitionCompleteSentinel, QueueItem, ) -from airbyte_cdk.sources.utils.slice_logger import SliceLogger - - -# Since moving all the connector builder workflow to the concurrent CDK which required correct ordering -# of grouping log messages onto the main write thread using the ConcurrentMessageRepository, this -# separate flow and class that was used to log slices onto this partition's message_repository -# should just be replaced by emitting messages directly onto the repository instead of an intermediary. -class PartitionLogger: - """ - Helper class that provides a mechanism for passing a log message onto the current - partitions message repository - """ - - def __init__( - self, - slice_logger: SliceLogger, - logger: logging.Logger, - message_repository: MessageRepository, - ): - self._slice_logger = slice_logger - self._logger = logger - self._message_repository = message_repository - - def log(self, partition: Partition) -> None: - if self._slice_logger.should_log_slice_message(self._logger): - self._message_repository.emit_message( - self._slice_logger.create_slice_log_message(partition.to_slice()) - ) class PartitionReader: @@ -49,18 +18,13 @@ class PartitionReader: _IS_SUCCESSFUL = True - def __init__( - self, - queue: Queue[QueueItem], - partition_logger: Optional[PartitionLogger] = None, - ) -> None: + def __init__(self, queue: Queue[QueueItem]) -> None: """ :param queue: The queue to put the records in. """ self._queue = queue - self._partition_logger = partition_logger - def process_partition(self, partition: Partition, cursor: Cursor) -> None: + def process_partition(self, partition: Partition) -> None: """ Process a partition and put the records in the output queue. When all the partitions are added to the queue, a sentinel is added to the queue to indicate that all the partitions have been generated. 
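Illustrative sketch, not part of the patch: with cursor handling moved back to the main thread (the concurrent_read_processor.py hunks above add stream.cursor.observe(record) in on_record() and stream.cursor.close_partition(partition) in on_partition_complete_sentinel()), PartitionReader now only drains a partition into the queue and enqueues a completion sentinel. A minimal usage sketch, using a Mock in place of a real Partition:

from queue import Queue
from unittest.mock import Mock

from airbyte_cdk.sources.streams.concurrent.partition_reader import PartitionReader
from airbyte_cdk.sources.streams.concurrent.partitions.types import PartitionCompleteSentinel

queue: Queue = Queue()
partition = Mock()
partition.read.return_value = iter(["record_1", "record_2"])  # stand-ins for Record objects

# Drains the partition onto the queue, then enqueues a PartitionCompleteSentinel.
PartitionReader(queue).process_partition(partition)

items = [queue.get_nowait() for _ in range(queue.qsize())]
assert items[:2] == ["record_1", "record_2"]
assert isinstance(items[-1], PartitionCompleteSentinel)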
@@ -73,13 +37,8 @@ def process_partition(self, partition: Partition, cursor: Cursor) -> None: :return: None """ try: - if self._partition_logger: - self._partition_logger.log(partition) - for record in partition.read(): self._queue.put(record) - cursor.observe(record) - cursor.close_partition(partition) self._queue.put(PartitionCompleteSentinel(partition, self._IS_SUCCESSFUL)) except Exception as e: self._queue.put(StreamThreadException(e, partition.stream_name())) diff --git a/airbyte_cdk/sources/streams/concurrent/partitions/types.py b/airbyte_cdk/sources/streams/concurrent/partitions/types.py index 3ae63c242..77644c6b9 100644 --- a/airbyte_cdk/sources/streams/concurrent/partitions/types.py +++ b/airbyte_cdk/sources/streams/concurrent/partitions/types.py @@ -4,7 +4,6 @@ from typing import Any, Union -from airbyte_cdk.models import AirbyteMessage from airbyte_cdk.sources.concurrent_source.partition_generation_completed_sentinel import ( PartitionGenerationCompletedSentinel, ) @@ -35,10 +34,5 @@ def __eq__(self, other: Any) -> bool: Typedef representing the items that can be added to the ThreadBasedConcurrentStream """ QueueItem = Union[ - Record, - Partition, - PartitionCompleteSentinel, - PartitionGenerationCompletedSentinel, - Exception, - AirbyteMessage, + Record, Partition, PartitionCompleteSentinel, PartitionGenerationCompletedSentinel, Exception ] diff --git a/airbyte_cdk/sources/streams/http/http_client.py b/airbyte_cdk/sources/streams/http/http_client.py index ff3c8e733..c4fa86866 100644 --- a/airbyte_cdk/sources/streams/http/http_client.py +++ b/airbyte_cdk/sources/streams/http/http_client.py @@ -153,10 +153,7 @@ def _request_session(self) -> requests.Session: # * `If the application running SQLite crashes, the data will be safe, but the database [might become corrupted](https://www.sqlite.org/howtocorrupt.html#cfgerr) if the operating system crashes or the computer loses power before that data has been written to the disk surface.` in [this description](https://www.sqlite.org/pragma.html#pragma_synchronous). backend = requests_cache.SQLiteCache(sqlite_path, fast_save=True, wal=True) return CachedLimiterSession( - cache_name=sqlite_path, - backend=backend, - api_budget=self._api_budget, - match_headers=True, + sqlite_path, backend=backend, api_budget=self._api_budget, match_headers=True ) else: return LimiterSession(api_budget=self._api_budget) diff --git a/airbyte_cdk/sources/utils/slice_logger.py b/airbyte_cdk/sources/utils/slice_logger.py index 4b29f3e0d..ee802a7a6 100644 --- a/airbyte_cdk/sources/utils/slice_logger.py +++ b/airbyte_cdk/sources/utils/slice_logger.py @@ -11,10 +11,6 @@ from airbyte_cdk.models import Type as MessageType -# Once everything runs on the concurrent CDK and we've cleaned up the legacy flows, we should try to remove -# this class and write messages directly to the message_repository instead of through the logger because for -# cases like the connector builder where ordering of messages is important, using the logger can cause -# messages to be grouped out of order. Alas work for a different day. class SliceLogger(ABC): """ SliceLogger is an interface that allows us to log slices of data in a uniform way. 
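Illustrative sketch, not part of the patch: the test diffs that follow exercise the relocated Connector Builder limits. Assuming the module layout introduced above, the DEFAULT_MAXIMUM_* constants, the TestLimits dataclass, and get_limits() combine as shown below; create_source(config, limits) then builds a ManifestDeclarativeSource whose ModelToComponentFactory carries the page and slice limits, with retries and caching disabled (it additionally requires "__injected_declarative_manifest" in the config).

from airbyte_cdk.connector_builder.connector_builder_handler import TestLimits, get_limits

config = {"__test_read_config": {"max_records": 10, "max_slices": 2}}
limits = get_limits(config)

# Unset keys fall back to the module-level defaults; because get_limits() uses "or",
# an explicit falsy override such as 0 also falls back to the default.
assert limits == TestLimits(max_records=10, max_pages_per_slice=5, max_slices=2, max_streams=100)

Callers that previously passed limits=TestLimits() to ConcurrentDeclarativeSource now inject component_factory=ModelToComponentFactory(disable_cache=True) instead, as the test_dynamic_schema_loader.py change further below does.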
diff --git a/unit_tests/connector_builder/test_connector_builder_handler.py b/unit_tests/connector_builder/test_connector_builder_handler.py index 643878eec..d6f1bf2d6 100644 --- a/unit_tests/connector_builder/test_connector_builder_handler.py +++ b/unit_tests/connector_builder/test_connector_builder_handler.py @@ -17,6 +17,10 @@ from airbyte_cdk import connector_builder from airbyte_cdk.connector_builder.connector_builder_handler import ( + DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE, + DEFAULT_MAXIMUM_NUMBER_OF_SLICES, + DEFAULT_MAXIMUM_RECORDS, + TestLimits, create_source, get_limits, resolve_manifest, @@ -54,9 +58,9 @@ from airbyte_cdk.models import Type as MessageType from airbyte_cdk.sources.declarative.concurrent_declarative_source import ( ConcurrentDeclarativeSource, - TestLimits, ) from airbyte_cdk.sources.declarative.declarative_stream import DeclarativeStream +from airbyte_cdk.sources.declarative.manifest_declarative_source import ManifestDeclarativeSource from airbyte_cdk.sources.declarative.retrievers.simple_retriever import SimpleRetriever from airbyte_cdk.sources.declarative.stream_slicers import StreamSlicerTestReadDecorator from airbyte_cdk.sources.streams.concurrent.default_stream import DefaultStream @@ -538,9 +542,7 @@ def test_resolve_manifest(valid_resolve_manifest_config_file): config = copy.deepcopy(RESOLVE_MANIFEST_CONFIG) command = "resolve_manifest" config["__command"] = command - source = ConcurrentDeclarativeSource( - catalog=None, config=config, state=None, source_config=MANIFEST - ) + source = ManifestDeclarativeSource(source_config=MANIFEST) limits = TestLimits() resolved_manifest = handle_connector_builder_request( source, command, config, create_configured_catalog("dummy_stream"), _A_STATE, limits @@ -689,21 +691,19 @@ def test_resolve_manifest(valid_resolve_manifest_config_file): def test_resolve_manifest_error_returns_error_response(): - class MockConcurrentDeclarativeSource: + class MockManifestDeclarativeSource: @property def resolved_manifest(self): raise ValueError - source = MockConcurrentDeclarativeSource() + source = MockManifestDeclarativeSource() response = resolve_manifest(source) assert "Error resolving manifest" in response.trace.error.message def test_read(): config = TEST_READ_CONFIG - source = ConcurrentDeclarativeSource( - catalog=None, config=config, state=None, source_config=MANIFEST - ) + source = ManifestDeclarativeSource(source_config=MANIFEST) real_record = AirbyteRecordMessage( data={"id": "1234", "key": "value"}, emitted_at=1, stream=_stream_name @@ -835,7 +835,7 @@ def cursor_field(self): def name(self): return _stream_name - class MockConcurrentDeclarativeSource: + class MockManifestDeclarativeSource: def streams(self, config): return [MockDeclarativeStream()] @@ -857,7 +857,7 @@ def check_config_against_spec(self) -> Literal[False]: stack_trace = "a stack trace" mock_from_exception.return_value = stack_trace - source = MockConcurrentDeclarativeSource() + source = MockManifestDeclarativeSource() limits = TestLimits() response = read_stream( source, @@ -892,30 +892,26 @@ def test_handle_429_response(): {"result": [{"error": "too many requests"}], "_metadata": {"next": "next"}} ) - config = copy.deepcopy(TEST_READ_CONFIG) - # Add backoff strategy to avoid default endless backoff loop - config["__injected_declarative_manifest"]["definitions"]["retriever"]["requester"][ + TEST_READ_CONFIG["__injected_declarative_manifest"]["definitions"]["retriever"]["requester"][ "error_handler" ] = {"backoff_strategies": [{"type": 
"ConstantBackoffStrategy", "backoff_time_in_seconds": 5}]} + config = TEST_READ_CONFIG limits = TestLimits() - catalog = ConfiguredAirbyteCatalogSerializer.load(CONFIGURED_CATALOG) - source = create_source(config=config, limits=limits, catalog=catalog, state=None) + source = create_source(config, limits) with patch("requests.Session.send", return_value=response) as mock_send: response = handle_connector_builder_request( source, "test_read", config, - catalog, + ConfiguredAirbyteCatalogSerializer.load(CONFIGURED_CATALOG), _A_PER_PARTITION_STATE, limits, ) - # The test read will attempt a read for 5 partitions, and attempt 1 request - # each time that will not be retried - assert mock_send.call_count == 5 + mock_send.assert_called_once() @pytest.mark.parametrize( @@ -967,7 +963,7 @@ def test_invalid_config_command(invalid_config_file, dummy_catalog): @pytest.fixture def manifest_declarative_source(): - return mock.Mock(spec=ConcurrentDeclarativeSource, autospec=True) + return mock.Mock(spec=ManifestDeclarativeSource, autospec=True) def create_mock_retriever(name, url_base, path): @@ -992,16 +988,16 @@ def create_mock_declarative_stream(http_stream): ( "test_no_test_read_config", {}, - TestLimits.DEFAULT_MAX_RECORDS, - TestLimits.DEFAULT_MAX_SLICES, - TestLimits.DEFAULT_MAX_PAGES_PER_SLICE, + DEFAULT_MAXIMUM_RECORDS, + DEFAULT_MAXIMUM_NUMBER_OF_SLICES, + DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE, ), ( "test_no_values_set", {"__test_read_config": {}}, - TestLimits.DEFAULT_MAX_RECORDS, - TestLimits.DEFAULT_MAX_SLICES, - TestLimits.DEFAULT_MAX_PAGES_PER_SLICE, + DEFAULT_MAXIMUM_RECORDS, + DEFAULT_MAXIMUM_NUMBER_OF_SLICES, + DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE, ), ( "test_values_are_set", @@ -1029,9 +1025,9 @@ def test_create_source(): config = {"__injected_declarative_manifest": MANIFEST} - source = create_source(config=config, limits=limits, catalog=None, state=None) + source = create_source(config, limits) - assert isinstance(source, ConcurrentDeclarativeSource) + assert isinstance(source, ManifestDeclarativeSource) assert source._constructor._limit_pages_fetched_per_slice == limits.max_pages_per_slice assert source._constructor._limit_slices_fetched == limits.max_slices assert source._constructor._disable_cache @@ -1123,7 +1119,7 @@ def test_read_source(mock_http_stream): config = {"__injected_declarative_manifest": MANIFEST} - source = create_source(config=config, limits=limits, catalog=catalog, state=None) + source = create_source(config, limits) output_data = read_stream(source, config, catalog, _A_PER_PARTITION_STATE, limits).record.data slices = output_data["slices"] @@ -1172,7 +1168,7 @@ def test_read_source_single_page_single_slice(mock_http_stream): config = {"__injected_declarative_manifest": MANIFEST} - source = create_source(config=config, limits=limits, catalog=catalog, state=None) + source = create_source(config, limits) output_data = read_stream(source, config, catalog, _A_PER_PARTITION_STATE, limits).record.data slices = output_data["slices"] @@ -1256,11 +1252,11 @@ def test_handle_read_external_requests(deployment_mode, url_base, expected_error ] ) - test_manifest = copy.deepcopy(MANIFEST) + test_manifest = MANIFEST test_manifest["streams"][0]["$parameters"]["url_base"] = url_base config = {"__injected_declarative_manifest": test_manifest} - source = create_source(config=config, limits=limits, catalog=catalog, state=None) + source = create_source(config, limits) with mock.patch.dict(os.environ, {"DEPLOYMENT_MODE": deployment_mode}, clear=False): output_data = 
read_stream( @@ -1290,13 +1286,13 @@ def test_handle_read_external_requests(deployment_mode, url_base, expected_error pytest.param( "CLOUD", "https://10.0.27.27/tokens/bearer", - "StreamThreadException", + "AirbyteTracedException", id="test_cloud_read_with_private_endpoint", ), pytest.param( "CLOUD", "http://unsecured.protocol/tokens/bearer", - "StreamThreadException", + "InvalidSchema", id="test_cloud_read_with_unsecured_endpoint", ), pytest.param( @@ -1350,13 +1346,13 @@ def test_handle_read_external_oauth_request(deployment_mode, token_url, expected "refresh_token": "john", } - test_manifest = copy.deepcopy(MANIFEST) + test_manifest = MANIFEST test_manifest["definitions"]["retriever"]["requester"]["authenticator"] = ( oauth_authenticator_config ) config = {"__injected_declarative_manifest": test_manifest} - source = create_source(config=config, limits=limits, catalog=catalog, state=None) + source = create_source(config, limits) with mock.patch.dict(os.environ, {"DEPLOYMENT_MODE": deployment_mode}, clear=False): output_data = read_stream( @@ -1413,9 +1409,7 @@ def test_read_stream_exception_with_secrets(): def test_full_resolve_manifest(valid_resolve_manifest_config_file): config = copy.deepcopy(RESOLVE_DYNAMIC_STREAM_MANIFEST_CONFIG) command = config["__command"] - source = ConcurrentDeclarativeSource( - catalog=None, config=config, state=None, source_config=DYNAMIC_STREAM_MANIFEST - ) + source = ManifestDeclarativeSource(source_config=DYNAMIC_STREAM_MANIFEST) limits = TestLimits(max_streams=2) with HttpMocker() as http_mocker: http_mocker.get( @@ -1486,11 +1480,11 @@ def test_full_resolve_manifest(valid_resolve_manifest_config_file): "type": "RequestOption", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "page_token_option": { @@ -1498,11 +1492,11 @@ def test_full_resolve_manifest(valid_resolve_manifest_config_file): "type": "RequestPath", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "pagination_strategy": { @@ -1511,20 +1505,20 @@ def test_full_resolve_manifest(valid_resolve_manifest_config_file): "page_size": 2, "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "partition_router": { @@ -1533,11 +1527,11 @@ def test_full_resolve_manifest(valid_resolve_manifest_config_file): "cursor_field": "item_id", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": 
"stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "requester": { @@ -1547,22 +1541,22 @@ def test_full_resolve_manifest(valid_resolve_manifest_config_file): "api_token": "{{ config.apikey }}", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "request_parameters": {"a_param": "10"}, "type": "HttpRequester", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "record_selector": { @@ -1571,40 +1565,40 @@ def test_full_resolve_manifest(valid_resolve_manifest_config_file): "type": "DpathExtractor", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "type": "RecordSelector", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "type": "SimpleRetriever", "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, }, "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", "$parameters": { "name": "stream_with_custom_requester", "primary_key": "id", - "url_base": "https://api.sendgrid.com", + "url_base": "https://10.0.27.27/api/v1/", }, "dynamic_stream_name": None, }, diff --git a/unit_tests/connector_builder/test_message_grouper.py b/unit_tests/connector_builder/test_message_grouper.py index e79ee117c..6c4f11526 100644 --- a/unit_tests/connector_builder/test_message_grouper.py +++ b/unit_tests/connector_builder/test_message_grouper.py @@ -307,6 +307,126 @@ def test_get_grouped_messages_with_logs(mock_entrypoint_read: Mock) -> None: assert actual_log == expected_logs[i] +@pytest.mark.parametrize( + "request_record_limit, max_record_limit, should_fail", + [ + pytest.param(1, 3, False, id="test_create_request_with_record_limit"), + pytest.param(3, 1, True, id="test_create_request_record_limit_exceeds_max"), + ], +) +@patch("airbyte_cdk.connector_builder.test_reader.reader.AirbyteEntrypoint.read") +def test_get_grouped_messages_record_limit( + mock_entrypoint_read: Mock, request_record_limit: int, max_record_limit: int, should_fail: bool +) -> None: + stream_name = "hashiras" + url = "https://demonslayers.com/api/v1/hashiras?era=taisho" + request = { + "headers": {"Content-Type": "application/json"}, + "method": "GET", + 
"body": {"content": '{"custom": "field"}'}, + } + response = { + "status_code": 200, + "headers": {"field": "value"}, + "body": {"content": '{"name": "field"}'}, + } + mock_source = make_mock_source( + mock_entrypoint_read, + iter( + [ + request_response_log_message(request, response, url, stream_name), + record_message(stream_name, {"name": "Shinobu Kocho"}), + record_message(stream_name, {"name": "Muichiro Tokito"}), + request_response_log_message(request, response, url, stream_name), + record_message(stream_name, {"name": "Mitsuri Kanroji"}), + ] + ), + ) + n_records = 2 + record_limit = min(request_record_limit, max_record_limit) + + api = TestReader(MAX_PAGES_PER_SLICE, MAX_SLICES, max_record_limit=max_record_limit) + # this is the call we expect to raise an exception + if should_fail: + with pytest.raises(ValueError): + api.run_test_read( + mock_source, + config=CONFIG, + configured_catalog=create_configured_catalog(stream_name), + stream_name=stream_name, + state=_NO_STATE, + record_limit=request_record_limit, + ) + else: + actual_response: StreamRead = api.run_test_read( + mock_source, + config=CONFIG, + configured_catalog=create_configured_catalog(stream_name), + stream_name=stream_name, + state=_NO_STATE, + record_limit=request_record_limit, + ) + single_slice = actual_response.slices[0] + total_records = 0 + for i, actual_page in enumerate(single_slice.pages): + total_records += len(actual_page.records) + assert total_records == min([record_limit, n_records]) + + assert (total_records >= max_record_limit) == actual_response.test_read_limit_reached + + +@pytest.mark.parametrize( + "max_record_limit", + [ + pytest.param(2, id="test_create_request_no_record_limit"), + pytest.param(1, id="test_create_request_no_record_limit_n_records_exceed_max"), + ], +) +@patch("airbyte_cdk.connector_builder.test_reader.reader.AirbyteEntrypoint.read") +def test_get_grouped_messages_default_record_limit( + mock_entrypoint_read: Mock, max_record_limit: int +) -> None: + stream_name = "hashiras" + url = "https://demonslayers.com/api/v1/hashiras?era=taisho" + request = { + "headers": {"Content-Type": "application/json"}, + "method": "GET", + "body": {"content": '{"custom": "field"}'}, + } + response = { + "status_code": 200, + "headers": {"field": "value"}, + "body": {"content": '{"name": "field"}'}, + } + mock_source = make_mock_source( + mock_entrypoint_read, + iter( + [ + request_response_log_message(request, response, url, stream_name), + record_message(stream_name, {"name": "Shinobu Kocho"}), + record_message(stream_name, {"name": "Muichiro Tokito"}), + request_response_log_message(request, response, url, stream_name), + record_message(stream_name, {"name": "Mitsuri Kanroji"}), + ] + ), + ) + n_records = 2 + + api = TestReader(MAX_PAGES_PER_SLICE, MAX_SLICES, max_record_limit=max_record_limit) + actual_response: StreamRead = api.run_test_read( + source=mock_source, + config=CONFIG, + configured_catalog=create_configured_catalog(stream_name), + stream_name=stream_name, + state=_NO_STATE, + ) + single_slice = actual_response.slices[0] + total_records = 0 + for i, actual_page in enumerate(single_slice.pages): + total_records += len(actual_page.records) + assert total_records == min([max_record_limit, n_records]) + + @patch("airbyte_cdk.connector_builder.test_reader.reader.AirbyteEntrypoint.read") def test_get_grouped_messages_limit_0(mock_entrypoint_read: Mock) -> None: stream_name = "hashiras" diff --git a/unit_tests/sources/declarative/incremental/test_concurrent_perpartitioncursor.py 
diff --git a/unit_tests/sources/declarative/incremental/test_concurrent_perpartitioncursor.py b/unit_tests/sources/declarative/incremental/test_concurrent_perpartitioncursor.py
index cb774bda7..ba26f7c91 100644
--- a/unit_tests/sources/declarative/incremental/test_concurrent_perpartitioncursor.py
+++ b/unit_tests/sources/declarative/incremental/test_concurrent_perpartitioncursor.py
@@ -3622,7 +3622,6 @@ def test_given_no_partitions_processed_when_close_partition_then_no_state_update
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=slice,
         )
     )
@@ -3707,7 +3706,6 @@ def test_given_unfinished_first_parent_partition_no_parent_state_update():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=slice,
         )
     )
@@ -3802,7 +3800,6 @@ def test_given_unfinished_last_parent_partition_with_partial_parent_state_update
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=slice,
         )
     )
@@ -3892,7 +3889,6 @@ def test_given_all_partitions_finished_when_close_partition_then_final_state_emi
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=slice,
         )
     )
@@ -3966,7 +3962,6 @@ def test_given_partition_limit_exceeded_when_close_partition_then_switch_to_glob
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=slice,
         )
     )
@@ -4051,7 +4046,6 @@ def test_semaphore_cleanup():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=s,
         )
     )
@@ -4171,7 +4165,6 @@ def test_duplicate_partition_after_closing_partition_cursor_deleted():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=first_1,
         )
     )
@@ -4183,7 +4176,6 @@ def test_duplicate_partition_after_closing_partition_cursor_deleted():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=two,
         )
     )
@@ -4195,7 +4187,6 @@ def test_duplicate_partition_after_closing_partition_cursor_deleted():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=second_1,
         )
     )
@@ -4256,7 +4247,6 @@ def test_duplicate_partition_after_closing_partition_cursor_exists():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=first_1,
         )
     )
@@ -4268,7 +4258,6 @@ def test_duplicate_partition_after_closing_partition_cursor_exists():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=two,
         )
     )
@@ -4281,7 +4270,6 @@ def test_duplicate_partition_after_closing_partition_cursor_exists():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=second_1,
         )
     )
@@ -4339,7 +4327,6 @@ def test_duplicate_partition_while_processing():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=generated[1],
         )
     )
@@ -4350,7 +4337,6 @@ def test_duplicate_partition_while_processing():
             schema_loader=_EMPTY_SCHEMA_LOADER,
             retriever=MagicMock(),
             message_repository=MagicMock(),
-            max_records_limit=None,
             stream_slice=generated[0],
         )
     )
diff --git a/unit_tests/sources/declarative/retrievers/test_simple_retriever.py b/unit_tests/sources/declarative/retrievers/test_simple_retriever.py
index d39e84e4d..44f307a32 100644
--- a/unit_tests/sources/declarative/retrievers/test_simple_retriever.py
+++ b/unit_tests/sources/declarative/retrievers/test_simple_retriever.py
@@ -11,14 +11,7 @@
 import requests
 
 from airbyte_cdk import YamlDeclarativeSource
-from airbyte_cdk.models import (
-    AirbyteLogMessage,
-    AirbyteMessage,
-    AirbyteRecordMessage,
-    Level,
-    SyncMode,
-    Type,
-)
+from airbyte_cdk.models import AirbyteLogMessage, AirbyteMessage, Level, SyncMode, Type
 from airbyte_cdk.sources.declarative.auth.declarative_authenticator import NoAuth
 from airbyte_cdk.sources.declarative.decoders import JsonDecoder
 from airbyte_cdk.sources.declarative.extractors import DpathExtractor, RecordSelector
diff --git a/unit_tests/sources/declarative/schema/test_dynamic_schema_loader.py b/unit_tests/sources/declarative/schema/test_dynamic_schema_loader.py
index 20147465f..97f89879c 100644
--- a/unit_tests/sources/declarative/schema/test_dynamic_schema_loader.py
+++ b/unit_tests/sources/declarative/schema/test_dynamic_schema_loader.py
@@ -10,7 +10,9 @@
 from airbyte_cdk.sources.declarative.concurrent_declarative_source import (
     ConcurrentDeclarativeSource,
-    TestLimits,
+)
+from airbyte_cdk.sources.declarative.parsers.model_to_component_factory import (
+    ModelToComponentFactory,
 )
 from airbyte_cdk.sources.declarative.schema import DynamicSchemaLoader, SchemaTypeIdentifier
 from airbyte_cdk.test.mock_http import HttpMocker, HttpRequest, HttpResponse
@@ -351,13 +353,14 @@ def test_dynamic_schema_loader_with_type_conditions():
             },
         },
     }
-
     source = ConcurrentDeclarativeSource(
         source_config=_MANIFEST_WITH_TYPE_CONDITIONS,
         config=_CONFIG,
         catalog=None,
         state=None,
-        limits=TestLimits(),  # Avoid caching on the HttpClient which could result in caching the requests/responses of other tests
+        component_factory=ModelToComponentFactory(
+            disable_cache=True
+        ),  # Avoid caching on the HttpClient which could result in caching the requests/responses of other tests
     )
     with HttpMocker() as http_mocker:
         http_mocker.get(
diff --git a/unit_tests/sources/declarative/stream_slicers/test_declarative_partition_generator.py b/unit_tests/sources/declarative/stream_slicers/test_declarative_partition_generator.py
index f9e2779f1..b09c708ad 100644
--- a/unit_tests/sources/declarative/stream_slicers/test_declarative_partition_generator.py
+++ b/unit_tests/sources/declarative/stream_slicers/test_declarative_partition_generator.py
@@ -4,8 +4,6 @@
 from unittest import TestCase
 from unittest.mock import Mock
 
-# This allows for the global total_record_counter to be reset between tests
-import airbyte_cdk.sources.declarative.stream_slicers.declarative_partition_generator as declarative_partition_generator
 from airbyte_cdk.models import AirbyteLogMessage, AirbyteMessage, Level, Type
 from airbyte_cdk.sources.declarative.retrievers import Retriever
 from airbyte_cdk.sources.declarative.schema import InlineSchemaLoader
@@ -14,7 +12,7 @@
 )
 from airbyte_cdk.sources.message import MessageRepository
 from airbyte_cdk.sources.streams.core import StreamData
-from airbyte_cdk.sources.types import Record, StreamSlice
+from airbyte_cdk.sources.types import StreamSlice
 
 _STREAM_NAME = "a_stream_name"
 _SCHEMA_LOADER = InlineSchemaLoader({"type": "object", "properties": {}}, {})
@@ -35,7 +33,7 @@ class StreamSlicerPartitionGeneratorTest(TestCase):
     def test_given_multiple_slices_partition_generator_uses_the_same_retriever(self) -> None:
         retriever = self._mock_retriever([])
         message_repository = Mock(spec=MessageRepository)
-        partition_factory = declarative_partition_generator.DeclarativePartitionFactory(
+        partition_factory = DeclarativePartitionFactory(
             _STREAM_NAME,
             _SCHEMA_LOADER,
             retriever,
@@ -50,7 +48,7 @@ def test_given_multiple_slices_partition_generator_uses_the_same_retriever(self)
     def test_given_a_mapping_when_read_then_yield_record(self) -> None:
         retriever = self._mock_retriever([_A_RECORD])
         message_repository = Mock(spec=MessageRepository)
-        partition_factory = declarative_partition_generator.DeclarativePartitionFactory(
+        partition_factory = DeclarativePartitionFactory(
             _STREAM_NAME,
             _SCHEMA_LOADER,
             retriever,
@@ -68,7 +66,7 @@ def test_given_a_mapping_when_read_then_yield_record(self) -> None:
     def test_given_not_a_record_when_read_then_send_to_message_repository(self) -> None:
         retriever = self._mock_retriever([_AIRBYTE_LOG_MESSAGE])
         message_repository = Mock(spec=MessageRepository)
-        partition_factory = declarative_partition_generator.DeclarativePartitionFactory(
+        partition_factory = DeclarativePartitionFactory(
             _STREAM_NAME,
             _SCHEMA_LOADER,
             retriever,
@@ -79,78 +77,6 @@ def test_given_not_a_record_when_read_then_send_to_message_repository(self) -> N
 
         message_repository.emit_message.assert_called_once_with(_AIRBYTE_LOG_MESSAGE)
 
-    def test_max_records_reached_stops_reading(self) -> None:
-        declarative_partition_generator.total_record_counter = 0
-
-        expected_records = [
-            Record(data={"id": 1, "name": "Max"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Oscar"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Charles"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Alex"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Yuki"}, stream_name="stream_name"),
-        ]
-
-        mock_records = expected_records + [
-            Record(data={"id": 1, "name": "Lewis"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Lando"}, stream_name="stream_name"),
-        ]
-
-        retriever = self._mock_retriever(mock_records)
-        message_repository = Mock(spec=MessageRepository)
-        partition_factory = declarative_partition_generator.DeclarativePartitionFactory(
-            _STREAM_NAME,
-            _SCHEMA_LOADER,
-            retriever,
-            message_repository,
-            max_records_limit=5,
-        )
-
-        partition = partition_factory.create(_A_STREAM_SLICE)
-
-        actual_records = list(partition.read())
-
-        assert len(actual_records) == 5
-        assert actual_records == expected_records
-
-    def test_max_records_reached_on_previous_partition(self) -> None:
-        declarative_partition_generator.total_record_counter = 0
-
-        expected_records = [
-            Record(data={"id": 1, "name": "Max"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Oscar"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Charles"}, stream_name="stream_name"),
-        ]
-
-        mock_records = expected_records + [
-            Record(data={"id": 1, "name": "Alex"}, stream_name="stream_name"),
-            Record(data={"id": 1, "name": "Yuki"}, stream_name="stream_name"),
-        ]
-
-        retriever = self._mock_retriever(mock_records)
-        message_repository = Mock(spec=MessageRepository)
-        partition_factory = declarative_partition_generator.DeclarativePartitionFactory(
-            _STREAM_NAME,
-            _SCHEMA_LOADER,
-            retriever,
-            message_repository,
-            max_records_limit=3,
-        )
-
-        partition = partition_factory.create(_A_STREAM_SLICE)
-
-        first_partition_records = list(partition.read())
-
-        assert len(first_partition_records) == 3
-        assert first_partition_records == expected_records
-
-        second_partition_records = list(partition.read())
-        assert len(second_partition_records) == 0
-
-        # The DeclarativePartition exits out of the read before attempting to read_records() if
-        # the max_records_limit has already been reached. So we only expect to see read_records()
-        # called for the first partition read and not the second
-        retriever.read_records.assert_called_once()
-
     @staticmethod
     def _mock_retriever(read_return_value: List[StreamData]) -> Mock:
         retriever = Mock(spec=Retriever)
diff --git a/unit_tests/sources/declarative/test_concurrent_declarative_source.py b/unit_tests/sources/declarative/test_concurrent_declarative_source.py
index 4279e53c6..bdfefbd80 100644
--- a/unit_tests/sources/declarative/test_concurrent_declarative_source.py
+++ b/unit_tests/sources/declarative/test_concurrent_declarative_source.py
@@ -1507,8 +1507,6 @@ def test_read_concurrent_with_failing_partition_in_the_middle():
         ):
             messages.append(message)
     except AirbyteTracedException:
-        locations_states = get_states_for_stream(stream_name="locations", messages=messages)
-        assert len(locations_states) == 3
         assert (
             get_states_for_stream(stream_name="locations", messages=messages)[
                 -1
diff --git a/unit_tests/sources/streams/concurrent/scenarios/stream_facade_builder.py b/unit_tests/sources/streams/concurrent/scenarios/stream_facade_builder.py
index 75b52f6b2..50695ba1e 100644
--- a/unit_tests/sources/streams/concurrent/scenarios/stream_facade_builder.py
+++ b/unit_tests/sources/streams/concurrent/scenarios/stream_facade_builder.py
@@ -50,10 +50,7 @@ def __init__(
         self._message_repository = InMemoryMessageRepository()
         threadpool_manager = ThreadPoolManager(threadpool, streams[0].logger)
         concurrent_source = ConcurrentSource(
-            threadpool=threadpool_manager,
-            logger=streams[0].logger,
-            slice_logger=NeverLogSliceLogger(),
-            message_repository=self._message_repository,
+            threadpool_manager, streams[0].logger, NeverLogSliceLogger(), self._message_repository
         )
         super().__init__(concurrent_source)
         self._streams = streams
diff --git a/unit_tests/sources/streams/concurrent/test_concurrent_read_processor.py b/unit_tests/sources/streams/concurrent/test_concurrent_read_processor.py
index a681f75eb..d6ea64583 100644
--- a/unit_tests/sources/streams/concurrent/test_concurrent_read_processor.py
+++ b/unit_tests/sources/streams/concurrent/test_concurrent_read_processor.py
@@ -176,12 +176,10 @@ def test_handle_partition(self):
             self._partition_reader,
         )
-        expected_cursor = handler._stream_name_to_instance[_ANOTHER_STREAM_NAME].cursor
-
         handler.on_partition(self._a_closed_partition)
 
         self._thread_pool_manager.submit.assert_called_with(
-            self._partition_reader.process_partition, self._a_closed_partition, expected_cursor
+            self._partition_reader.process_partition, self._a_closed_partition
         )
         assert (
             self._a_closed_partition in handler._streams_to_running_partitions[_ANOTHER_STREAM_NAME]
         )
@@ -203,12 +201,10 @@ def test_handle_partition_emits_log_message_if_it_should_be_logged(self):
             self._partition_reader,
         )
-        expected_cursor = handler._stream_name_to_instance[_STREAM_NAME].cursor
-
         handler.on_partition(self._an_open_partition)
 
         self._thread_pool_manager.submit.assert_called_with(
-            self._partition_reader.process_partition, self._an_open_partition, expected_cursor
+            self._partition_reader.process_partition, self._an_open_partition
         )
         self._message_repository.emit_message.assert_called_with(self._log_message)
@@ -257,6 +253,8 @@ def test_handle_on_partition_complete_sentinel_with_messages_from_repository(sel
         ]
         assert messages == expected_messages
 
+        self._stream.cursor.close_partition.assert_called_once()
+
     @freezegun.freeze_time("2020-01-01T00:00:00")
     def test_handle_on_partition_complete_sentinel_yields_status_message_if_the_stream_is_done(
         self,
@@ -304,6 +302,55 @@ def test_handle_on_partition_complete_sentinel_yields_status_message_if_the_stre
             )
         ]
         assert messages == expected_messages
+        self._another_stream.cursor.close_partition.assert_called_once()
+
+    @freezegun.freeze_time("2020-01-01T00:00:00")
+    def test_given_exception_on_partition_complete_sentinel_then_yield_error_trace_message_and_stream_is_incomplete(
+        self,
+    ) -> None:
+        self._a_closed_partition.stream_name.return_value = self._stream.name
+        self._stream.cursor.close_partition.side_effect = ValueError
+
+        handler = ConcurrentReadProcessor(
+            [self._stream],
+            self._partition_enqueuer,
+            self._thread_pool_manager,
+            self._logger,
+            self._slice_logger,
+            self._message_repository,
+            self._partition_reader,
+        )
+        handler.start_next_partition_generator()
+        handler.on_partition(self._a_closed_partition)
+        list(
+            handler.on_partition_generation_completed(
+                PartitionGenerationCompletedSentinel(self._stream)
+            )
+        )
+        messages = list(
+            handler.on_partition_complete_sentinel(
+                PartitionCompleteSentinel(self._a_closed_partition)
+            )
+        )
+
+        expected_status_message = AirbyteMessage(
+            type=MessageType.TRACE,
+            trace=AirbyteTraceMessage(
+                type=TraceType.STREAM_STATUS,
+                stream_status=AirbyteStreamStatusTraceMessage(
+                    stream_descriptor=StreamDescriptor(
+                        name=self._stream.name,
+                    ),
+                    status=AirbyteStreamStatus.INCOMPLETE,
+                ),
+                emitted_at=1577836800000.0,
+            ),
+        )
+        assert list(map(lambda message: message.trace.type, messages)) == [
+            TraceType.ERROR,
+            TraceType.STREAM_STATUS,
+        ]
+        assert messages[1] == expected_status_message
 
     @freezegun.freeze_time("2020-01-01T00:00:00")
     def test_handle_on_partition_complete_sentinel_yields_no_status_message_if_the_stream_is_not_done(
@@ -332,6 +379,7 @@ def test_handle_on_partition_complete_sentinel_yields_no_status_message_if_the_s
 
         expected_messages = []
         assert messages == expected_messages
+        self._stream.cursor.close_partition.assert_called_once()
 
     @freezegun.freeze_time("2020-01-01T00:00:00")
     def test_on_record_no_status_message_no_repository_messge(self):
diff --git a/unit_tests/sources/streams/concurrent/test_partition_reader.py b/unit_tests/sources/streams/concurrent/test_partition_reader.py
index a41750772..1910e034d 100644
--- a/unit_tests/sources/streams/concurrent/test_partition_reader.py
+++ b/unit_tests/sources/streams/concurrent/test_partition_reader.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2025 Airbyte, Inc., all rights reserved.
-
+
+#
+# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
+#
 import unittest
 from queue import Queue
 from typing import Callable, Iterable, List
@@ -7,9 +8,7 @@
 
 import pytest
 
-from airbyte_cdk import InMemoryMessageRepository
 from airbyte_cdk.sources.concurrent_source.stream_thread_exception import StreamThreadException
-from airbyte_cdk.sources.streams.concurrent.cursor import FinalStateCursor
 from airbyte_cdk.sources.streams.concurrent.partition_reader import PartitionReader
 from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition
 from airbyte_cdk.sources.streams.concurrent.partitions.types import (
@@ -27,15 +26,10 @@ class PartitionReaderTest(unittest.TestCase):
     def setUp(self) -> None:
         self._queue: Queue[QueueItem] = Queue()
-        self._partition_reader = PartitionReader(self._queue, None)
+        self._partition_reader = PartitionReader(self._queue)
 
     def test_given_no_records_when_process_partition_then_only_emit_sentinel(self):
-        cursor = FinalStateCursor(
-            stream_name="test",
-            stream_namespace=None,
-            message_repository=InMemoryMessageRepository(),
-        )
-        self._partition_reader.process_partition(self._a_partition([]), cursor)
+        self._partition_reader.process_partition(self._a_partition([]))
 
         while queue_item := self._queue.get():
             if not isinstance(queue_item, PartitionCompleteSentinel):
@@ -46,24 +40,19 @@ def test_given_read_partition_successful_when_process_partition_then_queue_recor
         self,
     ):
         partition = self._a_partition(_RECORDS)
-        cursor = Mock()
-        self._partition_reader.process_partition(partition, cursor)
+        self._partition_reader.process_partition(partition)
 
         queue_content = self._consume_queue()
 
         assert queue_content == _RECORDS + [PartitionCompleteSentinel(partition)]
-        cursor.observe.assert_called()
-        cursor.close_partition.assert_called_once()
-
-    def test_given_exception_from_read_when_process_partition_then_queue_records_and_exception_and_sentinel(
+    def test_given_exception_when_process_partition_then_queue_records_and_exception_and_sentinel(
         self,
     ):
         partition = Mock()
-        cursor = Mock()
         exception = ValueError()
         partition.read.side_effect = self._read_with_exception(_RECORDS, exception)
-        self._partition_reader.process_partition(partition, cursor)
+        self._partition_reader.process_partition(partition)
 
         queue_content = self._consume_queue()
 
@@ -72,23 +61,6 @@ def test_given_exception_from_read_when_process_partition_then_queue_records_and
             PartitionCompleteSentinel(partition),
         ]
 
-    def test_given_exception_from_close_slice_when_process_partition_then_queue_records_and_exception_and_sentinel(
-        self,
-    ):
-        partition = self._a_partition(_RECORDS)
-        cursor = Mock()
-        exception = ValueError()
-        cursor.close_partition.side_effect = self._close_partition_with_exception(exception)
-        self._partition_reader.process_partition(partition, cursor)
-
-        queue_content = self._consume_queue()
-
-        # 4 total messages in queue. 2 records, 1 thread exception, 1 partition sentinel value
-        assert len(queue_content) == 4
-        assert queue_content[:2] == _RECORDS
-        assert isinstance(queue_content[2], StreamThreadException)
-        assert queue_content[3] == PartitionCompleteSentinel(partition)
-
     def _a_partition(self, records: List[Record]) -> Partition:
         partition = Mock(spec=Partition)
         partition.read.return_value = iter(records)
@@ -104,13 +76,6 @@ def mocked_function() -> Iterable[Record]:
 
         return mocked_function
 
-    @staticmethod
-    def _close_partition_with_exception(exception: Exception) -> Callable[[Partition], None]:
-        def mocked_function(partition: Partition) -> None:
-            raise exception
-
-        return mocked_function
-
     def _consume_queue(self):
         queue_content = []
         while queue_item := self._queue.get():
diff --git a/unit_tests/sources/streams/test_stream_read.py b/unit_tests/sources/streams/test_stream_read.py
index cf550f8cf..bf13ac351 100644
--- a/unit_tests/sources/streams/test_stream_read.py
+++ b/unit_tests/sources/streams/test_stream_read.py
@@ -4,7 +4,6 @@
 
 import logging
 from copy import deepcopy
-from queue import Queue
 from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Union
 from unittest.mock import Mock
 
@@ -590,10 +589,7 @@ def test_concurrent_incremental_read_two_slices():
         *records_partition_2,
     ]
 
-    expected_state_1 = _create_state_message(
-        "__mock_stream", {"1": {"created_at": slice_timestamp_1}}
-    )
-    expected_state_2 = _create_state_message(
+    expected_state = _create_state_message(
         "__mock_stream",
         {"1": {"created_at": slice_timestamp_1}, "2": {"created_at": slice_timestamp_2}},
     )
@@ -621,27 +617,26 @@ def test_concurrent_incremental_read_two_slices():
     for record in expected_records:
         assert record in actual_records
 
-    # We need to process partitions generated by a PartitionReader in order to trigger
-    # the ConcurrentCursor.close_partition() flow and validate state is updated with
-    # the observed record values
-    partition_reader = PartitionReader(queue=Mock(spec=Queue))
-    assert isinstance(stream, StreamFacade)
-    abstract_stream = stream._abstract_stream
-    for partition in abstract_stream.generate_partitions():
-        partition_reader.process_partition(partition=partition, cursor=cursor)
+    # We need to run on_record to update the cursor with each record's cursor value
+    for record in actual_records:
+        list(
+            handler.on_record(
+                Record(
+                    data=record,
+                    stream_name="__mock_stream",
+                )
+            )
+        )
 
     assert len(actual_records) == len(expected_records)
 
+    # We don't have a real source that reads from the message_repository for state, so we read from the queue directly to verify
+    # that the cursor observed records correctly and updated the partition state
+    mock_partition = Mock()
+    cursor.close_partition(mock_partition)
     actual_state = [state for state in message_repository.consume_queue()]
-    assert len(actual_state) == 2
-    assert (
-        actual_state[0].state.stream.stream_state.__dict__
-        == expected_state_1.state.stream.stream_state.__dict__
-    )
-    assert (
-        actual_state[1].state.stream.stream_state.__dict__
-        == expected_state_2.state.stream.stream_state.__dict__
-    )
+    assert len(actual_state) == 1
+    assert actual_state[0] == expected_state
 
 
 def setup_stream_dependencies(configured_json_schema):