|
@@ -19,6 +19,7 @@
19 | 19 | Optional,
20 | 20 | Set,
21 | 21 | Tuple,
| 22 | + Union,
22 | 23 | ) |
23 | 24 |
24 | 25 | import orjson
@@ -51,10 +52,6 @@
51 | 52 | from airbyte_cdk.sources.declarative.checks.connection_checker import ConnectionChecker
52 | 53 | from airbyte_cdk.sources.declarative.concurrency_level import ConcurrencyLevel |
53 | 54 | from airbyte_cdk.sources.declarative.declarative_stream import DeclarativeStream |
54 | | -from airbyte_cdk.sources.declarative.extractors import RecordSelector |
55 | | -from airbyte_cdk.sources.declarative.extractors.record_filter import ( |
56 | | - ClientSideIncrementalRecordFilterDecorator, |
57 | | -) |
58 | 55 | from airbyte_cdk.sources.declarative.incremental import ( |
59 | 56 | ConcurrentPerPartitionCursor, |
60 | 57 | GlobalSubstreamCursor, |
@@ -205,7 +202,6 @@ def __init__( |
205 | 202 | # incremental streams running in full refresh. |
206 | 203 | component_factory = ModelToComponentFactory( |
207 | 204 | emit_connector_builder_messages=emit_connector_builder_messages, |
208 | | - disable_resumable_full_refresh=True, |
209 | 205 | message_repository=ConcurrentMessageRepository(queue, message_repository), |
210 | 206 | connector_state_manager=self._connector_state_manager, |
211 | 207 | max_concurrent_async_job_count=source_config.get("max_concurrent_async_job_count"), |
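For context on the `ConcurrentMessageRepository(queue, message_repository)` wiring above: messages emitted by concurrently running partitions are funneled through a shared queue so the main read loop can drain them in order, while the inner repository keeps its usual behavior. A minimal sketch of that pattern, using stand-in class names rather than the actual airbyte_cdk implementation:

```python
# Hypothetical sketch of a queue-backed message repository: emitted messages
# are pushed onto a shared queue for the concurrent read loop and also
# forwarded to an inner repository. Names are illustrative, not the
# airbyte_cdk API.
from queue import Queue
from typing import Any, List


class InMemoryMessageRepository:
    """Stand-in inner repository that simply buffers messages."""

    def __init__(self) -> None:
        self._buffer: List[Any] = []

    def emit_message(self, message: Any) -> None:
        self._buffer.append(message)


class QueueBackedMessageRepository:
    """Fans emitted messages out to a shared queue and an inner repository."""

    def __init__(self, queue: "Queue[Any]", inner: InMemoryMessageRepository) -> None:
        self._queue = queue
        self._inner = inner

    def emit_message(self, message: Any) -> None:
        self._queue.put(message)           # visible to the concurrent read loop
        self._inner.emit_message(message)  # preserved for synchronous callers


queue: "Queue[Any]" = Queue()
repo = QueueBackedMessageRepository(queue, InMemoryMessageRepository())
repo.emit_message({"type": "LOG", "log": {"level": "INFO", "message": "hello"}})
assert queue.get() == {"type": "LOG", "log": {"level": "INFO", "message": "hello"}}
```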
@@ -459,7 +455,7 @@ def discover(self, logger: logging.Logger, config: Mapping[str, Any]) -> Airbyte |
459 | 455 | ] |
460 | 456 | ) |
461 | 457 |
462 | | - def streams(self, config: Mapping[str, Any]) -> List[Stream]:
| 458 | + def streams(self, config: Mapping[str, Any]) -> List[Union[Stream, AbstractStream]]: # type: ignore # we are migrating away from the AbstractSource and are expecting that this will only be called by ConcurrentDeclarativeSource or the Connector Builder |
463 | 459 | """ |
464 | 460 | The `streams` method is used as part of the AbstractSource in the following cases: |
465 | 461 | * ConcurrentDeclarativeSource.check -> ManifestDeclarativeSource.check -> AbstractSource.check -> DeclarativeSource.check_connection -> CheckStream.check_connection -> streams |
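The `# type: ignore` on the new signature is needed because the parent `AbstractSource.streams` is declared to return `List[Stream]`, and widening the return type in an override is unsound for arbitrary callers. A self-contained sketch of the pattern, using stand-in classes rather than the real CDK types:

```python
# Minimal sketch of why the override carries `# type: ignore`: the base class
# promises List[Stream], so returning List[Union[Stream, AbstractStream]] is
# a type error for generic AbstractSource callers. Stand-in classes only.
from abc import ABC, abstractmethod
from typing import Any, List, Mapping, Union


class Stream:
    """Stand-in for the legacy synchronous stream."""


class AbstractStream:
    """Stand-in for the concurrent-native stream interface."""


class AbstractSource(ABC):
    @abstractmethod
    def streams(self, config: Mapping[str, Any]) -> List[Stream]: ...


class ConcurrentDeclarativeSource(AbstractSource):
    # Safe only because the known callers (this class and the Connector
    # Builder) are prepared to handle both branches of the Union.
    def streams(self, config: Mapping[str, Any]) -> List[Union[Stream, AbstractStream]]:  # type: ignore[override]
        return [Stream(), AbstractStream()]


for stream in ConcurrentDeclarativeSource().streams({}):
    print(type(stream).__name__)  # Stream, then AbstractStream
```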
@@ -622,6 +618,10 @@ def _group_streams( |
622 | 618 | # these legacy Python streams the way we do low-code streams to determine if they are concurrent compatible, |
623 | 619 | # so we need to treat them as synchronous |
624 | 620 |
| 621 | + if isinstance(declarative_stream, AbstractStream):
| 622 | + concurrent_streams.append(declarative_stream) |
| 623 | + continue |
| 624 | + |
625 | 625 | supports_file_transfer = ( |
626 | 626 | isinstance(declarative_stream, DeclarativeStream) |
627 | 627 | and "file_uploader" in name_to_stream_mapping[declarative_stream.name] |
@@ -691,7 +691,7 @@ def _group_streams( |
691 | 691 | partition_generator = StreamSlicerPartitionGenerator( |
692 | 692 | partition_factory=DeclarativePartitionFactory( |
693 | 693 | stream_name=declarative_stream.name, |
694 | | - json_schema=declarative_stream.get_json_schema(), |
| 694 | + schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish |
695 | 695 | retriever=retriever, |
696 | 696 | message_repository=self._message_repository, |
697 | 697 | max_records_limit=self._limits.max_records |
@@ -728,7 +728,7 @@ def _group_streams( |
728 | 728 | partition_generator = StreamSlicerPartitionGenerator( |
729 | 729 | partition_factory=DeclarativePartitionFactory( |
730 | 730 | stream_name=declarative_stream.name, |
731 | | - json_schema=declarative_stream.get_json_schema(), |
| 731 | + schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish |
732 | 732 | retriever=retriever, |
733 | 733 | message_repository=self._message_repository, |
734 | 734 | max_records_limit=self._limits.max_records |
@@ -762,7 +762,7 @@ def _group_streams( |
762 | 762 | partition_generator = StreamSlicerPartitionGenerator( |
763 | 763 | DeclarativePartitionFactory( |
764 | 764 | stream_name=declarative_stream.name, |
765 | | - json_schema=declarative_stream.get_json_schema(), |
| 765 | + schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish |
766 | 766 | retriever=declarative_stream.retriever, |
767 | 767 | message_repository=self._message_repository, |
768 | 768 | max_records_limit=self._limits.max_records if self._limits else None, |
@@ -826,7 +826,7 @@ def _group_streams( |
826 | 826 | partition_generator = StreamSlicerPartitionGenerator( |
827 | 827 | DeclarativePartitionFactory( |
828 | 828 | stream_name=declarative_stream.name, |
829 | | - json_schema=declarative_stream.get_json_schema(), |
| 829 | + schema_loader=declarative_stream._schema_loader, # type: ignore # We are accessing the private property but the public one is optional and we will remove this code soonish |
830 | 830 | retriever=retriever, |
831 | 831 | message_repository=self._message_repository, |
832 | 832 | max_records_limit=self._limits.max_records if self._limits else None, |
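All four `DeclarativePartitionFactory` call sites above make the same swap: instead of eagerly calling `declarative_stream.get_json_schema()` while wiring up partition generators, the factory now receives the stream's schema loader and can defer schema resolution until a partition actually needs it. A hedged sketch of that eager-vs-lazy trade-off, with a hypothetical `SchemaLoader` protocol (the real CDK interface may differ):

```python
# Hedged sketch of the eager-vs-lazy trade-off behind the schema_loader
# change: the factory stores a loader and resolves the schema on demand,
# rather than receiving a fully-resolved dict up front. Names are
# illustrative, not the airbyte_cdk API.
from typing import Any, Mapping, Protocol


class SchemaLoader(Protocol):
    def get_json_schema(self) -> Mapping[str, Any]: ...


class FileSchemaLoader:
    """Stand-in loader; real loaders might read a file or call an API."""

    def __init__(self, path: str) -> None:
        self._path = path

    def get_json_schema(self) -> Mapping[str, Any]:
        print(f"resolving schema from {self._path}")  # happens only on demand
        return {"type": "object", "properties": {}}


class PartitionFactory:
    def __init__(self, stream_name: str, schema_loader: SchemaLoader) -> None:
        self._stream_name = stream_name
        self._schema_loader = schema_loader  # stored, not resolved

    def build_partition(self) -> Mapping[str, Any]:
        # The schema is resolved only when a partition is actually created.
        return {
            "stream": self._stream_name,
            "schema": self._schema_loader.get_json_schema(),
        }


factory = PartitionFactory("users", FileSchemaLoader("schemas/users.json"))
# No schema resolution has happened yet; it fires on first use:
factory.build_partition()
```

Under this reading, streams that are never read never pay the cost of resolving their schema, which also explains the inline note that the private `_schema_loader` access is a stopgap until the public accessor stops being optional.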
|