|
20 | 20 |
|
21 | 21 | import atexit |
22 | 22 | import datetime |
| 23 | +import threading |
23 | 24 | from binascii import hexlify |
24 | 25 | from collections import defaultdict |
25 | 26 | from collections.abc import Mapping |
|
72 | 73 | ExponentialReconnectionPolicy, HostDistance, |
73 | 74 | RetryPolicy, IdentityTranslator, NoSpeculativeExecutionPlan, |
74 | 75 | NoSpeculativeExecutionPolicy, DefaultLoadBalancingPolicy, |
75 | | - NeverRetryPolicy) |
| 76 | + NeverRetryPolicy, ConstantReconnectionPolicy, ReconnectionPolicy, |
| 77 | + ShardReconnectionScope, ShardReconnectionScopeHost, NoDelayReconnectionPolicy) |
76 | 78 | from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, |
77 | 79 | HostConnectionPool, HostConnection, |
78 | 80 | NoConnectionsAvailable) |
@@ -742,6 +744,32 @@ def auth_provider(self, value): |
742 | 744 |
|
743 | 745 | self._auth_provider = value |
744 | 746 |
|
| 747 | + _shard_reconnection_policy = None |
| 748 | + @property |
| 749 | + def shard_reconnection_policy(self): |
| 750 | + return self._shard_reconnection_policy |
| 751 | + |
| 752 | + @shard_reconnection_policy.setter |
| 753 | + def shard_reconnection_policy(self, srp): |
| 754 | + if self._config_mode == _ConfigMode.PROFILES: |
| 755 | + raise ValueError( |
| 756 | + "Cannot set Cluster.shard_reconnection_policy while using Configuration Profiles. Set this in a profile instead.") |
| 757 | + self._shard_reconnection_policy = srp |
| 758 | + self._config_mode = _ConfigMode.LEGACY |
| 759 | + |
| 760 | + _shard_reconnection_scope = None |
| 761 | + @property |
| 762 | + def shard_reconnection_scope(self): |
| 763 | + return self._shard_reconnection_scope |
| 764 | + |
| 765 | + @shard_reconnection_scope.setter |
| 766 | + def shard_reconnection_scope(self, scope): |
| 767 | + if self._config_mode == _ConfigMode.PROFILES: |
| 768 | + raise ValueError( |
| 769 | + "Cannot set Cluster.shard_reconnection_scope while using Configuration Profiles. Set this in a profile instead.") |
| 770 | + self._shard_reconnection_scope = scope |
| 771 | + self._config_mode = _ConfigMode.LEGACY |
| 772 | + |
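Note: a minimal usage sketch of the two setters above in legacy (non-profile) configuration; the contact point is a placeholder, and the policy/scope classes are the ones imported from cassandra.policies in this change. Assigning either attribute commits the cluster to legacy config mode and raises ValueError if execution profiles are already in use.

    from cassandra.cluster import Cluster
    from cassandra.policies import ConstantReconnectionPolicy, ShardReconnectionScopeHost

    cluster = Cluster(contact_points=["127.0.0.1"])
    # Space shard reconnection attempts 5 seconds apart, grouped per host (the default scope).
    cluster.shard_reconnection_policy = ConstantReconnectionPolicy(5)
    cluster.shard_reconnection_scope = ShardReconnectionScopeHost()
    session = cluster.connect()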
745 | 773 | _load_balancing_policy = None |
746 | 774 | @property |
747 | 775 | def load_balancing_policy(self): |
@@ -1204,6 +1232,8 @@ def __init__(self, |
1204 | 1232 | shard_aware_options=None, |
1205 | 1233 | metadata_request_timeout=None, |
1206 | 1234 | column_encryption_policy=None, |
| 1235 | + shard_reconnection_policy=None, |
| 1236 | + shard_reconnection_scope=None, |
1207 | 1237 | ): |
1208 | 1238 | """ |
1209 | 1239 | ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as |
@@ -1309,6 +1339,24 @@ def __init__(self, |
1309 | 1339 | else: |
1310 | 1340 | self._load_balancing_policy = default_lbp_factory() # set internal attribute to avoid committing to legacy config mode |
1311 | 1341 |
|
| 1342 | + if shard_reconnection_scope is not None: |
| 1343 | + if isinstance(shard_reconnection_scope, type): |
| 1344 | + raise TypeError("shard_reconnection_scope should not be a class, it should be an instance of that class") |
| 1345 | + if not isinstance(shard_reconnection_scope, ShardReconnectionScope): |
| 1346 | + raise TypeError("shard_reconnection_scope should be an instance of a class derived from ShardReconnectionScope") |
| 1347 | + self.shard_reconnection_scope = shard_reconnection_scope |
| 1348 | + else: |
| 1349 | + self._shard_reconnection_scope = ShardReconnectionScopeHost() # set internal attribute to avoid committing to legacy config mode |
| 1350 | + |
| 1351 | + if shard_reconnection_policy is not None: |
| 1352 | + if isinstance(shard_reconnection_policy, type): |
| 1353 | + raise TypeError("shard_reconnection_policy should not be a class, it should be an instance of that class") |
| 1354 | + if not isinstance(shard_reconnection_policy, ReconnectionPolicy): |
| 1355 | + raise TypeError("shard_reconnection_policy should be an instance of a class derived from ReconnectionPolicy") |
| 1356 | + self.shard_reconnection_policy = shard_reconnection_policy |
| 1357 | + else: |
| 1358 | + self._shard_reconnection_policy = ConstantReconnectionPolicy(2, 0) # set internal attribute to avoid committing to legacy config mode |
| 1359 | + |
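Note: the same options can also be passed to the constructor; a sketch illustrating the instance-versus-class check above, using the same defaults as the fallbacks in this block (ConstantReconnectionPolicy(2, 0) and ShardReconnectionScopeHost()).

    from cassandra.cluster import Cluster
    from cassandra.policies import ConstantReconnectionPolicy, ShardReconnectionScopeHost

    # Passing the class object itself is rejected; an instance is required:
    #     Cluster(shard_reconnection_policy=ConstantReconnectionPolicy)   # raises TypeError

    cluster = Cluster(
        contact_points=["127.0.0.1"],
        shard_reconnection_policy=ConstantReconnectionPolicy(2, 0),
        shard_reconnection_scope=ShardReconnectionScopeHost(),
    )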
1312 | 1360 | if reconnection_policy is not None: |
1313 | 1361 | if isinstance(reconnection_policy, type): |
1314 | 1362 | raise TypeError("reconnection_policy should not be a class, it should be an instance of that class") |
@@ -2707,6 +2755,11 @@ def __init__(self, cluster, hosts, keyspace=None): |
2707 | 2755 | self._protocol_version = self.cluster.protocol_version |
2708 | 2756 |
|
2709 | 2757 | self.encoder = Encoder() |
| 2758 | + if isinstance(cluster.shard_reconnection_policy, NoDelayReconnectionPolicy): |
| 2759 | + self.shard_reconnection_scheduler = NoDelayShardReconnectionScheduler(self) |
| 2760 | + else: |
| 2761 | + self.shard_reconnection_scheduler = ShardReconnectionScheduler( |
| 2762 | + self, cluster, cluster.shard_reconnection_scope, cluster.shard_reconnection_policy) |
2710 | 2763 |
|
2711 | 2764 | # create connection pools in parallel |
2712 | 2765 | self._initial_connect_futures = set() |
@@ -3546,6 +3599,141 @@ class UserTypeDoesNotExist(Exception): |
3546 | 3599 | pass |
3547 | 3600 |
|
3548 | 3601 |
|
| 3602 | +class ScopeBucket(object): |
| 3603 | + def __init__(self, session, shard_reconnection_policy): |
| 3604 | + self._items = [] |
| 3605 | + self.last_run = None |
| 3606 | + self.session = session |
| 3607 | + self.policy = shard_reconnection_policy |
| 3608 | + self.lock = threading.Lock() |
| 3609 | + self.running = False |
| 3610 | + self.schedule = self.policy.new_schedule() |
| 3611 | + |
| 3612 | + def add(self, method, *args, **kwargs): |
| 3613 | + with self.lock: |
| 3614 | + self._items.append([method, args, kwargs]) |
| 3615 | + if not self.running: |
| 3616 | + self.running = True |
| 3617 | + self._schedule() |
| 3618 | + |
| 3619 | + def _get_delay(self): |
| 3620 | + try: |
| 3621 | + return next(self.schedule) |
| 3622 | + except StopIteration: |
| 3623 | + self.schedule = self.policy.new_schedule() |
| 3624 | + return next(self.schedule) |
| 3625 | + |
| 3626 | + def _schedule(self): |
| 3627 | + if self.session.is_shutdown: |
| 3628 | + return |
| 3629 | + delay = self._get_delay() |
| 3630 | + if delay: |
| 3631 | + self.session.cluster.scheduler.schedule(delay, self.run) |
| 3632 | + else: |
| 3633 | + self.session.submit(self.run) |
| 3634 | + |
| 3635 | + def run(self): |
| 3636 | + if self.session.is_shutdown: |
| 3637 | + return |
| 3638 | + |
| 3639 | + with self.lock: |
| 3640 | + try: |
| 3641 | + item = self._items.pop() |
| 3642 | + except IndexError: |
| 3643 | + self.running = False |
| 3644 | + return |
| 3645 | + |
| 3646 | + method, args, kwargs = item |
| 3647 | + try: |
| 3648 | + method(*args, **kwargs) |
| 3649 | + finally: |
| 3650 | + self._schedule() |
| 3651 | + |
| 3652 | + |
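Note: ScopeBucket needs a live Session (submit(), is_shutdown, and the cluster scheduler), so the self-contained sketch below only mirrors the pattern: callables queued per scope, drained one at a time, with consecutive runs spaced by delays drawn from a restartable schedule. ToyBucket and its timer-based scheduling are illustrative stand-ins, not driver API.

    import threading

    class ToyBucket(object):
        def __init__(self, delays):
            self._delays = delays                # e.g. [0.1, 0.2, 0.5], standing in for a policy schedule
            self._schedule = iter(delays)
            self._items = []
            self._lock = threading.Lock()
            self._running = False

        def add(self, fn):
            with self._lock:
                self._items.append(fn)
                if not self._running:
                    self._running = True
                    self._kick()

        def _next_delay(self):
            try:
                return next(self._schedule)
            except StopIteration:                # restart the schedule, as ScopeBucket does
                self._schedule = iter(self._delays)
                return next(self._schedule)

        def _kick(self):
            # stand-in for cluster.scheduler.schedule(delay, self.run)
            threading.Timer(self._next_delay(), self._run).start()

        def _run(self):
            with self._lock:
                if not self._items:
                    self._running = False        # queue drained; stop until the next add()
                    return
                fn = self._items.pop()           # LIFO pop, matching ScopeBucket
            try:
                fn()
            finally:
                self._kick()                     # schedule the next queued callable

    bucket = ToyBucket([0.1, 0.2, 0.5])
    for i in range(4):
        bucket.add(lambda i=i: print("reconnect attempt", i))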
| 3653 | +class ShardReconnectionSchedulerBase(object): |
| 3654 | + def schedule(self, host_id, shard_id, method, *args, **kwargs): |
| 3655 | + raise NotImplementedError() |
| 3656 | + |
| 3657 | + def forced_schedule(self, host_id, shard_id, method, *args, **kwargs): |
| 3658 | + raise NotImplementedError() |
| 3659 | + |
| 3660 | + |
| 3661 | +class NoDelayShardReconnectionScheduler(ShardReconnectionSchedulerBase): |
| 3662 | + def __init__(self, session): |
| 3663 | + self.session = weakref.proxy(session) |
| 3664 | + self.already_scheduled = {} |
| 3665 | + |
| 3666 | + def _execute(self, scheduled_key, method, *args, **kwargs): |
| 3667 | + try: |
| 3668 | + method(*args, **kwargs) |
| 3669 | + finally: |
| 3670 | + self.already_scheduled[scheduled_key] = False |
| 3671 | + |
| 3672 | + def forced_schedule(self, host_id, shard_id, method, *args, **kwargs): |
| 3673 | + scheduled_key = f'{host_id}-{shard_id}' |
| 3674 | + self.already_scheduled[scheduled_key] = True |
| 3675 | + |
| 3676 | + if not self.session.is_shutdown: |
| 3677 | + self.session.submit(self._execute, scheduled_key, method, *args, **kwargs) |
| 3678 | + |
| 3679 | + def schedule(self, host_id, shard_id, method, *args, **kwargs): |
| 3680 | + scheduled_key = f'{host_id}-{shard_id}' |
| 3681 | + if self.already_scheduled.get(scheduled_key): |
| 3682 | + return |
| 3683 | + |
| 3684 | + self.already_scheduled[scheduled_key] = True |
| 3685 | + if not self.session.is_shutdown: |
| 3686 | + self.session.submit(self._execute, scheduled_key, method, *args, **kwargs) |
| 3687 | + |
| 3688 | + |
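Note: the no-delay scheduler only deduplicates by host and shard; a condensed sketch of that contract (the ids and the callable are made up for illustration, and forced_schedule simply skips the already-scheduled check).

    already_scheduled = {}

    def schedule(host_id, shard_id, fn):
        key = f'{host_id}-{shard_id}'
        if already_scheduled.get(key):       # a reconnect for this shard is already queued
            return
        already_scheduled[key] = True
        try:
            fn()                             # the driver hands this to session.submit()
        finally:
            already_scheduled[key] = False   # allow the next attempt once this one finishes

    schedule("host-1", 0, lambda: print("connecting to shard 0"))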
| 3689 | +class ShardReconnectionScheduler(ShardReconnectionSchedulerBase): |
| 3690 | + def __init__(self, session, cluster, shard_reconnection_scope, shard_reconnection_policy): |
| 3691 | + self.already_scheduled = {} |
| 3692 | + self.scopes = {} |
| 3693 | + self.session = weakref.proxy(session) |
| 3694 | + self.cluster = weakref.proxy(cluster) |
| 3695 | + self.shard_reconnection_scope = shard_reconnection_scope |
| 3696 | + self.shard_reconnection_policy = shard_reconnection_policy |
| 3697 | + self.lock = threading.Lock() |
| 3698 | + |
| 3699 | + def _execute(self, scheduled_key, method, *args, **kwargs): |
| 3700 | + try: |
| 3701 | + method(*args, **kwargs) |
| 3702 | + finally: |
| 3703 | + with self.lock: |
| 3704 | + self.already_scheduled[scheduled_key] = False |
| 3705 | + |
| 3706 | + def forced_schedule(self, host_id, shard_id, method, *args, **kwargs): |
| 3707 | + scope_id = self.shard_reconnection_scope.get_hash(self.cluster, host_id, shard_id) |
| 3708 | + scheduled_key = f'{host_id}-{shard_id}' |
| 3709 | + |
| 3710 | + with self.lock: |
| 3711 | + self.already_scheduled[scheduled_key] = True |
| 3712 | + |
| 3713 | + scope_info = self.scopes.get(scope_id) |
| 3714 | + if scope_info is None: |
| 3715 | + scope_info = ScopeBucket(self.session, self.shard_reconnection_policy) |
| 3716 | + self.scopes[scope_id] = scope_info |
| 3717 | + scope_info.add(self._execute, scheduled_key, method, *args, **kwargs) |
| 3718 | + return True |
| 3719 | + |
| 3720 | + def schedule(self, host_id, shard_id, method, *args, **kwargs): |
| 3721 | + scope_id = self.shard_reconnection_scope.get_hash(self.cluster, host_id, shard_id) |
| 3722 | + scheduled_key = f'{host_id}-{shard_id}' |
| 3723 | + |
| 3724 | + with self.lock: |
| 3725 | + if self.already_scheduled.get(scheduled_key): |
| 3726 | + return False |
| 3727 | + self.already_scheduled[scheduled_key] = True |
| 3728 | + |
| 3729 | + scope_info = self.scopes.get(scope_id) |
| 3730 | + if scope_info is None: |
| 3731 | + scope_info = ScopeBucket(self.session, self.shard_reconnection_policy) |
| 3732 | + self.scopes[scope_id] = scope_info |
| 3733 | + scope_info.add(self._execute, scheduled_key, method, *args, **kwargs) |
| 3734 | + return True |
| 3735 | + |
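Note: ShardReconnectionScheduler only calls get_hash(cluster, host_id, shard_id) on the scope and keeps one ScopeBucket per returned key, so a custom grouping can be sketched as below. This assumes get_hash() is all a scope needs to implement; ShardReconnectionScopeCluster is a hypothetical name, not part of the driver.

    from cassandra.policies import ShardReconnectionScope

    class ShardReconnectionScopeCluster(ShardReconnectionScope):
        """Group every host and shard under one key, so all shard
        reconnections cluster-wide share a single reconnection schedule."""
        def get_hash(self, cluster, host_id, shard_id):
            return "cluster"

    # cluster = Cluster(..., shard_reconnection_scope=ShardReconnectionScopeCluster())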
| 3736 | + |
3549 | 3737 | class _ControlReconnectionHandler(_ReconnectionHandler): |
3550 | 3738 | """ |
3551 | 3739 | Internal |
@@ -4432,6 +4620,9 @@ def shutdown(self): |
4432 | 4620 | self._queue.put_nowait((0, 0, None)) |
4433 | 4621 | self.join() |
4434 | 4622 |
|
| 4623 | + def empty(self): |
| 4624 | + return len(self._scheduled_tasks) == 0 and self._queue.empty() |
| 4625 | + |
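Note: empty() lets tests wait for the internal scheduler to drain; a minimal helper sketch (the helper name and timeout handling are arbitrary, and cluster.scheduler is an internal attribute of Cluster).

    import time

    def wait_until_scheduler_idle(cluster, timeout=10.0):
        deadline = time.time() + timeout
        while not cluster.scheduler.empty():
            if time.time() >= deadline:
                raise TimeoutError("scheduler still has pending tasks")
            time.sleep(0.1)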
4435 | 4626 | def schedule(self, delay, fn, *args, **kwargs): |
4436 | 4627 | self._insert_task(delay, (fn, args, tuple(kwargs.items()))) |
4437 | 4628 |
|
|