
Commit 0c0a864

Merge pull request #210 from Yelp/mpiano/CLUSTERMAN-692
2 parents 9e1a795 + cc7f227 commit 0c0a864


61 files changed: +123, -71 lines changed

clusterman/autoscaler/pool_manager.py

Lines changed: 1 addition & 2 deletions
@@ -89,8 +89,7 @@ def reload_state(self, **cluster_connector_kwargs) -> None:
         ``Autoscaler.run()``.
         """
         logger.info("Reloading cluster connector state")
-        # TODO: update mypy to avoid having to ignore this error (CLUSTERMAN-692)
-        self.cluster_connector.reload_state(**cluster_connector_kwargs)  # type: ignore
+        self.cluster_connector.reload_state(**cluster_connector_kwargs)

         logger.info("Reloading resource groups")
         self._reload_resource_groups()
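This hunk drops the TODO and the line-level "# type: ignore" that were only needed while mypy could not check the connector call (the CLUSTERMAN-692 workaround). A minimal sketch, with hypothetical names, of why stale suppressions are worth deleting once the call type-checks:

# Minimal sketch (hypothetical names): "# type: ignore" silences one mypy error
# on that line; once the call can be checked, the comment should be removed, and
# running mypy with --warn-unused-ignores reports any suppressions that linger.
class ClusterConnector:
    def reload_state(self, **kwargs: str) -> None:
        print("reloaded with", kwargs)

connector = ClusterConnector()
connector.reload_state(region="us-west-2")  # fully typed now, so no ignore is needed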

clusterman/aws/auto_scaling_resource_group.py

Lines changed: 1 addition & 1 deletion
@@ -199,7 +199,7 @@ def _get_launch_template_and_overrides(
             template = policy["LaunchTemplate"]["LaunchTemplateSpecification"]
             overrides = policy["LaunchTemplate"]["Overrides"]
         else:
-            logger.warn(f"ASG {self.id} is not using LaunchTemplates, it will be unable to do smart scheduling")
+            logger.warning(f"ASG {self.id} is not using LaunchTemplates, it will be unable to do smart scheduling")
             return None, []

         launch_template_name = template["LaunchTemplateName"]
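In the standard logging module (which colorlog loggers build on), Logger.warn is a deprecated alias for Logger.warning, so this change is purely a rename. A minimal sketch of the preferred spelling:

# Minimal sketch: warning() is the supported name; warn() is a deprecated alias,
# so the rename avoids a DeprecationWarning without changing behaviour.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example")

asg_id = "example-asg"  # hypothetical ASG id
logger.warning(f"ASG {asg_id} is not using LaunchTemplates, it will be unable to do smart scheduling")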

clusterman/aws/aws_resource_group.py

Lines changed: 15 additions & 5 deletions
@@ -17,6 +17,7 @@
 from collections import defaultdict
 from socket import gethostbyaddr
 from typing import Any
+from typing import cast
 from typing import Collection
 from typing import List
 from typing import Mapping
@@ -32,6 +33,7 @@
 from clusterman.aws.client import InstanceDict
 from clusterman.aws.markets import get_instance_market
 from clusterman.aws.markets import InstanceMarket
+from clusterman.aws.markets import MarketDict
 from clusterman.interfaces.resource_group import InstanceMetadata
 from clusterman.interfaces.resource_group import ResourceGroup

@@ -77,7 +79,7 @@ def get_instance_metadatas(self, state_filter: Optional[Collection[str]] = None)
             if state_filter and aws_state not in state_filter:
                 continue

-            instance_market = get_instance_market(instance_dict)
+            instance_market = get_instance_market(cast(MarketDict, instance_dict))
             instance_ip = instance_dict.get("PrivateIpAddress")
             hostname = gethostbyaddr(instance_ip)[0] if instance_ip else None
             is_cordoned = self._is_instance_cordoned(instance_dict)
@@ -127,14 +129,16 @@ def terminate_instances_by_id(self, instance_ids: List[str], batch_size: int = 5

         instance_weights = {}
         for instance in ec2_describe_instances(instance_ids):
-            instance_market = get_instance_market(instance)
+            instance_market = get_instance_market(cast(MarketDict, instance))
             if not instance_market.az:
                 logger.warning(
                     f"Instance {instance['InstanceId']} missing AZ info, likely already terminated so skipping",
                 )
                 instance_ids.remove(instance["InstanceId"])
                 continue
-            instance_weights[instance["InstanceId"]] = self.market_weight(get_instance_market(instance))
+            instance_weights[instance["InstanceId"]] = self.market_weight(
+                get_instance_market(cast(MarketDict, instance))
+            )

         # AWS API recommends not terminating more than 1000 instances at a time, and to
         # terminate larger numbers in batches
@@ -186,15 +190,21 @@ def _get_instances_by_market(self):
         """Responses from this API call are cached to prevent hitting any AWS request limits"""
         instance_dict: Mapping[InstanceMarket, List[Mapping]] = defaultdict(list)
         for instance in ec2_describe_instances(self.instance_ids):
-            instance_dict[get_instance_market(instance)].append(instance)
+            instance_dict[get_instance_market(cast(MarketDict, instance))].append(instance)
         return instance_dict

     @abstractproperty
     def _target_capacity(self):  # pragma: no cover
         pass

     @classmethod
-    def load(cls, cluster: str, pool: str, config: Any, **kwargs: Any) -> Mapping[str, "AWSResourceGroup"]:
+    def load(  # type: ignore  # (mypy errors with "incompatible signature with supertype")
+        cls,
+        cluster: str,
+        pool: str,
+        config: Any,
+        **kwargs: Any,
+    ) -> Mapping[str, "AWSResourceGroup"]:
         """Load a list of corresponding resource groups

         :param cluster: a cluster name
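These hunks all apply the same pattern: typing.cast marks the raw describe-instances dictionaries as the project's MarketDict type so mypy accepts the get_instance_market calls, without any runtime conversion or check. A minimal sketch of the pattern, using a hypothetical TypedDict in place of MarketDict:

# Minimal sketch (hypothetical types): typing.cast only informs the type checker;
# it performs no runtime validation of the dictionary's contents.
from typing import TypedDict, cast  # TypedDict needs Python 3.8+

class FakeMarketDict(TypedDict, total=False):  # stand-in for clusterman's MarketDict
    InstanceType: str

def market_name(instance: FakeMarketDict) -> str:
    return instance.get("InstanceType", "unknown")

raw = {"InstanceType": "m5.large", "InstanceId": "i-0123"}  # e.g. a boto3 response element
print(market_name(cast(FakeMarketDict, raw)))  # prints "m5.large"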

clusterman/aws/client.py

Lines changed: 1 addition & 0 deletions
@@ -53,6 +53,7 @@
         "PrivateIpAddress": str,
         "State": InstanceStateDict,
         "LaunchTime": str,
+        "PrivateDnsName": str,
         "Tags": Sequence[Mapping[str, str]],
     },
 )
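The added key extends what appears to be a functional-syntax TypedDict describing EC2 instance responses, so callers can read PrivateDnsName under type checking. A hedged sketch of that declaration style (an illustrative subset, not the module's full InstanceDict):

# Minimal sketch (illustrative subset): the functional TypedDict syntax declares
# the expected shape of an EC2 response dict; each key added becomes checkable.
from typing import Mapping, Sequence, TypedDict  # Python 3.8+

FakeInstanceDict = TypedDict(
    "FakeInstanceDict",
    {
        "PrivateIpAddress": str,
        "PrivateDnsName": str,  # the key this commit adds
        "Tags": Sequence[Mapping[str, str]],
    },
)

instance: FakeInstanceDict = {
    "PrivateIpAddress": "10.0.0.1",
    "PrivateDnsName": "ip-10-0-0-1.ec2.internal",
    "Tags": [{"Key": "pool", "Value": "default"}],
}
print(instance["PrivateDnsName"])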

clusterman/aws/spot_fleet_resource_group.py

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ def _target_capacity(self) -> float:
         return self._configuration["SpotFleetRequestConfig"]["TargetCapacity"]

     @classmethod
-    def load(
+    def load(  # type: ignore  # (mypy errors with "incompatible signature with supertype")
         cls,
         cluster: str,
         pool: str,
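The ignore here (and the matching one in aws_resource_group.py above) acknowledges that the subclass's load() deliberately differs from the supertype's signature, which mypy flags as an incompatible override. A minimal sketch, with hypothetical classes, of the kind of override that triggers the error:

# Minimal sketch (hypothetical classes): adding a required parameter and changing
# the return type of an overridden classmethod is incompatible with the supertype,
# so mypy reports an incompatible-override error unless the line is ignored.
from typing import Any, Mapping

class ResourceGroupBase:
    @classmethod
    def load(cls, cluster: str, pool: str) -> "ResourceGroupBase":
        return cls()

class FleetGroup(ResourceGroupBase):
    @classmethod
    def load(  # type: ignore  # signature intentionally differs from the supertype
        cls,
        cluster: str,
        pool: str,
        config: Mapping[str, Any],
    ) -> Mapping[str, "FleetGroup"]:
        return {cluster: cls()}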

clusterman/batch/autoscaler_bootstrap.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@
1616
import time
1717
import traceback
1818
import xmlrpc.client
19+
from typing import Any
20+
from typing import cast
21+
from typing import Mapping
1922

2023
import colorlog
2124
from yelp_batch.batch import batch_command_line_arguments
@@ -56,7 +59,10 @@ def wait_for_process(
5659
while True:
5760
try:
5861
states = [
59-
rpc.supervisor.getProcessInfo(f"{process_name}:{process_name}_{i}")["statename"]
62+
cast(
63+
Mapping[str, Any],
64+
rpc.supervisor.getProcessInfo(f"{process_name}:{process_name}_{i}"),
65+
)["statename"]
6066
for i in range(num_procs)
6167
]
6268
except OSError:
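Values coming back through the XML-RPC proxy are not precisely typed, so the result of getProcessInfo is cast to Mapping[str, Any] before the statename lookup. A minimal sketch of the same shape (no supervisord server is contacted here):

# Minimal sketch: the cast documents the expected struct shape for the type
# checker; at runtime the proxy still returns whatever the XML-RPC server sends.
import xmlrpc.client
from typing import Any, Mapping, cast

def process_state(rpc: xmlrpc.client.ServerProxy, process_name: str) -> str:
    info = cast(Mapping[str, Any], rpc.supervisor.getProcessInfo(process_name))
    return info["statename"]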

clusterman/config.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ def setup_config(args: argparse.Namespace) -> None:
     # different region.
     elif cluster:
         aws_region = staticconf.read_string(f"clusters.{cluster}.aws_region", default=None)
-        if pool:
+        if pool and scheduler:
             load_cluster_pool_config(cluster, pool, scheduler, signals_branch_or_tag)

     staticconf.DictConfiguration({"aws": {"region": aws_region}})
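Requiring both pool and scheduler before loading the pool config keeps a possibly-None scheduler out of the call; for mypy, the combined truthiness check also narrows both Optional values inside the branch. A minimal sketch with hypothetical argument types:

# Minimal sketch (hypothetical signatures): checking both values narrows the
# Optionals to str inside the branch, so nothing None reaches the loader.
from typing import Optional

def load_pool_config(cluster: str, pool: str, scheduler: str) -> None:
    print(f"loading {cluster}/{pool} ({scheduler})")

def setup(cluster: str, pool: Optional[str], scheduler: Optional[str]) -> None:
    if pool and scheduler:
        load_pool_config(cluster, pool, scheduler)

setup("example-cluster", "default", None)  # silently skips instead of passing None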

clusterman/kubernetes/util.py

Lines changed: 6 additions & 1 deletion
@@ -17,7 +17,10 @@
 from enum import auto
 from enum import Enum
 from functools import partial
+from typing import Any
+from typing import Hashable
 from typing import List
+from typing import MutableMapping
 from typing import Type

 import colorlog
@@ -43,7 +46,9 @@
 DEFAULT_KUBERNETES_DISK_REQUEST = "0"  # Kubernetes doesn't schedule based on disk allocation right now
 KUBERNETES_API_CACHE_SIZE = 16
 KUBERNETES_API_CACHE_TTL = 60
-KUBERNETES_API_CACHE = TTLCache(maxsize=KUBERNETES_API_CACHE_SIZE, ttl=KUBERNETES_API_CACHE_TTL)
+KUBERNETES_API_CACHE: MutableMapping[Hashable, Any] = TTLCache(
+    maxsize=KUBERNETES_API_CACHE_SIZE, ttl=KUBERNETES_API_CACHE_TTL
+)
 VERSION_MATCH_EXPR = re.compile(r"(\W|^)(?P<release>\d+\.\d+(\.\d+)?)(\W|$)")
 logger = colorlog.getLogger(__name__)
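cachetools' TTLCache behaves like a dictionary, so annotating the module-level cache as MutableMapping[Hashable, Any] gives mypy a usable type for it. A minimal sketch of the same declaration:

# Minimal sketch: TTLCache supports the MutableMapping interface, so the
# annotation lets the type checker treat reads and writes as dict-like.
from typing import Any, Hashable, MutableMapping

from cachetools import TTLCache

api_cache: MutableMapping[Hashable, Any] = TTLCache(maxsize=16, ttl=60)
api_cache["pods"] = ["pod-a", "pod-b"]  # entries expire 60 seconds after insertion
print(api_cache.get("pods"))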

clusterman/math/piecewise.py

Lines changed: 2 additions & 1 deletion
@@ -287,7 +287,8 @@ def _merged_breakpoints(
     bp1 = zip_longest(fn1.breakpoints.items(), [], fillvalue=1)
     yprev0, yprev1 = fn0._initial_value, fn1._initial_value

-    for (x, y), fnnum in merge(bp0, bp1):
+    for xy_tuple, fnnum in merge(bp0, bp1):
+        x, y = cast(Tuple[XValue[T], float], xy_tuple)
         if fnnum == 0:
             yield x, y, yprev1
             yprev0 = y
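Unpacking the merged breakpoint inside the loop, with an explicit cast to the expected tuple shape, lets mypy follow types that heapq.merge over the two zip_longest streams would otherwise obscure. A minimal sketch of the pattern with concrete numbers (int x-values stand in for XValue[T]):

# Minimal sketch: each merged item is ((x, y), fn_index); the cast tells the
# type checker the (x, y) shape without changing the runtime values.
from heapq import merge
from itertools import zip_longest
from typing import Tuple, cast

bp0 = zip_longest([(1, 10.0), (3, 30.0)], [], fillvalue=0)  # breakpoints of fn0
bp1 = zip_longest([(2, 20.0)], [], fillvalue=1)             # breakpoints of fn1

for xy_tuple, fnnum in merge(bp0, bp1):
    x, y = cast(Tuple[int, float], xy_tuple)
    print(x, y, fnnum)  # 1 10.0 0, then 2 20.0 1, then 3 30.0 0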

clusterman/math/piecewise_types.py

Lines changed: 3 additions & 2 deletions
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Hashable
 from typing import TypeVar

 from typing_extensions import Protocol
@@ -19,15 +20,15 @@
 T = TypeVar("T")


-class XValueDiff(Protocol[T]):
+class XValueDiff(Protocol[T], Hashable):
     def __mul__(self, other: int) -> "XValueDiff[T]":
         ...

     def __truediv__(self, other: "XValueDiff[T]") -> float:
         ...


-class XValue(Protocol[T]):
+class XValue(Protocol[T], Hashable):
     def __add__(self, other: XValueDiff[T]) -> "XValue[T]":
         ...
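Adding Hashable as a base of these protocols means any conforming x-value type must also be usable as a dictionary key, which fits the breakpoint mappings used by the piecewise functions. A minimal sketch of a generic Protocol that also requires hashability (hypothetical names):

# Minimal sketch (hypothetical protocol): listing Hashable alongside Protocol[T]
# makes hashability part of the structural contract, so conforming values can be
# used as dict keys in addition to supporting the declared operator.
from typing import Dict, Hashable, TypeVar

from typing_extensions import Protocol

T = TypeVar("T")


class XLike(Protocol[T], Hashable):
    def __add__(self, other: T) -> "XLike[T]":
        ...


def as_breakpoint_key(value: "XLike[int]") -> Dict["XLike[int]", float]:
    return {value: 0.0}  # hashability is what makes this dict key valid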
