Commit c8a633a
sync torchx .pyre_configuration.internal with external config and upg… (pytorch#986)
Summary: There are some issues with Pyre versions in GitHub CI, and at the moment we aren't getting type errors. Making a PR to (a) bump to Ubuntu 24.04, because the root cause is a glibc issue, (b) upgrade to the 2024-11-25 release, and (c) suppress the resulting errors. I'm making a fresh PR because the original one doesn't seem to trigger GitHub CI (maybe because it was exported from a diff).

Test Plan: Ran GitHub CI; the problem is fixed.

Reviewed By: jesszzzz

Differential Revision: D66994745

Pulled By: stroxler
1 parent 5c2db0e commit c8a633a

File tree: 7 files changed, +14 −8 lines
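Most of the changes below are suppression comments rather than behavioral fixes. For orientation, a minimal sketch of how Pyre suppressions work (hypothetical code, not part of this commit): a `# pyre-fixme[CODE]` or `# pyre-ignore[CODE]` comment on the line above a reported error silences that error code on the following line.

    def parse_port(raw: str) -> int:
        # pyre-fixme[7]: Expected `int` but got `str`.
        return raw  # Pyre would report error code 7 (bad return type) here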

.github/workflows/pyre.yaml

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ on:
 
 jobs:
   pyre:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-24.04
     steps:
       - name: Setup Python
         uses: actions/setup-python@v2

torchx/examples/apps/lightning/data.py

Lines changed: 3 additions & 3 deletions
@@ -64,17 +64,17 @@ def __len__(self) -> int:
 # our trainer and other components that need to load data.
 
 
-# pyre-fixme[13]: Attribute `test_ds` is never initialized.
-# pyre-fixme[13]: Attribute `train_ds` is never initialized.
-# pyre-fixme[13]: Attribute `val_ds` is never initialized.
 class TinyImageNetDataModule(pl.LightningDataModule):
     """
     TinyImageNetDataModule is a pytorch LightningDataModule for the tiny
     imagenet dataset.
     """
 
+    # pyre-fixme[13]: Attribute `test_ds` is never initialized.
     train_ds: ImageFolderSamplesDataset
+    # pyre-fixme[13]: Attribute `train_ds` is never initialized.
     val_ds: ImageFolderSamplesDataset
+    # pyre-fixme[13]: Attribute `val_ds` is never initialized.
     test_ds: ImageFolderSamplesDataset
 
     def __init__(
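Pyre's error [13] fires when a class-level attribute annotation is never assigned in `__init__`. A LightningDataModule typically populates its datasets in `setup()`, after construction, which the checker cannot treat as initialization, hence the suppressions. Note that each inserted comment names an attribute one line off from the one it precedes; Pyre matches suppressions by position and error code, not by message text, so they still apply. A simplified sketch of the pattern (hypothetical class, not the torchx module):

    class MyDataModule:
        # pyre-fixme[13]: Attribute `train_ds` is never initialized.
        train_ds: list  # declared here, but never assigned in __init__

        def setup(self) -> None:
            # Assignment happens after construction, so without the
            # suppression Pyre flags the annotation above.
            self.train_ds = ["sample"]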

torchx/examples/apps/tracker/main.py

Lines changed: 2 additions & 0 deletions
@@ -99,6 +99,8 @@ def test(
     for data, target in test_loader:
         data, target = data.to(device), target.to(device)
         output = model(data)
+        # pyre-fixme[58]: `+` is not supported for operand types `int` and
+        #  `Union[bool, float, int]`.
         test_loss += F.nll_loss(
             output, target, reduction="sum"
         ).item()  # sum up batch loss
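The [58] error appears because `Tensor.item()` is typed as returning `Union[bool, float, int]`, while `test_loss` is inferred as `int` from its initializer. A common alternative to suppression (a sketch under that assumption; not what this commit does) is to type the accumulator as `float`, which every member of the union can be added to:

    import torch
    import torch.nn.functional as F

    test_loss: float = 0.0  # a float accumulator absorbs Union[bool, float, int]
    output = torch.randn(4, 10)          # stand-in for model(data)
    target = torch.randint(0, 10, (4,))  # stand-in labels
    test_loss += F.nll_loss(output, target, reduction="sum").item()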

torchx/pipelines/kfp/adapter.py

Lines changed: 3 additions & 1 deletion
@@ -50,7 +50,9 @@ def component_spec_from_app(app: api.AppDef) -> Tuple[str, api.Role]:
 
     role = app.roles[0]
     assert (
-        role.num_replicas == 1
+        role.num_replicas
+        == 1
+        # pyre-fixme[16]: `AppDef` has no attribute `num_replicas`.
     ), f"KFP adapter only supports one replica, got {app.num_replicas}"
 
     command = [role.entrypoint, *role.args]
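The suppressed [16] points at a real mismatch rather than checker noise: per the error message, `num_replicas` is not an attribute of `AppDef`, yet the f-string reads `app.num_replicas`, while the assert condition itself reads `role.num_replicas`. A self-contained sketch of the alternative fix, with hypothetical stand-ins for torchx's `Role` and `AppDef` (the commit only suppresses the error):

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class Role:  # hypothetical stand-in
        num_replicas: int = 1

    @dataclass
    class AppDef:  # hypothetical stand-in
        roles: List[Role] = field(default_factory=list)

    app = AppDef(roles=[Role(num_replicas=1)])
    role = app.roles[0]
    # Reading num_replicas from the role, not the app, avoids the error:
    assert (
        role.num_replicas == 1
    ), f"KFP adapter only supports one replica, got {role.num_replicas}"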

torchx/schedulers/aws_batch_scheduler.py

Lines changed: 2 additions & 0 deletions
@@ -809,6 +809,8 @@ def _stream_events(
                 startFromHead=True,
                 **args,
             )
+        # pyre-fixme[66]: Exception handler type annotation `unknown` must
+        #  extend BaseException.
         except self._log_client.exceptions.ResourceNotFoundException:
             return []  # noqa: B901
         if response["nextForwardToken"] == next_token:

torchx/schedulers/aws_sagemaker_scheduler.py

Lines changed: 2 additions & 2 deletions
@@ -267,9 +267,9 @@ def _submit_dryrun(
             raise ValueError(
                 f"{key} is controlled by aws_sagemaker_scheduler and is set to {job_def[key]}"
             )
-        value = cfg.get(key)  # pyre-ignore[26]
+        value = cfg.get(key)  # type: ignore
         if value is not None:
-            job_def[key] = value
+            job_def[key] = value  # type: ignore
 
     req = AWSSageMakerJob(
         job_name=job_name,
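This swap trades a Pyre-specific comment for the PEP 484 form: `# pyre-ignore[26]` is keyed to a single Pyre error code, while `# type: ignore` is honored by any PEP-484-conformant checker and is not tied to Pyre's error numbering. The two flavors side by side (illustrative only, with hypothetical `cfg` and `key`):

    from typing import Dict, Optional

    cfg: Dict[str, str] = {"role": "worker"}
    key = "role"

    # Pyre-specific suppression, keyed to one error code:
    value: Optional[str] = cfg.get(key)  # pyre-ignore[26]
    # PEP 484 suppression, honored by Pyre, mypy, and other checkers:
    value = cfg.get(key)  # type: ignore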

torchx/schedulers/ray/ray_driver.py

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ def load_actor_json(filename: str) -> List[RayActor]:
     return actors
 
 
-def create_placement_group_async(replicas: List[RayActor]) -> PlacementGroup:
+def create_placement_group_async(replicas: List[RayActor]) -> PlacementGroup:  # type: ignore
     """return a placement group reference, the corresponding placement group could be scheduled or pending"""
     bundles = []
     for replica in replicas:
