
Commit a1dad2f

Skylion007 authored and pytorchmergebot committed
[BE][Ez]: Autotype torch/profiler with ruff ANN (pytorch#157923)
Apply ruff autotyping fixes to add annotations to torch/profiler.

Pull Request resolved: pytorch#157923
Approved by: https://github.com/albanD, https://github.com/sraikund16

1 parent 53ab730 · commit a1dad2f

File tree: 5 files changed, +60 −57 lines
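For context, ruff's ANN rules (from the flake8-annotations plugin) flag functions that lack parameter or return annotations, and its autofix can add the unambiguous ones, notably `-> None` for functions that never return a value. A minimal before/after sketch of the kind of change applied throughout this commit (illustrative only; the exact ruff invocation used for the PR is not shown here):

# Before: ANN204 (missing return type for special method) and
# ANN201 (missing return type for public function) fire here.
class Counter:
    def __init__(self, start):
        self.value = start

    def bump(self):
        self.value += 1

# After autofixing: the implicit-None returns are annotated explicitly.
class Counter:
    def __init__(self, start) -> None:
        self.value = start

    def bump(self) -> None:
        self.value += 1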

torch/profiler/_memory_profiler.py

Lines changed: 4 additions & 4 deletions
@@ -514,7 +514,7 @@ def __init__(self, op_tree: OpTree) -> None:
     def flow_nodes(self) -> tuple[DataFlowNode, ...]:
         return tuple(self._flow_nodes)
 
-    def validate(self):
+    def validate(self) -> None:
         # Check that each (Tensor, version) pair has a unique creation node
         outputs: set[tuple[TensorKey, int]] = set()
         for node in self.flow_nodes:
@@ -964,7 +964,7 @@ def _set_optimizer_state(self) -> None:
                     if key is not None:
                         self._categories.set_by_id(key, Category.OPTIMIZER_STATE)
 
-    def _set_autograd_detail(self):
+    def _set_autograd_detail(self) -> None:
         prior = {None, Category.AUTOGRAD_DETAIL}
         for node in self._data_flow_graph.flow_nodes:
             if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event):
@@ -976,7 +976,7 @@ def _set_autograd_detail(self):
 
 
 class MemoryProfileTimeline:
-    def __init__(self, memory_profile):
+    def __init__(self, memory_profile) -> None:
         """The minimum representation of the memory profile timeline
         includes the memory timeline and categories. The timeline
         consists of [timestamp, action, (TensorKey, version), numbytes]
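The docstring above pins down the timeline record format; a hypothetical illustration of that shape (all values invented, and the real code uses richer types than the plain strings shown here):

# Each entry: [timestamp, action, (TensorKey, version), numbytes]
timeline = [
    [1_000_000, "create",  ("tensor_a", 0), 4096],  # 4 KiB allocated
    [1_000_750, "destroy", ("tensor_a", 0), 4096],  # matching free
]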
@@ -999,7 +999,7 @@ def _coalesce_timeline(self, device_str):
         times: list[int] = []
         sizes: list[list[int]] = []
 
-        def update(key, version, delta):
+        def update(key, version, delta) -> None:
             category = (
                 self.categories.get(key, version)
                 if isinstance(key, TensorKey)

torch/profiler/_pattern_matcher.py

Lines changed: 25 additions & 23 deletions
@@ -26,7 +26,7 @@ class Pattern:
     In subclass, define description and skip property.
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         self.prof = prof
         self.should_benchmark = should_benchmark
         self.name = "Please specify a name for pattern"
@@ -39,7 +39,7 @@ def __init__(self, prof: profile, should_benchmark: bool = False):
             self.tid_root.setdefault(event.start_tid, []).append(event)
 
     @property
-    def skip(self):
+    def skip(self) -> bool:
         return False
 
     def report(self, event: _ProfilerEvent):
@@ -66,8 +66,8 @@ def summary(self, events: list[_ProfilerEvent]):
         )
         return default_summary
 
-    def benchmark_summary(self, events: list[_ProfilerEvent]):
-        def format_time(time_ns: int):
+    def benchmark_summary(self, events: list[_ProfilerEvent]) -> str:
+        def format_time(time_ns: int) -> str:
             unit_lst = ["ns", "us", "ms"]
             for unit in unit_lst:
                 if time_ns < 1000:
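The hunk truncates `format_time` after the first line of the loop; a self-contained sketch of how such a unit-scaling formatter plausibly completes (the divide step and the seconds fallback are assumptions, not visible in the diff):

def format_time(time_ns: int) -> str:
    # Step up through units until the value drops below 1000 in that unit.
    unit_lst = ["ns", "us", "ms"]
    for unit in unit_lst:
        if time_ns < 1000:
            return f"{time_ns:.2f} {unit}"
        time_ns //= 1000
    return f"{time_ns:.2f} s"  # assumed fallback for a second or more

print(format_time(2_500_000))  # -> "2.00 ms" under this integer-division sketch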
@@ -135,7 +135,9 @@ def go_up_until(self, event: _ProfilerEvent, predicate):
 
 
 class NamePattern(Pattern):
-    def __init__(self, prof: profile, name: str, should_benchmark: bool = False):
+    def __init__(
+        self, prof: profile, name: str, should_benchmark: bool = False
+    ) -> None:
         super().__init__(prof, should_benchmark)
         self.description = f"Matched Name Event: {name}"
         self.name = name
@@ -161,7 +163,7 @@ class ExtraCUDACopyPattern(Pattern):
     If at any step we failed, it is not a match.
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "Extra CUDA Copy Pattern"
         self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU."
@@ -174,7 +176,7 @@ def __init__(self, prof: profile, should_benchmark: bool = False):
         }
 
     @property
-    def skip(self):
+    def skip(self) -> bool:
         return not self.prof.with_stack or not self.prof.record_shapes
 
     def match(self, event):
@@ -248,7 +250,7 @@ class ForLoopIndexingPattern(Pattern):
     We also keep a dictionary to avoid duplicate match in the for loop.
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "For Loop Indexing Pattern"
         self.description = "For loop indexing detected. Vectorization recommended."
@@ -271,7 +273,7 @@ def match(self, event: _ProfilerEvent):
             return False
 
         # Custom event list matching
-        def same_ops(list1, list2):
+        def same_ops(list1, list2) -> bool:
             if len(list1) != len(list2):
                 return False
             for op1, op2 in zip(list1, list2):
@@ -295,7 +297,7 @@ def same_ops(list1, list2):
 
 
 class FP32MatMulPattern(Pattern):
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "FP32 MatMul Pattern"
         self.description = (
@@ -316,7 +318,7 @@ def skip(self):
         )
         return has_tf32 is False or super().skip or not self.prof.record_shapes
 
-    def match(self, event: _ProfilerEvent):
+    def match(self, event: _ProfilerEvent) -> bool:
         # If we saw this pattern once, we don't need to match it again
         if event.tag != _EventType.TorchOp:
             return False
@@ -365,7 +367,7 @@ class OptimizerSingleTensorPattern(Pattern):
     String match
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "Optimizer Single Tensor Pattern"
         self.optimizers_with_foreach = ["adam", "sgd", "adamw"]
@@ -375,7 +377,7 @@ def __init__(self, prof: profile, should_benchmark: bool = False):
         )
         self.url = ""
 
-    def match(self, event: _ProfilerEvent):
+    def match(self, event: _ProfilerEvent) -> bool:
         for optimizer in self.optimizers_with_foreach:
             if event.name.endswith(f"_single_tensor_{optimizer}"):
                 return True
@@ -400,7 +402,7 @@ class SynchronizedDataLoaderPattern(Pattern):
 
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "Synchronized DataLoader Pattern"
         self.description = (
@@ -412,7 +414,7 @@ def __init__(self, prof: profile, should_benchmark: bool = False):
             "#enable-async-data-loading-and-augmentation"
         )
 
-    def match(self, event: _ProfilerEvent):
+    def match(self, event: _ProfilerEvent) -> bool:
         def is_dataloader_function(name: str, function_name: str):
             return name.startswith(
                 os.path.join("torch", "utils", "data", "dataloader.py")
@@ -459,7 +461,7 @@ class GradNotSetToNonePattern(Pattern):
     String match
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "Gradient Set To Zero Instead of None Pattern"
         self.description = (
@@ -471,7 +473,7 @@ def __init__(self, prof: profile, should_benchmark: bool = False):
             "#disable-gradient-calculation-for-validation-or-inference"
         )
 
-    def match(self, event: _ProfilerEvent):
+    def match(self, event: _ProfilerEvent) -> bool:
         if not event.name.endswith(": zero_grad"):
             return False
         if not event.children:
@@ -500,7 +502,7 @@ class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern):
     String match
     """
 
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern"
         self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d."
@@ -531,17 +533,17 @@ def match(self, event: _ProfilerEvent):
 
 
 class MatMulDimInFP16Pattern(Pattern):
-    def __init__(self, prof: profile, should_benchmark: bool = False):
+    def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
         super().__init__(prof, should_benchmark)
         self.name = "Matrix Multiplication Dimension Not Aligned Pattern"
         self.description = "Detected matmul with dimension not aligned. Please use matmul with aligned dimension."
         self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp"
 
     @property
-    def skip(self):
+    def skip(self) -> bool:
         return not self.prof.with_stack or not self.prof.record_shapes
 
-    def match(self, event: _ProfilerEvent):
+    def match(self, event: _ProfilerEvent) -> bool:
         def mutiple_of(shapes, multiple):
             return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:])
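`mutiple_of` (the typo is in the original source) returns True only when the trailing two dimensions of every shape are divisible by the given multiple; FP16 matmuls run fastest on hardware such as Tensor Cores when those dimensions are multiples of 8. A quick illustration with invented shapes:

def mutiple_of(shapes, multiple):
    return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:])

print(mutiple_of([[64, 128], [128, 256]], 8))  # True: all trailing dims align
print(mutiple_of([[64, 100], [100, 256]], 8))  # False: 100 % 8 != 0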

@@ -584,7 +586,7 @@ def closest_multiple(shapes, multiple):
         return shapes_factor_map
 
 
-def source_code_location(event: Optional[_ProfilerEvent]):
+def source_code_location(event: Optional[_ProfilerEvent]) -> str:
     while event:
         if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall:
             assert isinstance(
@@ -611,7 +613,7 @@ def report_all_anti_patterns(
     should_benchmark: bool = False,
     print_enable: bool = True,
     json_report_dir: Optional[str] = None,
-):
+) -> None:
     report_dict: dict = {}
     anti_patterns = [
         ExtraCUDACopyPattern(prof, should_benchmark),
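A hypothetical usage sketch for the entry point annotated above. Several patterns shown earlier skip themselves unless the profile captured stacks and shapes, so those options are enabled (the workload is invented):

import torch
from torch.profiler import profile
from torch.profiler._pattern_matcher import report_all_anti_patterns

# Stacks and shapes are required by patterns such as ExtraCUDACopyPattern.
with profile(with_stack=True, record_shapes=True) as prof:
    x = torch.randn(128, 128)
    y = (x @ x).relu().sum()

report_all_anti_patterns(prof, print_enable=True)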

torch/profiler/_utils.py

Lines changed: 6 additions & 6 deletions
@@ -52,7 +52,7 @@ class Interval:
 
 
 class EventKey:
-    def __init__(self, event):
+    def __init__(self, event) -> None:
         self.event = event
 
     def __hash__(self):
@@ -61,7 +61,7 @@ def __hash__(self):
     def __eq__(self, other):
         return self.event.id == other.event.id
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return f"{self.event.name}"
 
     def intervals_overlap(self, intervals: list[Interval]):
@@ -98,7 +98,7 @@ def intervals_overlap(self, intervals: list[Interval]):
 
 
 class BasicEvaluation:
-    def __init__(self, prof: profile):
+    def __init__(self, prof: profile) -> None:
         self.profile = prof
         self.metrics: dict[EventKey, EventMetrics] = {}
         self.compute_self_time()
@@ -110,7 +110,7 @@ def __init__(self, prof: profile):
         self.queue_depth_list = self.compute_queue_depth()
         self.compute_idle_time()
 
-    def compute_self_time(self):
+    def compute_self_time(self) -> None:
         """
         Computes event's self time(total time - time in child ops).
         """
@@ -234,7 +234,7 @@ def new_old_event_comparator(event):
 
         return queue_depth_list
 
-    def compute_idle_time(self):
+    def compute_idle_time(self) -> None:
         """
        Computes idle time of the profile.
         """
@@ -386,7 +386,7 @@ def source_code_location(event):
 # https://github.com/pytorch/pytorch/issues/75504
 # TODO(dberard) - deprecate / remove workaround for CUDA >= 12, when
 # we stop supporting older CUDA versions.
-def _init_for_cuda_graphs():
+def _init_for_cuda_graphs() -> None:
     from torch.autograd.profiler import profile
 
     with profile():
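The hunk cuts off after `with profile():`. Judging by the surrounding comments, the workaround only needs to run the profiler once so its lazy initialization happens before any CUDA graph capture, which suggests an empty body; a sketch under that assumption:

def _init_for_cuda_graphs() -> None:
    from torch.autograd.profiler import profile

    # Entering and exiting an empty profile forces one-time profiler
    # initialization ahead of CUDA graph capture (see issue 75504 above).
    with profile():
        pass  # assumed body; the diff truncates here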

torch/profiler/itt.py

Lines changed: 3 additions & 2 deletions
@@ -1,5 +1,6 @@
 # mypy: allow-untyped-defs
 from contextlib import contextmanager
+from typing import NoReturn
 
 
 try:
@@ -8,13 +9,13 @@
 
     class _ITTStub:
         @staticmethod
-        def _fail(*args, **kwargs):
+        def _fail(*args, **kwargs) -> NoReturn:
             raise RuntimeError(
                 "ITT functions not installed. Are you sure you have a ITT build?"
             )
 
         @staticmethod
-        def is_available():
+        def is_available() -> bool:
             return False
 
         rangePush = _fail
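`NoReturn` fits `_fail` because every call raises: type checkers learn that no code path returns normally, which also keeps stubbed attributes such as `rangePush` honest at call sites. A minimal standalone sketch of the same stub pattern (names invented):

from typing import NoReturn

class _FeatureStub:
    @staticmethod
    def _fail(*args, **kwargs) -> NoReturn:
        # Always raises, so NoReturn is accurate: no path returns a value.
        raise RuntimeError("feature not available in this build")

    @staticmethod
    def is_available() -> bool:
        return False

    start = _fail  # every stubbed entry point fails loudly when called
    stop = _fail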
