
Commit 6108a0b

Thomas Polasek authored and facebook-github-bot committed
Convert directory fbcode/executorch to use the Ruff Formatter (#6232)
Summary:
Pull Request resolved: #6232

Converts the directory specified to use the Ruff formatter in pyfmt

ruff_dog

If this diff causes merge conflicts when rebasing, please run
`hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt`
on your diff, and amend any changes before rebasing onto latest.
That should help reduce or eliminate any merge conflicts.

allow-large-files
bypass-github-export-checks

Reviewed By: amyreese

Differential Revision: D64264425

fbshipit-source-id: 81ddaf7b0d00a85f51b3ad6173bb0825eee33079
1 parent 55ed63f commit 6108a0b
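
Note: `arc pyfmt` is Meta-internal tooling. For contributors working from the open-source checkout, a roughly equivalent cleanup can be done with the Ruff CLI directly; a minimal sketch, assuming `ruff` is installed and picks up the repository's existing configuration:

    # Reformat all Python files under the current directory in place
    ruff format .

    # Or only print the would-be changes without rewriting files (e.g. before rebasing)
    ruff format --diff .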

File tree: 6 files changed (+17, −3 lines)

examples/apple/coreml/scripts/export.py

Lines changed: 3 additions & 0 deletions

@@ -14,8 +14,10 @@
 
 import torch
 
+# pyre-fixme[21]: Could not find module `executorch.backends.apple.coreml.compiler`.
 from executorch.backends.apple.coreml.compiler import CoreMLBackend
 
+# pyre-fixme[21]: Could not find module `executorch.backends.apple.coreml.partition`.
 from executorch.backends.apple.coreml.partition import CoreMLPartitioner
 from executorch.devtools.etrecord import generate_etrecord
 from executorch.exir import to_edge

@@ -76,6 +78,7 @@ def parse_args() -> argparse.ArgumentParser:
     parser.add_argument("--save_processed_bytes", action=argparse.BooleanOptionalAction)
 
     args = parser.parse_args()
+    # pyre-fixme[7]: Expected `ArgumentParser` but got `Namespace`.
     return args
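Most of the churn in this commit is Pyre suppression comments: a `# pyre-fixme[N]` (or `# pyre-ignore[N]`) comment silences error code N on the line immediately below it, with `fixme` conventionally marking a genuine issue to clean up later. A contrived illustration (hypothetical code, not from this repo):

    def answer() -> str:
        # pyre-fixme[7]: Expected `str` but got `int`.
        return 42  # error 7 (incompatible return type) is silenced on this line only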

examples/models/llama2/export_llama_lib.py

Lines changed: 4 additions & 1 deletion

@@ -650,6 +650,7 @@ def _export_llama(modelname, args) -> LLMEdgeManager: # noqa: C901
     if args.num_sharding > 0:
         model_sharding.split_graph(
             builder_exported_to_edge.edge_manager.exported_program(),
+            # pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
             builder_exported_to_edge.metadata["get_n_layers"],
             shares=args.num_sharding,
         )

@@ -669,6 +670,7 @@ def _export_llama(modelname, args) -> LLMEdgeManager: # noqa: C901
     if args.num_sharding > 0 and args.qnn:
         from executorch.backends.qualcomm.utils.utils import canonicalize_program
 
+        # pyre-fixme[16]: Module `backends` has no attribute `qualcomm`.
         canonicalize_program(builder.edge_manager.exported_program())
 
     builder = builder.to_executorch()

@@ -686,6 +688,7 @@ def _export_llama(modelname, args) -> LLMEdgeManager: # noqa: C901
     if args.num_sharding > 0 and args.qnn:
         from executorch.backends.qualcomm.utils.utils import canonicalize_program
 
+        # pyre-fixme[16]: Module `backends` has no attribute `qualcomm`.
         canonicalize_program(builder.edge_manager.exported_program())
 
     builder = builder.to_executorch()

@@ -912,7 +915,6 @@ def _get_source_transforms( # noqa
 
     if args.use_kv_cache:
         if args.qnn:
-            # pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.backends.qualcomm.utils.utils`
            from executorch.backends.qualcomm.utils.utils import (
                convert_linear_to_conv2d,
            )

@@ -924,6 +926,7 @@ def _get_source_transforms( # noqa
            if args.optimized_rotation_path:
                transforms.append(fuse_layer_norms)
                transforms.append(get_model_with_r1_r2(args.optimized_rotation_path))
+               # pyre-fixme[16]: Module `backends` has no attribute `qualcomm`.
                transforms.append(convert_linear_to_conv2d)
 
        elif args.mps:

examples/models/phi-3-mini/phi_3_mini.py

Lines changed: 5 additions & 0 deletions

@@ -17,17 +17,22 @@ def __init__(self, model: Phi3ForCausalLM, max_batch_size: int, max_seq_len: int
         super().__init__()
         self.model = model
         self.cache = ETStaticCache(
+            # pyre-fixme[16]: `Phi3ForCausalLM` has no attribute `config`.
             config=model.config,
             max_batch_size=max_batch_size,
             max_cache_len=max_seq_len,
+            # pyre-fixme[16]: `Phi3ForCausalLM` has no attribute `device`.
             device=self.model.device,
+            # pyre-fixme[16]: `Phi3ForCausalLM` has no attribute `dtype`.
             dtype=self.model.dtype,
         )
 
     def forward(
         self,
+        # pyre-fixme[9]: input_ids has type `LongTensor`; used as `None`.
         input_ids: torch.LongTensor = None,
     ) -> torch.FloatTensor:
+        # pyre-fixme[16]: `Phi3ForCausalLM` has no attribute `forward`.
         return self.model.forward(
             input_ids=input_ids,
             use_cache=True,
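The `pyre-fixme[9]` above flags a `None` default on a parameter annotated as a non-optional `torch.LongTensor`. This commit only relocates suppressions, but a hypothetical typed-clean signature would widen the annotation instead:

    from typing import Optional

    import torch

    class Phi3Wrapper(torch.nn.Module):  # illustrative stand-in, not the repo's class
        # Optional[...] admits the None default, so no pyre-fixme[9] is needed
        def forward(self, input_ids: Optional[torch.LongTensor] = None) -> torch.Tensor:
            assert input_ids is not None
            return input_ids.float()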

examples/models/phi-3-mini/static_cache.py

Lines changed: 1 addition & 0 deletions

@@ -34,6 +34,7 @@ def __init__(
         )
 
     def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
+        # pyre-fixme[16]: `ETStaticCache` has no attribute `key_cache`.
         return (self.key_cache[layer_idx][0, 0].any(dim=-1)).sum().item()
 
     def get_usable_length(
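Error 16 here is Pyre failing to see `key_cache`, presumably because the attribute is populated by the third-party base class (transformers' `StaticCache`) whose definition Pyre cannot resolve; that is an assumption about the upstream types, not something stated in this diff. A contrived repro of the pattern:

    class Base:
        def __init__(self) -> None:
            setattr(self, "key_cache", [])  # assigned dynamically; invisible to Pyre

    class Child(Base):
        def size(self) -> int:
            # pyre-fixme[16]: `Child` has no attribute `key_cache`.
            return len(self.key_cache)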

extension/llm/export/builder.py

Lines changed: 4 additions & 1 deletion

@@ -181,10 +181,11 @@ def capture_pre_autograd_graph(self) -> "LLMEdgeManager":
         # 1. torch.nn.attention.sdpa_kernel([SDPBackend.MATH]) is for bypassing the dynamo error when tracing
         # 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
         with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
-            # pyre-fixme[8]
             if hasattr(self.args, "qnn") and self.args.qnn:
                 # TODO: this is temporary and export_for_training doesn't work with qnn either. We need a
                 # functional graph. See issue https://github.com/pytorch/executorch/pull/4627 for more details
+                # pyre-fixme[8]: Attribute has type `Optional[GraphModule]`; used as
+                # `Module`.
                 self.pre_autograd_graph_module = torch.export.export(
                     self.model,
                     self.example_inputs,

@@ -193,6 +194,8 @@ def capture_pre_autograd_graph(self) -> "LLMEdgeManager":
                     strict=True,
                 ).module()
             else:
+                # pyre-fixme[8]: Attribute has type `Optional[GraphModule]`; used as
+                # `Module`.
                 self.pre_autograd_graph_module = export_for_training(
                     self.model,
                     self.example_inputs,
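The moves in this file show why a formatter conversion touches suppressions at all: a Pyre comment binds to the next source line, so once Ruff reflows the code, a comment stranded on its old line no longer covers the statement it was written for. A minimal sketch of the suppressed error, with hypothetical scaffolding around the call from the diff:

    from typing import Optional, Tuple

    import torch

    class Manager:  # illustrative stand-in for LLMEdgeManager
        def __init__(self, model: torch.nn.Module, example_inputs: Tuple[torch.Tensor, ...]) -> None:
            self.model = model
            self.example_inputs = example_inputs
            self.pre_autograd_graph_module: Optional[torch.fx.GraphModule] = None

        def capture(self) -> None:
            # torch.export.export(...).module() is annotated as returning `nn.Module`,
            # so assigning it to an `Optional[GraphModule]` attribute trips Pyre
            # error 8 (incompatible attribute type) on the assignment below:
            # pyre-fixme[8]: Attribute has type `Optional[GraphModule]`; used as `Module`.
            self.pre_autograd_graph_module = torch.export.export(
                self.model, self.example_inputs
            ).module()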

extension/llm/tokenizer/utils.py

Lines changed: 0 additions & 1 deletion

@@ -4,7 +4,6 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.examples.models.llama2.tokenizer.tiktoken`.
 from executorch.examples.models.llama2.tokenizer.tiktoken import Tokenizer as Tiktoken
 from executorch.extension.llm.tokenizer.tokenizer import (
     Tokenizer as SentencePieceTokenizer,
