Commit fc65000

Zonglin Peng authored and facebook-github-bot committed
remove internal names
Summary: jarvis -> meta

Differential Revision: D66243117
1 parent 7473851 commit fc65000

File tree

4 files changed, +19 -21 lines changed

backends/cadence/aot/pass_utils.py
backends/cadence/aot/passes.py
backends/cadence/aot/utils.py
backends/cadence/runtime/runtime.py

backends/cadence/aot/pass_utils.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ def get_all_available_cadence_passes() -> Set[ExportPass]:
     return set(ALL_CADENCE_PASSES.keys())
 
 
-# Create a new filter to filter out relevant passes from all Jarvis passes.
+# Create a new filter to filter out relevant passes from all passes.
 def create_cadence_pass_filter(
     opt_level: int, debug: bool = False
 ) -> Callable[[ExportPass], bool]:
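For context, a minimal usage sketch of the filter touched by this hunk. Only the two signatures visible in the diff are taken as given; the import path and the filter's exact opt-level semantics are assumptions.

from executorch.backends.cadence.aot.pass_utils import (  # assumed module path
    create_cadence_pass_filter,
    get_all_available_cadence_passes,
)

# Build a predicate over passes; presumably it keeps passes whose
# registered opt_level is at or below the requested level.
pass_filter = create_cadence_pass_filter(opt_level=1, debug=False)

# Apply it to the full Cadence pass registry.
selected = [p for p in get_all_available_cadence_passes() if pass_filter(p)]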

backends/cadence/aot/passes.py

Lines changed: 2 additions & 2 deletions
@@ -40,7 +40,7 @@
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class InitializePipeline(ExportPass):
     """
-    Initialize the Jarvis pipeline. This should invariably be the first pass to
+    Initialize the pass pipeline. This should invariably be the first pass to
     run.
     """

@@ -54,7 +54,7 @@ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class FinalizePipeline(ExportPass):
     """
-    The final cleanup pass after running the Jarvis pipeline.
+    The final cleanup pass after running the pass pipeline.
     """
 
     def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
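As a point of reference, a hedged sketch of registering a custom pass with the decorator shown above. The decorator, attribute class, and call signature come from the diff; the import paths and the pass body are illustrative assumptions.

import torch
from torch.fx.passes.infra.pass_base import PassResult
from executorch.exir.pass_base import ExportPass  # assumed path
from executorch.backends.cadence.aot.pass_utils import (  # assumed path
    CadencePassAttribute,
    register_cadence_pass,
)

@register_cadence_pass(CadencePassAttribute(opt_level=1))
class NoOpCleanupPass(ExportPass):
    """Illustrative pass: inspects the graph and reports no changes."""

    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
        # A real pass would rewrite graph_module.graph here.
        return PassResult(graph_module, False)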

backends/cadence/aot/utils.py

Lines changed: 8 additions & 8 deletions
@@ -124,29 +124,29 @@ def get_ops_count(graph_module: torch.fx.GraphModule) -> Dict[str, int]:
 
 
 # Print the ops and how many times they occur multiple graph modules:
-# from export, from to_edge, and from Jarvis. Print the available
+# from export, from to_edge, and from final. Print the available
 # implementations for each op, and error out if the op is not supported.
 def print_ops_info(
     to_edge_gm: torch.fx.GraphModule,
-    jarvis_gm: torch.fx.GraphModule,
+    final_gm: torch.fx.GraphModule,
 ) -> None:
     to_edge_ops_count = get_ops_count(to_edge_gm)
-    jarvis_ops_count = get_ops_count(jarvis_gm)
+    final_ops_count = get_ops_count(final_gm)
 
     removed_ops = []
     # Get the counts of the ops that are removed from the final graph
     for k in to_edge_ops_count:
-        if k not in jarvis_ops_count:
+        if k not in final_ops_count:
             removed_ops.append(k)
 
     # Create a dict of ops and their counts to pass to tabulate
     ops_count = [
         [
             op,
-            jarvis_ops_count[op],
+            final_ops_count[op],
             to_edge_ops_count[op] if op in to_edge_ops_count else 0,
         ]
-        for op in jarvis_ops_count
+        for op in final_ops_count
     ]
     sorted_ops_count = sorted(ops_count, key=lambda x: x[1], reverse=True)

@@ -166,7 +166,7 @@ def print_ops_info(
         sorted_ops_count,
         headers=[
             "Final Operators ",  # one character longer than the longest op name
-            "Jarvis (Final) Graph",
+            "Final Graph",
             "To_edge Graph",
             "Export Graph",
         ],

@@ -181,7 +181,7 @@
         removed_ops_count,
         headers=[
             "Deleted Operators ",  # one character longer than the longest op name
-            "Jarvis (Final) Graph",
+            "Final Graph",
             "To_edge Graph",
             "Export Graph",
         ],
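To make the renamed reporting flow concrete, a small sketch under stated assumptions: get_ops_count and print_ops_info are taken from this file with the keyword names introduced by this commit, while the wrapper function and its arguments are hypothetical.

import torch
from executorch.backends.cadence.aot.utils import (  # assumed module path
    get_ops_count,
    print_ops_info,
)

def report_ops(to_edge_gm: torch.fx.GraphModule, final_gm: torch.fx.GraphModule) -> None:
    # Ops present after to_edge but absent from the final graph were removed
    # by the Cadence passes; this mirrors the loop inside print_ops_info.
    final_counts = get_ops_count(final_gm)
    removed = [op for op in get_ops_count(to_edge_gm) if op not in final_counts]
    print(f"removed ops: {removed}")

    # Full tabulated report: final-graph counts against to_edge counts.
    print_ops_info(to_edge_gm=to_edge_gm, final_gm=final_gm)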

backends/cadence/runtime/runtime.py

Lines changed: 8 additions & 10 deletions
@@ -28,7 +28,7 @@
 from torch.utils._pytree import TreeSpec
 
 
-class JarvisETDump:
+class CadenceETDump:
     def __init__(self, output_dir: str) -> None:
         self.tensor_dump_dir: str = os.path.join(output_dir, "tensors")
         self.etdump_path: str = os.path.join(output_dir, "etdump.etdp")

@@ -64,28 +64,26 @@ def get_outputs(self, log_to_stdout: bool = False) -> Tuple[torch.Tensor]:
             for event_block in self.et_inspector.event_blocks
             if event_block.name == "Execute"
         ]
-        logging.debug(f"[Jarvis][ETdump] output: {output}")
+        logging.debug(f"[ETdump] output: {output}")
         return output[0]
 
     def print_event_block(self) -> None:
-        logging.debug("[Jarvis][ETdump] data tabular:")
+        logging.debug("[ETdump] data tabular:")
         if logging.getLogger().level <= logging.DEBUG:
             self.et_inspector.print_data_tabular()
 
     def print_event_data(self) -> None:
-        logging.debug("[Jarvis][ETdump] event data ")
+        logging.debug("[ETdump] event data ")
         for event_block in self.et_inspector.event_blocks:
             for event in event_block.events:
                 logging.debug(event)
 
     def dump_intermediate_tensors(self) -> None:
         if self.etrecord_path is None:
-            logging.info("[Jarvis][ETdump] Intermediate tensors not available")
+            logging.info("[ETdump] Intermediate tensors not available")
             return
 
-        logging.info(
-            f"[Jarvis][ETdump] Dumping intermediate tensors to {self.tensor_dump_dir}"
-        )
+        logging.info(f"[ETdump] Dumping intermediate tensors to {self.tensor_dump_dir}")
         os.makedirs(self.tensor_dump_dir, exist_ok=True)
         exec_blocks = [
             eb for eb in self.et_inspector.event_blocks if eb.name == "Execute"

@@ -153,13 +151,13 @@ def run(
     if working_dir is None:
         working_dir = tempfile.mkdtemp(dir="/tmp")
 
-    # initialize Jarvis e2e Executor with executorch_cfg.
+    # initialize e2e Executor with executorch_cfg.
     executor = Executor(working_dir)
 
     # run Executor
     executor()
 
-    etdump = JarvisETDump(output_dir=working_dir)
+    etdump = CadenceETDump(output_dir=working_dir)
     outputs = etdump.get_outputs()
 
     assert isinstance(out_spec, TreeSpec)
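Finally, a hypothetical sketch of driving the renamed CadenceETDump directly, using only the methods visible in this diff; the import path and the run directory (assumed to already contain an etdump.etdp from a prior execution) are illustrative.

import logging
from executorch.backends.cadence.runtime.runtime import CadenceETDump  # assumed path

logging.getLogger().setLevel(logging.DEBUG)

etdump = CadenceETDump(output_dir="/tmp/cadence_run")  # hypothetical run directory
outputs = etdump.get_outputs()        # outputs of the "Execute" event block
etdump.print_event_block()            # tabular event data, logged at DEBUG
etdump.dump_intermediate_tensors()    # logs and returns if no ETRecord is set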
