88import contextlib
99import struct
1010
11- from typing import final , Dict , List
11+ from typing import Dict , final , List
1212
1313import mtk_converter
1414import mtk_neuron
2222from executorch .exir .backend .compile_spec_schema import CompileSpec
2323
2424SKIP_COMPILE_SPEC_KEYS = {"ImportForever" }
25- EXTRACT_SHARED_BLOB_KEY = ' ExtractSharedBlobKey'
25+ EXTRACT_SHARED_BLOB_KEY = " ExtractSharedBlobKey"
2626HEADER_SIZE = 13
2727HEADER_VERSION = 1
2828
@@ -45,14 +45,16 @@ def assert_default_dim_order(edge_graph_module: torch.fx.GraphModule) -> None:
4545
4646
def _pack_header(num_inputs, num_outputs, model_bytes_size):
    """Serialize the fixed-size payload header.

    Layout is little-endian: a 1-byte format version followed by three
    uint32 fields — input count, output count, and model byte size.
    The packed result must match HEADER_SIZE exactly.
    """
    packed = struct.pack("<BIII", HEADER_VERSION, num_inputs, num_outputs, model_bytes_size)
    assert len(packed) == HEADER_SIZE
    return packed
5153
5254
def _unpack_header(header_bytes):
    """Parse a header produced by ``_pack_header``.

    Expects exactly HEADER_SIZE bytes in the little-endian "<BIII"
    layout; asserts the embedded version equals HEADER_VERSION and
    returns ``(num_inputs, num_outputs, buffer_size)``.
    """
    assert len(header_bytes) == HEADER_SIZE
    fields = struct.unpack("<BIII", header_bytes)
    assert fields[0] == HEADER_VERSION
    return fields[1], fields[2], fields[3]
5860
@@ -89,7 +91,7 @@ def preprocess(
8991 if spec .key in SKIP_COMPILE_SPEC_KEYS :
9092 continue
9193 if spec .key == EXTRACT_SHARED_BLOB_KEY :
92- compile_options .append (' --dla-opt=0' )
94+ compile_options .append (" --dla-opt=0" )
9395 continue
9496
9597 # General compile spec handling
@@ -151,7 +153,7 @@ def preprocess_multimethod(
151153 shared_blob_key = None
152154 for spec in compile_specs [method_name ][idx ]:
153155 if spec .key == EXTRACT_SHARED_BLOB_KEY :
154- shared_blob_key = spec .value .decode (' utf-8' )
156+ shared_blob_key = spec .value .decode (" utf-8" )
155157
156158 if shared_blob_key is None :
157159 continue
@@ -164,20 +166,26 @@ def preprocess_multimethod(
164166 models_dict [shared_blob_key ].append (model_bytes )
165167 result_dict [shared_blob_key ].append (result )
166168
167- data_store_output_dict = dict ()
169+ data_store_output_dict = {}
168170 for key , models in models_dict .items ():
169171 ndm = NamedDataStore ()
170- blob , new_models = mtk_neuron .extract_shared_data (models , options = '-e union' )
172+ blob , new_models = mtk_neuron .extract_shared_data (
173+ models , options = "-e union"
174+ )
171175 ndm .add_named_data (key , bytes (blob ))
172176 data_store_output_dict [key ] = ndm .get_named_data_store_output ()
173177 models .clear ()
174178 models .extend (new_models )
175179
176180 for key , data_store_output in data_store_output_dict .items ():
177- for idx , (model_info , model_bytes ) in enumerate (zip (infos_dict [key ], models_dict [key ])):
181+ for idx , (model_info , model_bytes ) in enumerate (
182+ zip (infos_dict [key ], models_dict [key ])
183+ ):
178184 num_inputs , num_outputs = model_info
179185 header_bytes = _pack_header (num_inputs , num_outputs , len (model_bytes ))
180186 result_dict [key ][idx ].data_store_output = data_store_output
181- result_dict [key ][idx ].processed_bytes = bytes (header_bytes + model_bytes )
187+ result_dict [key ][idx ].processed_bytes = bytes (
188+ header_bytes + model_bytes
189+ )
182190
183191 return preprocess_results
0 commit comments