|
1 | 1 | import os
|
2 | 2 | import torch
|
3 | 3 | import json
|
| 4 | +import shutil |
4 | 5 | from typing import Union, Callable
|
5 | 6 | from . import utils
|
6 | 7 |
|
@@ -80,76 +81,105 @@ def forward(self, s0 : torch.SymInt, L_x_ : torch.Tensor):
|
80 | 81 | def wrapper(model: torch.nn.Module):
|
81 | 82 | assert isinstance(model, torch.nn.Module), f"{type(model)=}"
|
82 | 83 |
|
83 |
| - def extractor(gm: torch.fx.GraphModule, sample_inputs): |
84 |
| - # 1. Get workspace path |
85 |
| - workspace_path = os.environ.get("GRAPH_NET_EXTRACT_WORKSPACE") |
86 |
| - if not workspace_path: |
87 |
| - raise EnvironmentError( |
88 |
| - "Environment variable 'GRAPH_NET_EXTRACT_WORKSPACE' is not set." |
class GraphExtractor:
    """Dynamo backend callable that serializes each captured FX subgraph to disk.

    NOTE(review): this class closes over names from the enclosing ``wrapper``
    scope -- ``name``, ``placeholder_auto_rename``, ``mut_graph_codes`` and
    ``dynamic`` -- only part of the file is visible here; confirm against the
    full module.

    Layout convention: the first captured graph is written directly into the
    model directory.  As soon as a second graph is captured, the first graph's
    files are retroactively moved into ``subgraph_0`` and every graph from
    then on gets its own ``subgraph_<i>`` directory.
    """

    def __init__(self):
        # Number of subgraphs this extractor instance has written so far.
        self.subgraph_counter = 0

    def move_files(self, source_dir, target_dir):
        """Move every regular file (subdirectories are left behind) from
        *source_dir* into *target_dir*, creating *target_dir* if needed."""
        os.makedirs(target_dir, exist_ok=True)
        for item in os.listdir(source_dir):
            source_path = os.path.join(source_dir, item)
            if os.path.isfile(source_path):
                target_path = os.path.join(target_dir, item)
                shutil.move(source_path, target_path)

    def __call__(self, gm: torch.fx.GraphModule, sample_inputs):
        """Serialize *gm* (code + inputs + metadata) and return ``gm.forward``.

        Raises:
            EnvironmentError: if GRAPH_NET_EXTRACT_WORKSPACE is unset.
        """
        # 1. Get workspace path
        workspace_path = os.environ.get("GRAPH_NET_EXTRACT_WORKSPACE")
        if not workspace_path:
            raise EnvironmentError(
                "Environment variable 'GRAPH_NET_EXTRACT_WORKSPACE' is not set."
            )
        model_path = os.path.join(workspace_path, name)
        os.makedirs(model_path, exist_ok=True)

        if self.subgraph_counter == 0:
            # First graph goes straight into the model directory.
            subgraph_path = model_path
        else:
            if self.subgraph_counter == 1:
                # A second graph appeared: demote the first graph's files
                # into their own subgraph_0 directory.
                subgraph_0_path = os.path.join(model_path, "subgraph_0")
                self.move_files(model_path, subgraph_0_path)

            subgraph_path = os.path.join(
                model_path, f"subgraph_{self.subgraph_counter}"
            )
            os.makedirs(subgraph_path, exist_ok=True)

        self.subgraph_counter += 1

        # 2. Get full params
        params = {}
        input_idx = 0
        unique_id = 0

        def try_rename_placeholder(node):
            # Give placeholders stable synthetic names (v0, v1, ...) when
            # placeholder_auto_rename is enabled; otherwise leave them alone.
            assert node.op == "placeholder"
            if not placeholder_auto_rename:
                return
            nonlocal unique_id
            # NOTE(review): target and name receive *different* ids (e.g. v0
            # vs v1) because the counter advances between the two assignments.
            # This looks accidental unless uniqueness across both namespaces
            # is required -- confirm with the original author.
            node.target = f"v{unique_id}"
            unique_id += 1
            node.name = f"v{unique_id}"
            unique_id += 1

        for node in gm.graph.nodes:
            if node.op == "placeholder":
                try_rename_placeholder(node)
                # Renamed from `input` to avoid shadowing the builtin.
                sample = sample_inputs[input_idx]
                if isinstance(sample, torch.SymInt):
                    # Symbolic ints can't be serialized as-is; substitute a
                    # concrete placeholder tensor.
                    sample = torch.tensor(4)
                params[node.target] = sample
                input_idx += 1
        assert input_idx == len(sample_inputs)
        if mut_graph_codes is not None:
            # Caller-supplied accumulator for the generated graph code.
            assert isinstance(mut_graph_codes, list)
            mut_graph_codes.append(gm.code)
        # 3. Generate and save model code
        base_code = gm.code
        # gm.graph.print_tabular()
        write_code = utils.apply_templates(base_code)
        with open(os.path.join(subgraph_path, "model.py"), "w") as fp:
            fp.write(write_code)

        # 4. Save metadata
        metadata = {
            "framework": "torch",
            "num_devices_required": 1,
            "num_nodes_required": 1,
            "dynamic": bool(dynamic),
            "model_name": name,
        }
        with open(os.path.join(subgraph_path, "graph_net.json"), "w") as f:
            json.dump(metadata, f, indent=4)

        # 5. Save tensor metadata
        # Adapt to different input structures (e.g., single tensor vs. dict/tuple of tensors)
        converted = utils.convert_state_and_inputs(params, [])
        utils.save_converted_to_text(converted, file_path=subgraph_path)
        utils.save_constraints_text(
            converted,
            file_path=os.path.join(
                subgraph_path, "input_tensor_constraints.py"
            ),
        )

        print(
            f"Graph and tensors for '{name}' extracted successfully to: {model_path}"
        )

        return gm.forward
152 | 181 |
|
| 182 | + extractor = GraphExtractor() |
153 | 183 | # return torch.compile(backend=extractor, dynamic=dynamic)
|
154 | 184 | compiled_model = torch.compile(model, backend=extractor, dynamic=dynamic)
|
155 | 185 |
|
|
0 commit comments