20 | 20 |
21 | 21 | __all__ = [ |
22 | 22 | "job_id", |
| 23 | + "dynamic_shapes", |
| 24 | + "assume_static_by_default", |
| 25 | + "automatic_dynamic_shapes", |
| 26 | + "recompile_limit", |
| 27 | + "accumulated_recompile_limit", |
| 28 | + "verbose", |
| 29 | + "capture_scalar_outputs", |
| 30 | + "capture_dynamic_output_shape_ops", |
| 31 | + "log_file_name", |
| 32 | + "fail_on_recompile_limit_hit", |
| 33 | + "allow_unspec_int_on_nn_module", |
| 34 | + "skip_tensor_guards_with_matching_dict_tags", |
| 35 | + "enable_cpp_symbolic_shape_guards", |
| 36 | + "wrap_top_frame", |
| 37 | + "reorderable_logging_functions", |
23 | 38 | ] |
24 | 39 |
25 | 40 |
121 | 136 | """ |
122 | 137 |
123 | 138 |
| 139 | +# Cross-cutting configuration options that affect the entire compilation pipeline |
| 140 | + |
| 141 | +dynamic_shapes: bool = Config(alias="torch._dynamo.config.dynamic_shapes") |
| 142 | +""" |
| 143 | +Controls whether the compilation pipeline supports dynamic tensor shapes. |
| 144 | +When enabled, the compiler can handle tensors with varying dimensions across |
| 145 | +different invocations. This is a cross-cutting setting that affects shape |
| 146 | +inference, guard generation, and code generation across the entire compilation |
| 147 | +stack. |
| 148 | +""" |
| 149 | + |
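A minimal usage sketch, assuming the flag is toggled through the torch._dynamo.config alias named above; with dynamic shapes disabled, each new input size triggers a fresh compilation:

    import torch
    import torch._dynamo

    # Sketch: force fully static compilation via the aliased flag.
    torch._dynamo.config.dynamic_shapes = False

    @torch.compile
    def f(x):
        return x.relu()

    f(torch.randn(4))
    f(torch.randn(8))  # new size -> recompile, since dynamic shapes are disabled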
| 150 | +assume_static_by_default: bool = Config( |
| 151 | + alias="torch._dynamo.config.assume_static_by_default" |
| 152 | +) |
| 153 | +""" |
| 154 | +When enabled, all tensor dimensions are assumed to be static unless explicitly |
| 155 | +marked as dynamic or detected as changing. This compilation-wide behavior affects |
| 156 | +how the entire stack handles shape specialization and can improve performance |
| 157 | +for static workloads. |
| 158 | +""" |
| 159 | + |
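A hedged sketch of overriding the static-by-default assumption for a single dimension; it assumes the existing torch._dynamo.mark_dynamic helper (not part of this change) is available:

    import torch
    import torch._dynamo

    x = torch.randn(8, 16)
    # Mark dim 0 as dynamic so it is not specialized to the observed size 8.
    torch._dynamo.mark_dynamic(x, 0)

    compiled = torch.compile(lambda t: t.sum(dim=0))
    compiled(x)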
| 160 | +automatic_dynamic_shapes: bool = Config( |
| 161 | + alias="torch._dynamo.config.automatic_dynamic_shapes" |
| 162 | +) |
| 163 | +""" |
| 164 | +Enables automatic detection and handling of dynamic shapes. When a tensor's |
| 165 | +shape changes between compilations, the system automatically marks those |
| 166 | +dimensions as dynamic rather than requiring manual specification. This |
| 167 | +cross-cutting optimization improves the user experience by reducing recompilations. |
| 168 | +""" |
| 169 | + |
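A rough illustration of the intended behavior, assuming default settings otherwise; the exact recompile count can vary between builds:

    import torch
    import torch._dynamo

    torch._dynamo.config.automatic_dynamic_shapes = True

    @torch.compile
    def f(x):
        return x * 2

    f(torch.randn(4))   # first compile; dim 0 assumed static
    f(torch.randn(8))   # size changed -> recompile with dim 0 marked dynamic
    f(torch.randn(16))  # typically served by the dynamic artifact, no recompile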
| 170 | +recompile_limit: int = Config(alias="torch._dynamo.config.recompile_limit") |
| 171 | +""" |
| 172 | +Maximum number of recompilations allowed for a single function before falling |
| 173 | +back to eager execution. This compilation performance control prevents excessive |
| 174 | +recompilation overhead that can degrade overall performance. |
| 175 | +""" |
| 176 | + |
| 177 | +accumulated_recompile_limit: int = Config( |
| 178 | + alias="torch._dynamo.config.accumulated_recompile_limit" |
| 179 | +) |
| 180 | +""" |
| 181 | +Global limit on total recompilations across all compiled functions to prevent |
| 182 | +runaway recompilation scenarios. This safeguard protects against compilation |
| 183 | +performance issues that could affect the entire program. |
| 184 | +""" |
| 185 | + |
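A small sketch of tightening both limits through the aliases above; the numeric values are illustrative only:

    import torch._dynamo

    # Illustrative caps: 4 recompiles per function, 64 across the whole program,
    # after which execution falls back to eager.
    torch._dynamo.config.recompile_limit = 4
    torch._dynamo.config.accumulated_recompile_limit = 64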
| 186 | +verbose: bool = Config(alias="torch._dynamo.config.verbose") |
| 187 | +""" |
| 188 | +Enables verbose debugging output for Dynamo. When enabled, provides detailed |
| 189 | +information about Dynamo's compilation decisions, optimizations, and potential |
| 190 | +issues. |
| 191 | +""" |
| 192 | + |
| 193 | + |
| 194 | +# TorchDynamo-specific configuration options |
| 195 | + |
| 196 | +capture_scalar_outputs: bool = Config( |
| 197 | + alias="torch._dynamo.config.capture_scalar_outputs" |
| 198 | +) |
| 199 | +""" |
| 200 | +Controls whether TorchDynamo captures operations that return scalar values (like .item()) |
| 201 | +into the FX graph. When disabled, these operations cause graph breaks. This is a |
| 202 | +TorchDynamo-specific tracing behavior that affects how the tracer handles |
| 203 | +scalar-returning operations. |
| 204 | +""" |
| 205 | + |
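A hedged example of the difference this makes: with the flag enabled, the .item() call can be kept in the traced graph instead of splitting it:

    import torch
    import torch._dynamo

    torch._dynamo.config.capture_scalar_outputs = True

    @torch.compile(fullgraph=True)
    def f(x):
        s = x.sum().item()  # captured as a symbolic scalar rather than breaking the graph
        return x + s

    f(torch.randn(4))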
| 206 | +capture_dynamic_output_shape_ops: bool = Config( |
| 207 | + alias="torch._dynamo.config.capture_dynamic_output_shape_ops" |
| 208 | +) |
| 209 | +""" |
| 210 | +Controls whether TorchDynamo captures operations with dynamic output shapes (like |
| 211 | +nonzero, unique) into the FX graph. When disabled, these operations cause graph breaks. |
| 212 | +This is a TorchDynamo-specific setting for handling operations with unpredictable |
| 213 | +output shapes during tracing. |
| 214 | +""" |
| 215 | + |
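A sketch along the same lines for data-dependent output shapes, using nonzero as mentioned in the description:

    import torch
    import torch._dynamo

    torch._dynamo.config.capture_dynamic_output_shape_ops = True

    @torch.compile
    def f(x):
        return torch.nonzero(x)  # output length depends on the input data

    f(torch.tensor([0.0, 1.0, 0.0, 2.0]))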
| 216 | +log_file_name: Optional[str] = Config(alias="torch._dynamo.config.log_file_name") |
| 217 | +""" |
| 218 | +Specifies a file path for TorchDynamo-specific logging output. When set, internal |
| 219 | +TorchDynamo debug information is written to this file rather than stdout. This is |
| 220 | +useful for debugging TorchDynamo's internal tracing behavior. |
| 221 | +""" |
| 222 | + |
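A minimal sketch combining this with the verbose flag above; the file path is a placeholder:

    import torch._dynamo

    torch._dynamo.config.verbose = True
    torch._dynamo.config.log_file_name = "/tmp/dynamo_debug.log"  # placeholder path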
| 223 | +fail_on_recompile_limit_hit: bool = Config( |
| 224 | + alias="torch._dynamo.config.fail_on_recompile_limit_hit" |
| 225 | +) |
| 226 | +""" |
| 227 | +Raises a hard error when recompile limits are exceeded instead of falling back |
| 228 | +to eager execution. This is useful for detecting excessive recompilation in |
| 229 | +performance-critical deployments where you want to ensure compilation overhead |
| 230 | +is kept under control. |
| 231 | +""" |
| 232 | + |
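A sketch of pairing this with a per-function limit for a deployment where silent fallback to eager is unwanted; the cap is illustrative:

    import torch._dynamo

    torch._dynamo.config.recompile_limit = 8                  # illustrative cap
    torch._dynamo.config.fail_on_recompile_limit_hit = True   # raise instead of falling back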
| 233 | +allow_unspec_int_on_nn_module: bool = Config( |
| 234 | + alias="torch._dynamo.config.allow_unspec_int_on_nn_module" |
| 235 | +) |
| 236 | +""" |
| 237 | +Allows integer attributes of nn.Module instances to be unspecialized through |
| 238 | +the dynamic shape mechanism. By default, TorchDynamo specializes on all integer |
| 239 | +module attributes, but this can cause excessive recompilation when integers |
| 240 | +like step counters change frequently. |
| 241 | +""" |
| 242 | + |
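A hedged sketch of the step-counter case mentioned above; with the flag set, mutating the integer attribute should not force a new specialization on every call:

    import torch
    import torch._dynamo

    torch._dynamo.config.allow_unspec_int_on_nn_module = True

    class Counter(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.step = 0  # plain int attribute mutated on every call

        def forward(self, x):
            self.step += 1
            return x * 2

    compiled = torch.compile(Counter())
    compiled(torch.randn(4))
    compiled(torch.randn(4))  # step changed; intended to be served without recompiling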
| 243 | +skip_tensor_guards_with_matching_dict_tags: bool = Config( |
| 244 | + alias="torch._dynamo.config.skip_tensor_guards_with_matching_dict_tags" |
| 245 | +) |
| 246 | +""" |
| 247 | +Optimizes guard generation by treating tensors as immutable when they are |
| 248 | +dictionary values with consistent dictionary tags across invocations. This |
| 249 | +reduces guard overhead for tensors stored in persistent data structures. |
| 250 | +""" |
| 251 | + |
| 252 | +enable_cpp_symbolic_shape_guards: bool = Config( |
| 253 | + alias="torch._dynamo.config.enable_cpp_symbolic_shape_guards" |
| 254 | +) |
| 255 | +""" |
| 256 | +Uses C++ implementation for symbolic shape guard evaluation to improve performance. |
| 257 | +The C++ guard manager can significantly speed up guard checking for symbolic shapes |
| 258 | +in shape-polymorphic compilations. |
| 259 | +""" |
| 260 | + |
| 261 | +wrap_top_frame: bool = Config(alias="torch._dynamo.config.wrap_top_frame") |
| 262 | +""" |
| 263 | +Wraps the top-level decorated function/module in a frame wrapper to ensure |
| 264 | +nn.Module hooks are compiled within the same frame as the main function. This |
| 265 | +improves compilation coverage for models that rely on hooks. |
| 266 | +""" |
| 267 | + |
| 268 | +reorderable_logging_functions: set = Config( |
| 269 | + alias="torch._dynamo.config.reorderable_logging_functions" |
| 270 | +) |
| 271 | +""" |
| 272 | +A set of logging functions that can be reordered to execute after the compiled |
| 273 | +portion of the graph, allowing larger graphs to be captured. Functions in this |
| 274 | +set will have their execution deferred to avoid graph breaks, though this may |
 | 275 | +affect the timing of log output. In particular, a value that is mutated after
 | 276 | +the logging call is logged in its mutated state, so the output may be incorrect.
| 277 | +""" |
| 278 | + |
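A sketch of registering a function, assuming the set accepts arbitrary callables such as the built-in print (the defaults are not shown in this change):

    import torch
    import torch._dynamo

    # Assumption: print can be registered so its calls are deferred rather than
    # forcing a graph break at each call site.
    torch._dynamo.config.reorderable_logging_functions.add(print)

    @torch.compile
    def f(x):
        y = x * 2
        print("computed y")  # runs after the compiled graph finishes
        return y + 1

    f(torch.randn(4))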
| 279 | + |
124 | 280 | install_config_module(sys.modules[__name__]) |