@@ -21,7 +21,7 @@ class XPUOptions:
     num_ctas: int = 1
     num_stages: int = 2
     cluster_dims: tuple = (1, 1, 1)
-    threads_per_warp: int = 32
+    warp_size: int = 32
     optimize_epilogue: bool = False
     enable_fp_fusion: bool = True
     launch_cooperative_grid: bool = False
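For context, upstream Triton's GPUTarget already names this field warp_size, so the rename makes the option spelling consistent end to end. Below is a minimal sketch of how the renamed option would be requested at launch; the kernel and values are hypothetical, and it assumes Triton's usual behavior of forwarding extra launch kwargs into the backend options:

    import torch
    import triton
    import triton.language as tl

    @triton.jit
    def copy_kernel(src_ptr, dst_ptr, n, BLOCK: tl.constexpr):
        offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
        mask = offs < n
        tl.store(dst_ptr + offs, tl.load(src_ptr + offs, mask=mask), mask=mask)

    n, BLOCK = 1024, 128
    src = torch.randn(n, device="xpu")
    dst = torch.empty_like(src)
    # The sub-group size is now requested as warp_size (formerly
    # threads_per_warp); 16 and 32 are the sizes Intel GPUs commonly expose.
    copy_kernel[(triton.cdiv(n, BLOCK),)](src, dst, n, BLOCK=BLOCK, num_warps=4, warp_size=16)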
@@ -177,10 +177,10 @@ def load_dialects(self, ctx):
 
     @staticmethod
     def validate_options(opt, properties):
-        # Check threads_per_warp and num_threads are within limits.
-        if opt.threads_per_warp not in properties['sub_group_sizes']:
+        # Check warp_size and num_threads are within limits.
+        if opt.warp_size not in properties['sub_group_sizes']:
             raise ValueError(
-                f"threads_per_warp={opt.threads_per_warp} is unsupported for the target (supported values are {properties['sub_group_sizes']})"
+                f"warp_size={opt.warp_size} is unsupported for the target (supported values are {properties['sub_group_sizes']})"
             )
         if opt.num_warps > properties['max_num_sub_groups']:
             raise ValueError(
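The effect of the renamed check, shown with stand-in values (the properties dict below is hypothetical but uses the same keys this file reads from the driver):

    # Stand-ins mirroring the validation above (hypothetical values).
    properties = {"sub_group_sizes": [16, 32], "max_num_sub_groups": 64}
    warp_size = 8

    if warp_size not in properties["sub_group_sizes"]:
        raise ValueError(
            f"warp_size={warp_size} is unsupported for the target "
            f"(supported values are {properties['sub_group_sizes']})"
        )
    # Raises: ValueError: warp_size=8 is unsupported for the target
    # (supported values are [16, 32])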
@@ -197,7 +197,7 @@ def annotate_module(mod, properties, opt, target_arch):
         module_opts.support_sg_2d_block = properties["has_subgroup_2d_block_io"]
         module_opts.support_dpas = properties["has_subgroup_matrix_multiply_accumulate"]
         module_opts.support_bf16_conversion = properties["has_bfloat16_conversions"]
-        module_opts.threads_per_warp = opt.threads_per_warp
+        module_opts.threads_per_warp = opt.warp_size
         module_opts.target_arch = target_arch
         intel.passes.ttgpuir.add_triton_annotate_module(pm, module_opts)
         pm.run(mod)
@@ -241,8 +241,8 @@ def make_ttgir(mod, metadata, opt, properties):
         # Annotate module with information required by subsequent transformations.
         XPUBackend.annotate_module(mod, properties, opt, "spir64")
 
-        # Overwrite the threads_per_warp option with the module annotation.
-        opt.threads_per_warp = intel.get_threads_per_warp(mod)
+        # Overwrite the warp_size option with the module annotation.
+        opt.warp_size = intel.get_threads_per_warp(mod)
         XPUBackend.validate_options(opt, properties)
 
         if (properties["has_subgroup_2d_block_io"] and properties["has_subgroup_matrix_multiply_accumulate"]
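To make the dataflow explicit: warp_size flows into annotate_module, which stores it under the IR-level name threads_per_warp (deliberately left untouched by this rename), and intel.get_threads_per_warp reads it back so the option mirrors whatever the annotation pass settled on. A toy round-trip, with a plain dict standing in for the annotated MLIR module:

    # Toy stand-ins: a dict plays the role of the annotated MLIR module.
    def annotate_module(mod, warp_size):
        # The real pass may legalize the request (e.g. pick a supported
        # sub-group size), which is why the caller re-reads it afterwards.
        mod["threads_per_warp"] = warp_size

    def get_threads_per_warp(mod):
        return mod["threads_per_warp"]

    mod = {}
    annotate_module(mod, warp_size=16)
    warp_size = get_threads_per_warp(mod)  # opt.warp_size now matches the module
    assert warp_size == 16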
@@ -251,7 +251,7 @@ def make_ttgir(mod, metadata, opt, properties):
 
         pm = ir.pass_manager(mod.context)
         pm.enable_debug()
-        passes.ttir.add_convert_to_ttgpuir(pm, "xpu", opt.num_warps, opt.threads_per_warp, opt.num_ctas)
+        passes.ttir.add_convert_to_ttgpuir(pm, "xpu", opt.num_warps, opt.warp_size, opt.num_ctas)
         # optimize TTGIR
         intel.passes.ttgpuir.add_coalesce(pm)
         intel.passes.ttgpuir.add_remove_layout_conversions(pm)
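Only the Python-side spelling changes here; the conversion pass still receives the warp size in the same positional slot. A sketch of driving this stage by hand (assumes mod is an already-parsed TTIR module, which is elided; the import path is the one Triton backends use):

    from triton._C.libtriton import ir, passes

    # mod: a TTIR module produced by the frontend (obtaining it is elided).
    pm = ir.pass_manager(mod.context)
    pm.enable_debug()
    # Positional arguments: target name, num_warps, warp size, num_ctas.
    passes.ttir.add_convert_to_ttgpuir(pm, "xpu", 4, 16, 1)
    pm.run(mod)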
@@ -296,11 +296,11 @@ def gluon_to_ttgir(self, src, metadata, options):
         pm = ir.pass_manager(mod.context)
         pm.enable_debug()
 
-        passes.ttgpuir.add_inliner(pm)
+        passes.gluon.add_inliner(pm)
         passes.gluon.add_resolve_auto_encodings(pm)
         passes.common.add_sccp(pm)
         passes.ttir.add_loop_aware_cse(pm)
-        passes.ttgpuir.add_canonicalizer(pm)
+        passes.gluon.add_canonicalizer(pm)
         passes.ttgpuir.add_combine_tensor_select_and_if(pm)
 
         pm.run(mod)