diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index c97374a15b..d0adfd020a 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -4,20 +4,6 @@
         # XPU implementation doesn't claim FP8 now
         # https://github.com/intel/torch-xpu-ops/issues/461
         "float8",
-        # workarounds for the following tests
-        # https://github.com/intel/torch-xpu-ops/issues/1214
-        "test_python_ref__refs_exp_xpu_complex128",
-        "test_python_ref__refs_sigmoid_xpu_complex128",
-        "test_python_ref_executor__refs_log2_executor_aten_xpu_complex128",
-        "test_python_ref_executor__refs_exp_executor_aten_xpu_complex128",
-        "test_python_ref_torch_fallback__refs_log2_xpu_complex128",
-        "test_python_ref_torch_fallback__refs_log10_xpu_complex128",
-        "test_python_ref_torch_fallback__refs_sigmoid_xpu_complex128",
-        "test_python_ref_executor__refs_log10_executor_aten_xpu_complex128",
-        "test_noncontiguous_samples_histogram_xpu_float32",
-        "test_python_ref_executor__refs_sigmoid_executor_aten_xpu_complex128",
-        # TODO: Fix the following tests
-        "test_out_warning_torch__scaled_mm_xpu",
         # To be removed from this file.
         # CUDA and XPU both XFAIL now.
         "test_out_narrow_copy_xpu_float32",
@@ -38,7 +24,6 @@
         "test_errors_dot_xpu",
         "test_errors_vdot_xpu",
         # Linalg OPs not supported
-        "test_noncontiguous_samples_linalg_tensorsolve_xpu_float32",
         "test_noncontiguous_samples_logdet_xpu_float32",
         # Sparse CSR OPs not supported
         # RuntimeError: device type of values (xpu) must be CPU or CUDA or Meta
@@ -96,9 +81,6 @@
         "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32",
         "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32",
         "test_python_ref_torch_fallback__refs_pow_xpu_complex32",
-        # unexpected success because of cpu fallback
-        # Linalg OPs not supported
-        "test_out_triangular_solve_xpu_float32",
         # Newly added:
         # Cuda skipped it
         "test_non_standard_bool_values_sort_xpu_bool",  # The implementation aligns with CUDA, RuntimeError: "sort" not implemented for 'Bool'.
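A minimal repro sketch for the bool-sort skip above, assuming a PyTorch build with an XPU device (CUDA raises the same error):

import torch

# Neither CUDA nor XPU ships a 'Bool' sort kernel, so this raises at run time.
x = torch.tensor([True, False, True], device="xpu")
try:
    x.sort()
except RuntimeError as e:
    print(e)  # expected: "sort" not implemented for 'Bool'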
@@ -111,9 +93,6 @@
         # Jiterator is only supported on CUDA and ROCm GPUs, none are available.
         # https://github.com/intel/torch-xpu-ops/issues/584
         "_jiterator_",
-        # https://github.com/intel/torch-xpu-ops/issues/157
-        # Segfault:
-        "test_dtypes_nn_functional_multi_head_attention_forward_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
         # Linalg OPs not supported
         "test_dtypes_pca_lowrank_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
         "test_dtypes_svd_lowrank_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
@@ -166,25 +145,20 @@
         "test_dtypes_lu_solve_xpu",
         "test_dtypes_lu_xpu",
         "test_dtypes_mv_xpu",
-        "test_dtypes_nn_functional_scaled_dot_product_attention_xpu",
         "test_dtypes_norm_nuc_xpu",
         "test_dtypes_pinverse_xpu",
         "test_dtypes_qr_xpu",
         "test_dtypes_svd_xpu",
-        "test_dtypes_tensordot_xpu",
         "test_dtypes_triangular_solve_xpu",
         "test_noncontiguous_samples___rmatmul___xpu_complex64",
         "test_noncontiguous_samples___rmatmul___xpu_int64",
         "test_noncontiguous_samples_addbmm_xpu_complex64",
-        "test_noncontiguous_samples_addbmm_xpu_float32",
         "test_noncontiguous_samples_addbmm_xpu_int64",
         "test_noncontiguous_samples_addmm_decomposed_xpu_complex64",
         "test_noncontiguous_samples_addmm_decomposed_xpu_int64",
         "test_noncontiguous_samples_addmm_xpu_complex64",
-        "test_noncontiguous_samples_addmm_xpu_float32",
         "test_noncontiguous_samples_addmm_xpu_int64",
         "test_noncontiguous_samples_addmv_xpu_complex64",
-        "test_noncontiguous_samples_addmv_xpu_float32",
         "test_noncontiguous_samples_addmv_xpu_int64",
         "test_noncontiguous_samples_addr_xpu_complex64",
         "test_noncontiguous_samples_baddbmm_xpu_complex64",
@@ -199,8 +173,6 @@
         "test_noncontiguous_samples_einsum_xpu_complex64",
         "test_noncontiguous_samples_einsum_xpu_int64",
         "test_noncontiguous_samples_geqrf_xpu_complex64",
-        "test_noncontiguous_samples_inner_xpu_complex64",
-        "test_noncontiguous_samples_inner_xpu_int64",
         "test_noncontiguous_samples_linalg_cholesky_ex_xpu_complex64",
         "test_noncontiguous_samples_linalg_cholesky_xpu_complex64",
         "test_noncontiguous_samples_linalg_cond_xpu_complex64",
@@ -263,11 +235,7 @@
         "test_numpy_ref_addbmm_xpu_float64",
         "test_numpy_ref_addbmm_xpu_int64",
         "test_numpy_ref_linalg_tensorinv_xpu_complex128",
-        "test_out_addbmm_xpu_float32",
-        "test_out_addmm_xpu_float32",
         "test_out_addmv_xpu_float32",
-        "test_out_baddbmm_xpu_float32",
-        "test_out_mm_xpu_float32",
         "test_out_mv_xpu_float32",
         "test_out_requires_grad_error_addbmm_xpu_complex64",
         "test_out_requires_grad_error_addmm_decomposed_xpu_complex64",
@@ -278,7 +246,6 @@
         "test_out_requires_grad_error_cholesky_inverse_xpu_complex64",
         "test_out_requires_grad_error_cholesky_solve_xpu_complex64",
         "test_out_requires_grad_error_cholesky_xpu_complex64",
-        "test_out_requires_grad_error_inner_xpu_complex64",
         "test_out_requires_grad_error_linalg_cholesky_ex_xpu_complex64",
         "test_out_requires_grad_error_linalg_cholesky_xpu_complex64",
         "test_out_requires_grad_error_linalg_eig_xpu_complex64",
@@ -305,38 +272,23 @@
         "test_out_requires_grad_error_qr_xpu_complex64",
         "test_out_requires_grad_error_tensordot_xpu_complex64",
         "test_out_requires_grad_error_triangular_solve_xpu_complex64",
-        "test_out_warning_addmm_decomposed_xpu",
-        "test_out_warning_addmm_xpu",
-        "test_out_warning_addmv_xpu",
-        "test_out_warning_baddbmm_xpu",
-        "test_out_warning_bmm_xpu",
-        "test_out_warning_matmul_xpu",
-        "test_out_warning_mm_xpu",
-        "test_out_warning_mv_xpu",
-        "test_out_warning_nn_functional_linear_xpu",
         "test_python_ref__refs_linalg_svd_xpu_complex128",
         "test_python_ref__refs_linalg_svd_xpu_complex64",
         "test_python_ref__refs_linalg_svd_xpu_float64",
"test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex128", "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex64", "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_float64", - "test_python_ref_executor__refs_nn_functional_pdist_executor_aten_xpu_float64", "test_python_ref_meta__refs_linalg_svd_xpu_complex128", "test_python_ref_meta__refs_linalg_svd_xpu_complex64", "test_python_ref_meta__refs_linalg_svd_xpu_float64", - "test_python_ref_meta__refs_nn_functional_pdist_xpu_float64", "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex128", "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex64", "test_python_ref_torch_fallback__refs_linalg_svd_xpu_float64", - "test_python_ref_torch_fallback__refs_nn_functional_pdist_xpu_float64", "test_variant_consistency_eager___rmatmul___xpu_complex64", "test_variant_consistency_eager_addmm_decomposed_xpu_complex64", "test_variant_consistency_eager_addmm_xpu_complex64", - "test_variant_consistency_eager_addmm_xpu_float32", "test_variant_consistency_eager_addmv_xpu_complex64", - "test_variant_consistency_eager_addmv_xpu_float32", "test_variant_consistency_eager_baddbmm_xpu_complex64", - "test_variant_consistency_eager_baddbmm_xpu_float32", "test_variant_consistency_eager_bmm_xpu_complex64", "test_variant_consistency_eager_cholesky_inverse_xpu_complex64", "test_variant_consistency_eager_cholesky_solve_xpu_complex64", @@ -345,7 +297,6 @@ "test_variant_consistency_eager_cov_xpu_complex64", "test_variant_consistency_eager_einsum_xpu_complex64", "test_variant_consistency_eager_geqrf_xpu_complex64", - "test_variant_consistency_eager_inner_xpu_complex64", "test_variant_consistency_eager_linalg_cholesky_ex_xpu_complex64", "test_variant_consistency_eager_linalg_cholesky_xpu_complex64", "test_variant_consistency_eager_linalg_cond_xpu_complex64", @@ -419,7 +370,6 @@ "test_conj_view_cov_xpu_complex64", "test_conj_view_einsum_xpu_complex64", "test_conj_view_geqrf_xpu_complex64", - "test_conj_view_inner_xpu_complex64", "test_conj_view_linalg_cholesky_ex_xpu_complex64", "test_conj_view_linalg_cholesky_xpu_complex64", "test_conj_view_linalg_cond_xpu_complex64", @@ -483,7 +433,6 @@ "test_neg_conj_view_corrcoef_xpu_complex128", "test_neg_conj_view_cov_xpu_complex128", "test_neg_conj_view_geqrf_xpu_complex128", - "test_neg_conj_view_inner_xpu_complex128", "test_neg_conj_view_linalg_cholesky_ex_xpu_complex128", "test_neg_conj_view_linalg_cholesky_xpu_complex128", "test_neg_conj_view_linalg_cond_xpu_complex128", @@ -525,25 +474,15 @@ "test_neg_conj_view_qr_xpu_complex128", "test_neg_conj_view_tensordot_xpu_complex128", "test_neg_conj_view_triangular_solve_xpu_complex128", - "test_neg_view___rmatmul___xpu_float64", "test_neg_view__refs_linalg_svd_xpu_float64", - "test_neg_view__refs_nn_functional_pdist_xpu_float64", - "test_neg_view_addbmm_xpu_float64", "test_neg_view_addmm_decomposed_xpu_float64", "test_neg_view_addmm_xpu_float64", "test_neg_view_addmv_xpu_float64", - "test_neg_view_addr_xpu_float64", "test_neg_view_baddbmm_xpu_float64", - "test_neg_view_bmm_xpu_float64", - "test_neg_view_cdist_xpu_float64", "test_neg_view_cholesky_inverse_xpu_float64", "test_neg_view_cholesky_solve_xpu_float64", "test_neg_view_cholesky_xpu_float64", - "test_neg_view_corrcoef_xpu_float64", - "test_neg_view_cov_xpu_float64", - "test_neg_view_einsum_xpu_float64", "test_neg_view_geqrf_xpu_float64", - "test_neg_view_inner_xpu_float64", "test_neg_view_linalg_cholesky_ex_xpu_float64", "test_neg_view_linalg_cholesky_xpu_float64", 
"test_neg_view_linalg_cond_xpu_float64", @@ -562,7 +501,6 @@ "test_neg_view_linalg_matrix_power_xpu_float64", "test_neg_view_linalg_matrix_rank_hermitian_xpu_float64", "test_neg_view_linalg_matrix_rank_xpu_float64", - "test_neg_view_linalg_multi_dot_xpu_float64", "test_neg_view_linalg_norm_subgradients_at_zero_xpu_float64", "test_neg_view_linalg_norm_xpu_float64", "test_neg_view_linalg_pinv_hermitian_xpu_float64", @@ -573,16 +511,8 @@ "test_neg_view_linalg_svd_xpu_float64", "test_neg_view_linalg_svdvals_xpu_float64", "test_neg_view_linalg_tensorinv_xpu_float64", - "test_neg_view_linalg_tensorsolve_xpu_float64", "test_neg_view_logdet_xpu_float64", "test_neg_view_lu_xpu_float64", - "test_neg_view_matmul_xpu_float64", - "test_neg_view_mm_xpu_float64", - "test_neg_view_mv_xpu_float64", - "test_neg_view_nn_functional_bilinear_xpu_float64", - "test_neg_view_nn_functional_linear_xpu_float64", - "test_neg_view_nn_functional_multi_head_attention_forward_xpu_float64", - "test_neg_view_nn_functional_scaled_dot_product_attention_xpu_float64", "test_neg_view_norm_nuc_xpu_float64", "test_neg_view_ormqr_xpu_float64", "test_neg_view_pca_lowrank_xpu_float64", @@ -590,7 +520,6 @@ "test_neg_view_qr_xpu_float64", "test_neg_view_svd_lowrank_xpu_float64", "test_neg_view_svd_xpu_float64", - "test_neg_view_tensordot_xpu_float64", "test_neg_view_triangular_solve_xpu_float64", "test_noncontiguous_samples_pca_lowrank_xpu_complex64", "test_noncontiguous_samples_svd_lowrank_xpu_complex64", @@ -612,48 +541,16 @@ "test_dtypes_histogram_xpu", # Unexpected success, CUDA got XFAIL because CUDA does not have historgramadd supported "test_errors_histogramdd_xpu", - # 2025 bundle std::pow complex result is different on host and device - "test_python_ref__refs_square_xpu_complex64", - "test_python_ref_torch_fallback__refs_square_xpu_complex64", - "test_python_ref_torch_fallback__refs_exp_xpu_complex128", # Failed on rolling driver, passed on preci "test_python_ref__refs_div_trunc_rounding_xpu_float64", "test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64", "test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64", - # TODO: passed from source code building version, investigate - "test_python_ref__refs_log2_xpu_complex128", - # The following dtypes did not work in backward but are listed by the OpInfo: {torch.bfloat16}. - "test_dtypes_fft_fft2_xpu", - "test_dtypes_fft_fft_xpu", - "test_dtypes_fft_fftn_xpu", - "test_dtypes_fft_hfft2_xpu", - "test_dtypes_fft_hfft_xpu", - "test_dtypes_fft_hfftn_xpu", - "test_dtypes_fft_ifft2_xpu", - "test_dtypes_fft_ifft_xpu", - "test_dtypes_fft_ifftn_xpu", - "test_dtypes_fft_ihfft2_xpu", - "test_dtypes_fft_ihfft_xpu", - "test_dtypes_fft_ihfftn_xpu", - "test_dtypes_fft_irfft2_xpu", - "test_dtypes_fft_irfft_xpu", - "test_dtypes_fft_irfftn_xpu", - "test_dtypes_fft_rfft2_xpu", - "test_dtypes_fft_rfft_xpu", - "test_dtypes_fft_rfftn_xpu", ), "test_binary_ufuncs_xpu.py": ( "test_fmod_remainder_by_zero_integral_xpu_int64", # zero division is an undefined behavior: different handles on different backends "test_div_rounding_numpy_xpu_float16", # Calculation error. XPU implementation uses opmath type. # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available. "_jiterator_", - # nextafter: Numeric error due to `std::nextafter` difference between CPU (GCC) and XPU (SYCL) - # https://github.com/intel/torch-xpu-ops/issues/623 - # AssertionError: Scalars are not equal! - # Expected 9.183549615799121e-41 but got 0.0. 
         # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available.
         "_jiterator_",
-        # nextafter: Numeric error due to `std::nextafter` difference between CPU (GCC) and XPU (SYCL)
-        # https://github.com/intel/torch-xpu-ops/issues/623
-        # AssertionError: Scalars are not equal!
-        # Expected 9.183549615799121e-41 but got 0.0.
-        # Absolute difference: 9.183549615799121e-41
-        # Relative difference: 1.0
-        "test_nextafter_bfloat16_xpu_bfloat16",
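The removed nextafter skip tracked a subnormal-handling gap between GCC's and SYCL's std::nextafter; a sketch of the probe it performed (assumes an XPU build, with CPU as the reference):

import torch

zero = torch.tensor(0.0, dtype=torch.bfloat16)
one = torch.tensor(1.0, dtype=torch.bfloat16)
# Stepping from 0 toward 1 should give the smallest bfloat16 subnormal,
# ~9.18e-41; the XPU kernel used to flush it to 0.0 (issue #623).
print(torch.nextafter(zero, one).item())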
"test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu", - # https://github.com/intel/torch-xpu-ops/issues/1432 - "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu", - "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_3_key_padding_mask_dim_2_bool_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_False_d_model_12_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_True_d_model_12_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_False_d_model_12_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_True_d_model_12_xpu", ), "test_complex_xpu.py": None, "test_modules_xpu.py": ( # oneDNN issues # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_cpu_gpu_parity_nn_Bilinear_xpu_float64", - "test_cpu_gpu_parity_nn_GRUCell_xpu_float64", - "test_cpu_gpu_parity_nn_GRU_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_GRU_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_LSTMCell_xpu_float64", - "test_cpu_gpu_parity_nn_LSTM_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_LSTM_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_Linear_xpu_float64", - "test_cpu_gpu_parity_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_MultiheadAttention_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_RNNCell_xpu_float64", - "test_cpu_gpu_parity_nn_RNN_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_RNN_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerDecoderLayer_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoder_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_Transformer_xpu_float64", - "test_forward_nn_Bilinear_xpu_float64", - "test_forward_nn_GRUCell_xpu_float64", - "test_forward_nn_GRU_eval_mode_xpu_float64", - "test_forward_nn_GRU_train_mode_xpu_float64", - "test_forward_nn_LSTMCell_xpu_float64", - "test_forward_nn_LSTM_eval_mode_xpu_float64", - "test_forward_nn_LSTM_train_mode_xpu_float64", - "test_forward_nn_Linear_xpu_float64", - "test_forward_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_forward_nn_MultiheadAttention_train_mode_xpu_float64", - "test_forward_nn_RNNCell_xpu_float64", - "test_forward_nn_RNN_eval_mode_xpu_float64", - "test_forward_nn_RNN_train_mode_xpu_float64", - "test_forward_nn_TransformerDecoderLayer_xpu_float64", - "test_forward_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_forward_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_forward_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_forward_nn_TransformerEncoder_train_mode_xpu_float64", - "test_forward_nn_Transformer_xpu_float64", - "test_grad_nn_Bilinear_xpu_float64", - "test_grad_nn_GRUCell_xpu_float64", - "test_grad_nn_GRU_eval_mode_xpu_float64", - "test_grad_nn_GRU_train_mode_xpu_float64", - "test_grad_nn_LSTMCell_xpu_float64", - "test_grad_nn_LSTM_eval_mode_xpu_float64", - "test_grad_nn_LSTM_train_mode_xpu_float64", - "test_grad_nn_Linear_xpu_float64", 
"test_grad_nn_MultiheadAttention_eval_mode_xpu_float64", "test_grad_nn_MultiheadAttention_train_mode_xpu_float64", - "test_grad_nn_RNNCell_xpu_float64", - "test_grad_nn_RNN_eval_mode_xpu_float64", - "test_grad_nn_RNN_train_mode_xpu_float64", - "test_grad_nn_TransformerDecoderLayer_xpu_float64", - "test_grad_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_grad_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_grad_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_grad_nn_TransformerEncoder_train_mode_xpu_float64", - "test_grad_nn_Transformer_xpu_float64", - "test_gradgrad_nn_Bilinear_xpu_float64", - "test_gradgrad_nn_GRUCell_xpu_float64", - "test_gradgrad_nn_GRU_eval_mode_xpu_float64", - "test_gradgrad_nn_GRU_train_mode_xpu_float64", - "test_gradgrad_nn_LSTMCell_xpu_float64", - "test_gradgrad_nn_LSTM_eval_mode_xpu_float64", - "test_gradgrad_nn_LSTM_train_mode_xpu_float64", - "test_gradgrad_nn_Linear_xpu_float64", "test_gradgrad_nn_MultiheadAttention_eval_mode_xpu_float64", "test_gradgrad_nn_MultiheadAttention_train_mode_xpu_float64", - "test_gradgrad_nn_RNNCell_xpu_float64", - "test_gradgrad_nn_RNN_eval_mode_xpu_float64", - "test_gradgrad_nn_RNN_train_mode_xpu_float64", - "test_gradgrad_nn_TransformerDecoderLayer_xpu_float64", - "test_gradgrad_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_gradgrad_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_gradgrad_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_gradgrad_nn_TransformerEncoder_train_mode_xpu_float64", "test_gradgrad_nn_Transformer_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_Bilinear_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_GRUCell_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_LSTMCell_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_Linear_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_RNNCell_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_TransformerDecoderLayer_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_TransformerEncoderLayer_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_TransformerEncoder_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_Transformer_xpu_float64", - "test_memory_format_nn_GRUCell_xpu_float64", - "test_memory_format_nn_GRU_eval_mode_xpu_float64", - "test_memory_format_nn_GRU_train_mode_xpu_float64", - "test_memory_format_nn_LSTMCell_xpu_float64", - "test_memory_format_nn_LSTM_eval_mode_xpu_float64", - "test_memory_format_nn_LSTM_train_mode_xpu_float64", - "test_memory_format_nn_RNNCell_xpu_float64", - "test_memory_format_nn_RNN_eval_mode_xpu_float64", - "test_memory_format_nn_RNN_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_Bilinear_xpu_float64", - "test_multiple_device_transfer_nn_GRUCell_xpu_float64", - "test_multiple_device_transfer_nn_GRU_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_GRU_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_LSTMCell_xpu_float64", - "test_multiple_device_transfer_nn_LSTM_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_LSTM_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_Linear_xpu_float64", - "test_multiple_device_transfer_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_MultiheadAttention_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_RNNCell_xpu_float64", - "test_multiple_device_transfer_nn_RNN_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_RNN_train_mode_xpu_float64", - 
"test_multiple_device_transfer_nn_TransformerDecoderLayer_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoder_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_Transformer_xpu_float64", - "test_non_contiguous_tensors_nn_Bilinear_xpu_float64", - "test_non_contiguous_tensors_nn_GRUCell_xpu_float64", - "test_non_contiguous_tensors_nn_GRU_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_GRU_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_LSTMCell_xpu_float64", - "test_non_contiguous_tensors_nn_LSTM_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_LSTM_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_Linear_xpu_float64", - "test_non_contiguous_tensors_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_MultiheadAttention_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_RNNCell_xpu_float64", - "test_non_contiguous_tensors_nn_RNN_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_RNN_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerDecoderLayer_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoder_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_Transformer_xpu_float64", - "test_save_load_nn_Bilinear_xpu_float64", - "test_save_load_nn_GRUCell_xpu_float64", - "test_save_load_nn_GRU_eval_mode_xpu_float64", - "test_save_load_nn_GRU_train_mode_xpu_float64", - "test_save_load_nn_LSTMCell_xpu_float64", - "test_save_load_nn_LSTM_eval_mode_xpu_float64", - "test_save_load_nn_LSTM_train_mode_xpu_float64", - "test_save_load_nn_Linear_xpu_float64", - "test_save_load_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_save_load_nn_MultiheadAttention_train_mode_xpu_float64", - "test_save_load_nn_RNNCell_xpu_float64", - "test_save_load_nn_RNN_eval_mode_xpu_float64", - "test_save_load_nn_RNN_train_mode_xpu_float64", - "test_save_load_nn_TransformerDecoderLayer_xpu_float64", - "test_save_load_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_save_load_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_save_load_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_save_load_nn_TransformerEncoder_train_mode_xpu_float64", - "test_save_load_nn_Transformer_xpu_float64", # Unexpected success: "test_cpu_gpu_parity_nn_ConvTranspose1d_xpu_complex32", "test_cpu_gpu_parity_nn_ConvTranspose2d_xpu_complex32", @@ -986,39 +708,10 @@ "test_transformerencoderlayer_xpu_float32", # oneDNN issues # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_TransformerDecoderLayer_empty_xpu", - "test_TransformerDecoder_empty_xpu", - "test_TransformerEncoder_empty_xpu", - "test_Transformer_empty_xpu", - "test_affine_grid", - "test_affine_grid_3d", - "test_RNN_cpu_vs_cudnn_no_dropout", - "test_RNN_cpu_vs_cudnn_with_dropout", - "test_GRU_grad_and_gradgrad_xpu_float64", - "test_LSTM_grad_and_gradgrad_xpu_float64", - "test_lstmcell_backward_only_one_output_grad_xpu_float64", - "test_module_to_empty_xpu_float64", - "test_RNN_change_dropout", - 
"test_RNN_dropout", - "test_rnn_fused_xpu_float64", - "test_rnn_retain_variables_xpu_float64", "test_transformerencoderlayer_xpu_float64", - "test_variable_sequence_xpu_float64", # Unexpected success: CUDA only test case, launch grid_y == 2**16 (larger than CUDA maximum y-dimension limit 65535) and expect fail. # SYCL don't have this limitation and hence can pass. "test_upsamplingNearest2d_launch_fail_xpu", - # Could not run 'aten::_thnn_fused_lstm_cell' with arguments from the 'CPU' backend. - "test_RNN_cudnn_weight_norm", - "test_partial_flat_weights", - "test_variable_sequence_xpu_float16", - "test_variable_sequence_xpu_float32", - # CPU fallback could not cover - # NotImplementedError: Could not run 'aten::_thnn_fused_gru_cell' with arguments from the 'CPU' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build pro... - "test_cudnn_weight_tying", - "test_RNN_input_size_zero", - "test_rnn_fused_xpu_float32", - "test_rnn_retain_variables_xpu_float16", - "test_rnn_retain_variables_xpu_float32", # AssertionError: False is not true "test_ctc_loss_cudnn_xpu", # want "xpu" in function name "test_ctc_loss_cudnn_tensor", # want "xpu" in function name @@ -1041,21 +734,7 @@ ), "nn/test_pooling_xpu.py": None, "nn/test_dropout_xpu.py": None, - "test_dataloader_xpu.py": ( - # Skip for XPU didn't support - # https://github.com/intel/torch-xpu-ops/issues/613 - "test_nested_tensor_multiprocessing_context_forkserver_xpu", - "test_nested_tensor_multiprocessing_context_spawn_xpu", - # pinned memory issue - # https://github.com/intel/torch-xpu-ops/issues/296 - "test_custom_batch_pin", - "test_sequential_pin_memory", - "test_shuffle_pin_memory", - "test_pin_memory", - # failed in preci - # https://github.com/intel/torch-xpu-ops/issues/928 - "test_segfault", - ), + "test_dataloader_xpu.py": None, "test_tensor_creation_ops_xpu.py": ( # CPU only (vs Numpy). CUDA skips these cases since non-deterministic results are outputed for inf and nan. "test_float_to_int_conversion_finite_xpu_int8", @@ -1066,10 +745,6 @@ ), "test_autocast_xpu.py": None, "test_autograd_xpu.py": ( - # https://github.com/intel/torch-xpu-ops/issues/618 - # c10::NotImplementedError - "test_autograd_composite_implicit_and_dispatch_registration_xpu", - "test_autograd_multiple_dispatch_registrations_xpu", # AttributeError: module 'torch.xpu' has no attribute "test_profiler_emit_nvtx_xpu", # Double and complex datatype matmul is not supported in oneDNN @@ -1077,14 +752,6 @@ # module 'torch._C' has no attribute '_scatter' "test_checkpointing_without_reentrant_dataparallel", "test_dataparallel_saved_tensors_hooks", - # Runtime error after enabling PTI - # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200 - # https://github.com/intel/torch-xpu-ops/issues/731 - "test_profiler", - "test_record_function", - # Sometimes, will raise AssertionError: "Simulate error" does not match "grad can be implicitly created only for scalar outputs" - # https://github.com/intel/torch-xpu-ops/issues/1071 - "test_reentrant_parent_error_on_cpu_xpu", ), "test_reductions_xpu.py": ( # Accumulate error due to different accumulation order. 
@@ -1066,10 +745,6 @@
     ),
     "test_autocast_xpu.py": None,
     "test_autograd_xpu.py": (
-        # https://github.com/intel/torch-xpu-ops/issues/618
-        # c10::NotImplementedError
-        "test_autograd_composite_implicit_and_dispatch_registration_xpu",
-        "test_autograd_multiple_dispatch_registrations_xpu",
         # AttributeError: module 'torch.xpu' has no attribute
         "test_profiler_emit_nvtx_xpu",
         # Double and complex datatype matmul is not supported in oneDNN
         # module 'torch._C' has no attribute '_scatter'
         "test_checkpointing_without_reentrant_dataparallel",
         "test_dataparallel_saved_tensors_hooks",
-        # Runtime error after enabling PTI
-        # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200
-        # https://github.com/intel/torch-xpu-ops/issues/731
-        "test_profiler",
-        "test_record_function",
-        # Sometimes, will raise AssertionError: "Simulate error" does not match "grad can be implicitly created only for scalar outputs"
-        # https://github.com/intel/torch-xpu-ops/issues/1071
-        "test_reentrant_parent_error_on_cpu_xpu",
     ),
     "test_reductions_xpu.py": (
         # Accumulate error due to different accumulation order.
@@ -1097,10 +764,6 @@
         # std operations get different behavior on std::complex operands for extremal cases
         "test_reference_numerics_extremal__refs_log_xpu_complex64",
         "test_reference_numerics_extremal_log_xpu_complex64",
-        "test_reference_numerics_extremal__refs_tanh_xpu_complex128",
-        "test_reference_numerics_extremal__refs_tanh_xpu_complex64",
-        "test_reference_numerics_extremal_tanh_xpu_complex128",
-        "test_reference_numerics_extremal_tanh_xpu_complex64",
         "test_reference_numerics_extremal__refs_acos_xpu_complex64",
         "test_reference_numerics_extremal__refs_acosh_xpu_complex64",
         "test_reference_numerics_extremal_acos_xpu_complex64",
@@ -1115,26 +778,18 @@
         "test_reference_numerics_extremal__refs_log1p_xpu_complex64",
         "test_reference_numerics_extremal_log10_xpu_complex64",
         "test_reference_numerics_extremal_log1p_xpu_complex64",
-        "test_reference_numerics_extremal__refs_tan_xpu_complex128",
-        "test_reference_numerics_extremal__refs_tan_xpu_complex64",
-        "test_reference_numerics_extremal_tan_xpu_complex128",
-        "test_reference_numerics_extremal_tan_xpu_complex64",
-        "test_reference_numerics_large__refs_tan_xpu_complex32",
-        "test_reference_numerics_large_tan_xpu_complex32",
         "test_reference_numerics_large__refs_asinh_xpu_complex128",
         "test_reference_numerics_large__refs_asinh_xpu_complex64",
         "test_reference_numerics_large__refs_asinh_xpu_complex32",
         "test_reference_numerics_large_asinh_xpu_complex128",
         "test_reference_numerics_large_asinh_xpu_complex64",
         "test_reference_numerics_large_asinh_xpu_complex32",
-        "test_reference_numerics_normal_exp_xpu_complex128",
         # AssertionError: Tensor-likes are not close!
         # exceeded maximum allowed difference
         # Greatest absolute difference: 6.266784475883469e-05 at index (463, 204) (up to 1e-05 allowed)
         # Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed)
         "test_reference_numerics_normal__refs_asinh_xpu_complex64",
         "test_reference_numerics_normal_asinh_xpu_complex64",
-        "test_batch_vs_slicing__refs_sigmoid_xpu_complex128",
         # Unexpected success: CUDA uses thrust::sqrt and has accuracy issue. XPU uses std::sqrt and has no issue.
         "test_reference_numerics_large_rsqrt_xpu_complex32",
         # Numeric difference
@@ -1153,52 +808,11 @@
         "test_reference_numerics_normal_polygamma_polygamma_n_4_xpu_float16",
         # CUDA XFAIL
         "test_reference_numerics_large__refs_rsqrt_xpu_complex32",
-        # 2025 bundle std::pow complex result is different on host and device
-        "test_exp_xpu_complex64",
-        "test_reference_numerics_extremal__refs_exp2_xpu_complex64",
-        "test_reference_numerics_extremal__refs_exp_xpu_complex64",
-        "test_reference_numerics_extremal_exp2_xpu_complex64",
-        "test_reference_numerics_extremal_exp_xpu_complex64",
-        "test_reference_numerics_large__refs_exp_xpu_complex32",
-        "test_reference_numerics_large_exp_xpu_complex32",
     ),
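The extremal-value complex skips above come down to std::complex special-case rules that differ between the host library (libstdc++) and the SYCL device implementation. Python's cmath follows C99 Annex G and can serve as a reference point for the values these tests probe:

import cmath

z = complex(float("-inf"), float("inf"))
print(cmath.log(z))  # Annex G reference: inf + (3*pi/4)j
print(cmath.tanh(complex(float("inf"), float("nan"))))  # another extremal probe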
     "test_masked_xpu.py": (
-        # Summary: Sparse CSR for XPU is not supported
-        # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend.
-        # https://github.com/intel/torch-xpu-ops/issues/357
-        "test_mask_layout_sparse_coo_masked_amax_xpu_bfloat16",
-        "test_mask_layout_sparse_coo_masked_amax_xpu_float16",
-        "test_mask_layout_sparse_coo_masked_amax_xpu_float32",
-        "test_mask_layout_sparse_coo_masked_amax_xpu_float64",
-        "test_mask_layout_sparse_coo_masked_amin_xpu_bfloat16",
-        "test_mask_layout_sparse_coo_masked_amin_xpu_float16",
-        "test_mask_layout_sparse_coo_masked_amin_xpu_float32",
-        "test_mask_layout_sparse_coo_masked_amin_xpu_float64",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_bfloat16",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_bool",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_complex128",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_complex64",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_float16",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_float32",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_float64",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_int16",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_int32",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_int64",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_int8",
-        "test_mask_layout_sparse_coo_masked_prod_xpu_uint8",
-        # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend.
-        "test_mask_layout_sparse_coo_masked_sum_xpu_bfloat16",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_bool",
+        # Segmentation fault
         "test_mask_layout_sparse_coo_masked_sum_xpu_complex128",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_complex64",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_float16",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_float32",
         "test_mask_layout_sparse_coo_masked_sum_xpu_float64",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_int16",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_int32",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_int64",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_int8",
-        "test_mask_layout_sparse_coo_masked_sum_xpu_uint8",
     ),
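The removed masked-op skips all traced back to one missing kernel: SparseXPU cannot convert COO to CSR. A minimal sketch, assuming an XPU build:

import torch

i = torch.tensor([[0, 1], [1, 0]])
v = torch.tensor([1.0, 2.0])
s = torch.sparse_coo_tensor(i, v, (2, 2)).to("xpu")
try:
    s.to_sparse_csr()
except NotImplementedError as e:
    print(e)  # Could not run 'aten::_to_sparse_csr' ... 'SparseXPU' backend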
     "test_view_ops_xpu.py": (
         # Need quantization support, NotImplementedError: Could not run 'aten::_empty_affine_quantized' with arguments from the 'QuantizedXPU' backend.
@@ -1218,33 +832,24 @@
     "nn/test_lazy_modules_xpu.py": None,
     "test_linalg_xpu.py": (
         # Summary:
-        # All linear algebra related ops are not supported for XPU.
-        # _convert_weight_to_int4pack not support
-        "_int4_mm_m_",
         # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
         "test_tensordot_out_kernel_errors_with_autograd_xpu_complex64",
         "test_tensordot_out_kernel_errors_with_autograd_xpu_float32",
-        "test_1_sized_with_0_strided_xpu_float64",
         "test_addbmm_xpu_complex128",
         "test_addbmm_xpu_complex64",
-        "test_addbmm_xpu_float64",
         "test_addmm_gelu_xpu_float64",
         "test_addmm_relu_xpu_float64",
         "test_addmm_sizes_xpu_float64",
         "test_addmm_xpu_complex128",
         "test_addmm_xpu_complex64",
         "test_addmm_xpu_float64",
-        "test_addmv_rowmajor_colmajor_incx_incy_lda_xpu_float64",
         "test_addmv_xpu_complex128",
         "test_addmv_xpu_complex64",
-        "test_addmv_xpu_float64",
         "test_baddbmm_xpu_complex128",
         "test_baddbmm_xpu_complex64",
         "test_baddbmm_xpu_float64",
         "test_bmm_xpu_complex128",
         "test_bmm_xpu_complex64",
-        "test_bmm_xpu_float64",
-        "test_blas_alpha_beta_empty_xpu_float64",
         "test_cholesky_errors_and_warnings_xpu_complex128",
         "test_cholesky_errors_and_warnings_xpu_complex64",
         "test_cholesky_errors_and_warnings_xpu_float64",
@@ -1269,14 +874,11 @@
         "test_cholesky_xpu_float64",
         "test_corner_cases_of_cublasltmatmul_xpu_complex128",
         "test_corner_cases_of_cublasltmatmul_xpu_complex64",
-        "test_corner_cases_of_cublasltmatmul_xpu_float64",
         "test_eig_check_magma_xpu_float32",
         "test_einsum_random_xpu_complex128",
         "test_einsum_random_xpu_float64",
         "test_einsum_sublist_format_xpu_complex128",
-        "test_einsum_sublist_format_xpu_float64",
         "test_einsum_xpu_complex128",
-        "test_einsum_xpu_float64",
         "test_inner_xpu_complex64",
         "test_invariance_error_spectral_decompositions_xpu_complex128",
         "test_inverse_many_batches_xpu_complex128",
@@ -1330,7 +932,6 @@
         "test_matrix_power_negative_xpu_complex128",
         "test_matrix_power_negative_xpu_float64",
         "test_matrix_power_non_negative_xpu_complex128",
-        "test_matrix_power_non_negative_xpu_float64",
         "test_matrix_rank_atol_rtol_xpu_float64",
         "test_matrix_rank_xpu_complex128",
         "test_matrix_rank_xpu_complex64",
@@ -1339,9 +940,7 @@
         "test_mm_conjtranspose_xpu",
         "test_mm_xpu_complex128",
         "test_mm_xpu_complex64",
-        "test_mm_xpu_float64",
         "test_multi_dot_xpu_complex128",
-        "test_multi_dot_xpu_float64",
         "test_old_cholesky_batched_many_batches_xpu_float64",
         "test_old_cholesky_batched_upper_xpu_complex128",
         "test_old_cholesky_batched_upper_xpu_complex64",
@@ -1373,7 +972,6 @@
         "test_solve_xpu_complex128",
         "test_solve_xpu_complex64",
         "test_solve_xpu_float64",
-        "test_strided_mm_bmm_xpu_float64",
         "test_svd_lowrank_xpu_complex128",
         "test_svd_lowrank_xpu_float64",
         "test_svd_xpu_complex128",
@@ -1404,11 +1002,6 @@
         "test_dot_invalid_args_xpu",
         "test_vdot_invalid_args_xpu",
         "test__int_mm_errors_xpu",
-        # https://github.com/intel/torch-xpu-ops/issues/821
-        # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200
-        "test_norm_fused_type_promotion_xpu_bfloat16",
-        # AssertionError: True is not false
-        "test_norm_fused_type_promotion_xpu_float16",
         # https://github.com/intel/torch-xpu-ops/issues/814
         # xpu does not have '_cuda_tunableop_is_enabled' API
         "_tunableop_",
@@ -1435,9 +1028,7 @@
         # TODO: align input data type for convert_weight_to_int4pack with CUDA
         # XPU expects weight to be kInt, while CUDA expects kByte
         "test__int4_mm_m_32_k_32_n_48_xpu",
-        "test__int4_mm_m_32_k_32_n_64_xpu",
         "test__int4_mm_m_32_k_64_n_48_xpu",
-        "test__int4_mm_m_32_k_64_n_64_xpu",
         "test__int4_mm_m_64_k_32_n_48_xpu",
         "test__int4_mm_m_64_k_32_n_64_xpu",
         "test__int4_mm_m_64_k_64_n_48_xpu",
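The TODO above concerns the packing op's input dtype: XPU's _convert_weight_to_int4pack takes int32 (kInt) weights while CUDA takes uint8 (kByte), so shared test code that hardcodes one dtype fails on the other backend. A hedged sketch (op name taken from the comment above; the shape and inner-K tiling are illustrative and may be rejected for unsupported sizes):

import torch

w = torch.randint(0, 16, (64, 64), dtype=torch.int32, device="xpu")  # CUDA wants torch.uint8
try:
    packed = torch.ops.aten._convert_weight_to_int4pack(w, 2)  # 2 = innerKTiles
except (RuntimeError, NotImplementedError) as e:
    print(e)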
"test_compile_int4_mm_m_64_k_64_n_64_xpu", "test__int4_mm_m_32_k_32_n_48_xpu", - "test__int4_mm_m_32_k_32_n_64_xpu", "test__int4_mm_m_32_k_64_n_48_xpu", - "test__int4_mm_m_32_k_64_n_64_xpu", "test__int4_mm_m_64_k_32_n_48_xpu", "test__int4_mm_m_64_k_32_n_64_xpu", "test__int4_mm_m_64_k_64_n_48_xpu", @@ -1479,19 +1068,14 @@ # RuntimeError: Double and complex datatype matmul is not supported in oneDNN "test_fn_fwgrad_bwgrad___rmatmul___xpu_complex128", "test_fn_fwgrad_bwgrad___rmatmul___xpu_float64", - "test_fn_fwgrad_bwgrad_addbmm_xpu_float64", "test_fn_fwgrad_bwgrad_addmm_decomposed_xpu_complex128", - "test_fn_fwgrad_bwgrad_addmm_decomposed_xpu_float64", "test_fn_fwgrad_bwgrad_addmm_xpu_complex128", - "test_fn_fwgrad_bwgrad_addmm_xpu_float64", "test_fn_fwgrad_bwgrad_addmv_xpu_complex128", "test_fn_fwgrad_bwgrad_addmv_xpu_float64", "test_fn_fwgrad_bwgrad_addr_xpu_complex128", "test_fn_fwgrad_bwgrad_addr_xpu_float64", "test_fn_fwgrad_bwgrad_baddbmm_xpu_complex128", - "test_fn_fwgrad_bwgrad_baddbmm_xpu_float64", "test_fn_fwgrad_bwgrad_bmm_xpu_complex128", - "test_fn_fwgrad_bwgrad_bmm_xpu_float64", "test_fn_fwgrad_bwgrad_cholesky_inverse_xpu_complex128", "test_fn_fwgrad_bwgrad_cholesky_inverse_xpu_float64", "test_fn_fwgrad_bwgrad_cholesky_solve_xpu_complex128", @@ -1499,11 +1083,7 @@ "test_fn_fwgrad_bwgrad_cholesky_xpu_complex128", "test_fn_fwgrad_bwgrad_cholesky_xpu_float64", "test_fn_fwgrad_bwgrad_corrcoef_xpu_complex128", - "test_fn_fwgrad_bwgrad_corrcoef_xpu_float64", "test_fn_fwgrad_bwgrad_einsum_xpu_complex128", - "test_fn_fwgrad_bwgrad_einsum_xpu_float64", - "test_fn_fwgrad_bwgrad_inner_xpu_complex128", - "test_fn_fwgrad_bwgrad_inner_xpu_float64", "test_fn_fwgrad_bwgrad_linalg_cholesky_ex_xpu_complex128", "test_fn_fwgrad_bwgrad_linalg_cholesky_ex_xpu_float64", "test_fn_fwgrad_bwgrad_linalg_cholesky_xpu_complex128", @@ -1537,7 +1117,6 @@ "test_fn_fwgrad_bwgrad_linalg_matrix_power_xpu_complex128", "test_fn_fwgrad_bwgrad_linalg_matrix_power_xpu_float64", "test_fn_fwgrad_bwgrad_linalg_multi_dot_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_multi_dot_xpu_float64", "test_fn_fwgrad_bwgrad_linalg_norm_xpu_float64", "test_fn_fwgrad_bwgrad_linalg_pinv_hermitian_xpu_complex128", "test_fn_fwgrad_bwgrad_linalg_pinv_hermitian_xpu_float64", @@ -1558,7 +1137,6 @@ "test_fn_fwgrad_bwgrad_linalg_tensorinv_xpu_complex128", "test_fn_fwgrad_bwgrad_linalg_tensorinv_xpu_float64", "test_fn_fwgrad_bwgrad_linalg_tensorsolve_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_tensorsolve_xpu_float64", "test_fn_fwgrad_bwgrad_logdet_xpu_complex128", "test_fn_fwgrad_bwgrad_logdet_xpu_float64", "test_fn_fwgrad_bwgrad_lu_solve_xpu_complex128", @@ -1567,14 +1145,9 @@ "test_fn_fwgrad_bwgrad_matmul_xpu_complex128", "test_fn_fwgrad_bwgrad_matmul_xpu_float64", "test_fn_fwgrad_bwgrad_mm_xpu_complex128", - "test_fn_fwgrad_bwgrad_mm_xpu_float64", "test_fn_fwgrad_bwgrad_mv_xpu_complex128", "test_fn_fwgrad_bwgrad_mv_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_bilinear_xpu_float64", "test_fn_fwgrad_bwgrad_nn_functional_linear_xpu_complex128", - "test_fn_fwgrad_bwgrad_nn_functional_linear_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_multi_head_attention_forward_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_scaled_dot_product_attention_xpu_float64", "test_fn_fwgrad_bwgrad_norm_nuc_xpu_complex128", "test_fn_fwgrad_bwgrad_norm_nuc_xpu_float64", "test_fn_fwgrad_bwgrad_ormqr_xpu_complex128", @@ -1588,12 +1161,10 @@ "test_fn_fwgrad_bwgrad_svd_xpu_complex128", "test_fn_fwgrad_bwgrad_svd_xpu_float64", 
"test_fn_fwgrad_bwgrad_tensordot_xpu_complex128", - "test_fn_fwgrad_bwgrad_tensordot_xpu_float64", "test_forward_mode_AD___rmatmul___xpu_complex128", "test_forward_mode_AD___rmatmul___xpu_float64", "test_forward_mode_AD_addbmm_xpu_float64", "test_forward_mode_AD_addmm_decomposed_xpu_complex128", - "test_forward_mode_AD_addmm_decomposed_xpu_float64", "test_forward_mode_AD_addmm_xpu_complex128", "test_forward_mode_AD_addmm_xpu_float64", "test_forward_mode_AD_addmv_xpu_complex128", @@ -1601,7 +1172,6 @@ "test_forward_mode_AD_baddbmm_xpu_complex128", "test_forward_mode_AD_baddbmm_xpu_float64", "test_forward_mode_AD_bmm_xpu_complex128", - "test_forward_mode_AD_bmm_xpu_float64", "test_forward_mode_AD_cholesky_inverse_xpu_complex128", "test_forward_mode_AD_cholesky_inverse_xpu_float64", "test_forward_mode_AD_cholesky_solve_xpu_complex128", @@ -1609,13 +1179,8 @@ "test_forward_mode_AD_cholesky_xpu_complex128", "test_forward_mode_AD_cholesky_xpu_float64", "test_forward_mode_AD_corrcoef_xpu_complex128", - "test_forward_mode_AD_corrcoef_xpu_float64", "test_forward_mode_AD_dot_xpu_complex128", - "test_forward_mode_AD_dot_xpu_float64", "test_forward_mode_AD_einsum_xpu_complex128", - "test_forward_mode_AD_einsum_xpu_float64", - "test_forward_mode_AD_inner_xpu_complex128", - "test_forward_mode_AD_inner_xpu_float64", "test_forward_mode_AD_linalg_cholesky_ex_xpu_complex128", "test_forward_mode_AD_linalg_cholesky_ex_xpu_float64", "test_forward_mode_AD_linalg_cholesky_xpu_complex128", @@ -1649,7 +1214,6 @@ "test_forward_mode_AD_linalg_matrix_power_xpu_complex128", "test_forward_mode_AD_linalg_matrix_power_xpu_float64", "test_forward_mode_AD_linalg_multi_dot_xpu_complex128", - "test_forward_mode_AD_linalg_multi_dot_xpu_float64", "test_forward_mode_AD_linalg_norm_xpu_float64", "test_forward_mode_AD_linalg_pinv_hermitian_xpu_complex128", "test_forward_mode_AD_linalg_pinv_hermitian_xpu_float64", @@ -1671,7 +1235,6 @@ "test_forward_mode_AD_linalg_tensorinv_xpu_complex128", "test_forward_mode_AD_linalg_tensorinv_xpu_float64", "test_forward_mode_AD_linalg_tensorsolve_xpu_complex128", - "test_forward_mode_AD_linalg_tensorsolve_xpu_float64", "test_forward_mode_AD_logdet_xpu_complex128", "test_forward_mode_AD_logdet_xpu_float64", "test_forward_mode_AD_lu_solve_xpu_complex128", @@ -1680,12 +1243,9 @@ "test_forward_mode_AD_matmul_xpu_complex128", "test_forward_mode_AD_matmul_xpu_float64", "test_forward_mode_AD_mm_xpu_complex128", - "test_forward_mode_AD_mm_xpu_float64", "test_forward_mode_AD_mv_xpu_complex128", "test_forward_mode_AD_mv_xpu_float64", - "test_forward_mode_AD_nn_functional_bilinear_xpu_float64", "test_forward_mode_AD_nn_functional_linear_xpu_complex128", - "test_forward_mode_AD_nn_functional_linear_xpu_float64", "test_forward_mode_AD_norm_nuc_xpu_complex128", "test_forward_mode_AD_norm_nuc_xpu_float64", "test_forward_mode_AD_pca_lowrank_xpu_float64", @@ -1697,7 +1257,6 @@ "test_forward_mode_AD_svd_xpu_complex128", "test_forward_mode_AD_svd_xpu_float64", "test_forward_mode_AD_tensordot_xpu_complex128", - "test_forward_mode_AD_tensordot_xpu_float64", "test_forward_mode_AD_triangular_solve_xpu_complex128", "test_forward_mode_AD_triangular_solve_xpu_float64", "test_inplace_forward_mode_AD_addbmm_xpu_float64", @@ -1728,9 +1287,6 @@ "test_forward_mode_AD_nn_functional_conv_transpose2d_xpu_float64", "test_forward_mode_AD_nn_functional_conv_transpose3d_xpu_complex128", "test_forward_mode_AD_nn_functional_conv_transpose3d_xpu_float64", - # issue: https://github.com/intel/torch-xpu-ops/issues/809 - 
"test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128", - "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64", ), # "test_matmul_cuda_xpu.py": ( # # AssertionError: "Bias is not supported when out_dtype is set to Float32" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend. @@ -1774,14 +1330,7 @@ # AttributeError: 'TestQuantizedOpsXPU' object has no attribute 'test_qsoftmax' "test_qsoftmax_qnnpack_xpu", ), - "quantization/core/test_workflow_ops_xpu.py": ( - # AssertionError: Not equal to tolerance rtol=1e-06, atol=1e-06 - # Max absolute difference among violations: 1.731507e+10 - # Max relative difference among violations: 0.01587304 - # ACTUAL: array([-1.108163e+12, 1.108163e+12], dtype=float32) - # DESIRED: array([-1.108163e+12, 1.090847e+12], dtype=float32) - "test_fq_module_per_tensor_xpu", - ), + "quantization/core/test_workflow_ops_xpu.py": None, "quantization/core/test_workflow_module_xpu.py": None, "quantization/core/test_quantized_tensor_xpu.py": ( # Summary: Quantized OPs are not supported for XPU @@ -1813,23 +1362,18 @@ "test_ops_gradients_xpu.py": ( # All are oneDNN issues ### Error #0 in TestBwdGradientsXPU , totally 271 , RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_fn_grad_index_reduce_prod_xpu_float64", - "test_inplace_grad_index_reduce_prod_xpu_float64", "test_fn_grad___rmatmul___xpu_complex128", "test_fn_grad___rmatmul___xpu_float64", "test_fn_grad_addbmm_xpu_float64", "test_fn_grad_addmm_decomposed_xpu_complex128", - "test_fn_grad_addmm_decomposed_xpu_float64", "test_fn_grad_addmm_xpu_complex128", "test_fn_grad_addmm_xpu_float64", "test_fn_grad_addmv_xpu_complex128", "test_fn_grad_addmv_xpu_float64", "test_fn_grad_addr_xpu_complex128", - "test_fn_grad_addr_xpu_float64", "test_fn_grad_baddbmm_xpu_complex128", "test_fn_grad_baddbmm_xpu_float64", "test_fn_grad_bmm_xpu_complex128", - "test_fn_grad_bmm_xpu_float64", "test_fn_grad_cdist_xpu_float64", "test_fn_grad_cholesky_inverse_xpu_complex128", "test_fn_grad_cholesky_inverse_xpu_float64", @@ -1838,11 +1382,7 @@ "test_fn_grad_cholesky_xpu_complex128", "test_fn_grad_cholesky_xpu_float64", "test_fn_grad_corrcoef_xpu_complex128", - "test_fn_grad_corrcoef_xpu_float64", "test_fn_grad_einsum_xpu_complex128", - "test_fn_grad_einsum_xpu_float64", - "test_fn_grad_inner_xpu_complex128", - "test_fn_grad_inner_xpu_float64", "test_fn_grad_linalg_cholesky_ex_xpu_complex128", "test_fn_grad_linalg_cholesky_ex_xpu_float64", "test_fn_grad_linalg_cholesky_xpu_complex128", @@ -1875,7 +1415,6 @@ "test_fn_grad_linalg_matrix_power_xpu_complex128", "test_fn_grad_linalg_matrix_power_xpu_float64", "test_fn_grad_linalg_multi_dot_xpu_complex128", - "test_fn_grad_linalg_multi_dot_xpu_float64", "test_fn_grad_linalg_norm_xpu_float64", "test_fn_grad_linalg_pinv_hermitian_xpu_complex128", "test_fn_grad_linalg_pinv_hermitian_xpu_float64", @@ -1897,7 +1436,6 @@ "test_fn_grad_linalg_tensorinv_xpu_complex128", "test_fn_grad_linalg_tensorinv_xpu_float64", "test_fn_grad_linalg_tensorsolve_xpu_complex128", - "test_fn_grad_linalg_tensorsolve_xpu_float64", "test_fn_grad_logdet_xpu_complex128", "test_fn_grad_logdet_xpu_float64", "test_fn_grad_lu_solve_xpu_complex128", @@ -1906,14 +1444,10 @@ "test_fn_grad_matmul_xpu_complex128", "test_fn_grad_matmul_xpu_float64", "test_fn_grad_mm_xpu_complex128", - "test_fn_grad_mm_xpu_float64", "test_fn_grad_mv_xpu_complex128", "test_fn_grad_mv_xpu_float64", - "test_fn_grad_nn_functional_bilinear_xpu_float64", 
"test_fn_grad_nn_functional_linear_xpu_complex128", - "test_fn_grad_nn_functional_linear_xpu_float64", "test_fn_grad_nn_functional_multi_head_attention_forward_xpu_float64", - "test_fn_grad_nn_functional_scaled_dot_product_attention_xpu_float64", "test_fn_grad_norm_nuc_xpu_complex128", "test_fn_grad_norm_nuc_xpu_float64", "test_fn_grad_ormqr_xpu_complex128", @@ -1927,24 +1461,18 @@ "test_fn_grad_svd_xpu_complex128", "test_fn_grad_svd_xpu_float64", "test_fn_grad_tensordot_xpu_complex128", - "test_fn_grad_tensordot_xpu_float64", "test_fn_grad_triangular_solve_xpu_complex128", "test_fn_grad_triangular_solve_xpu_float64", "test_fn_gradgrad___rmatmul___xpu_complex128", "test_fn_gradgrad___rmatmul___xpu_float64", - "test_fn_gradgrad_addbmm_xpu_float64", "test_fn_gradgrad_addmm_decomposed_xpu_complex128", - "test_fn_gradgrad_addmm_decomposed_xpu_float64", "test_fn_gradgrad_addmm_xpu_complex128", - "test_fn_gradgrad_addmm_xpu_float64", "test_fn_gradgrad_addmv_xpu_complex128", "test_fn_gradgrad_addmv_xpu_float64", "test_fn_gradgrad_addr_xpu_complex128", "test_fn_gradgrad_addr_xpu_float64", "test_fn_gradgrad_baddbmm_xpu_complex128", - "test_fn_gradgrad_baddbmm_xpu_float64", "test_fn_gradgrad_bmm_xpu_complex128", - "test_fn_gradgrad_bmm_xpu_float64", "test_fn_gradgrad_cholesky_inverse_xpu_complex128", "test_fn_gradgrad_cholesky_inverse_xpu_float64", "test_fn_gradgrad_cholesky_solve_xpu_complex128", @@ -1952,11 +1480,7 @@ "test_fn_gradgrad_cholesky_xpu_complex128", "test_fn_gradgrad_cholesky_xpu_float64", "test_fn_gradgrad_corrcoef_xpu_complex128", - "test_fn_gradgrad_corrcoef_xpu_float64", "test_fn_gradgrad_einsum_xpu_complex128", - "test_fn_gradgrad_einsum_xpu_float64", - "test_fn_gradgrad_inner_xpu_complex128", - "test_fn_gradgrad_inner_xpu_float64", "test_fn_gradgrad_linalg_cholesky_ex_xpu_complex128", "test_fn_gradgrad_linalg_cholesky_ex_xpu_float64", "test_fn_gradgrad_linalg_cholesky_xpu_complex128", @@ -1990,7 +1514,6 @@ "test_fn_gradgrad_linalg_matrix_power_xpu_complex128", "test_fn_gradgrad_linalg_matrix_power_xpu_float64", "test_fn_gradgrad_linalg_multi_dot_xpu_complex128", - "test_fn_gradgrad_linalg_multi_dot_xpu_float64", "test_fn_gradgrad_linalg_pinv_hermitian_xpu_complex128", "test_fn_gradgrad_linalg_pinv_hermitian_xpu_float64", "test_fn_gradgrad_linalg_pinv_singular_xpu_float64", @@ -2010,7 +1533,6 @@ "test_fn_gradgrad_linalg_tensorinv_xpu_complex128", "test_fn_gradgrad_linalg_tensorinv_xpu_float64", "test_fn_gradgrad_linalg_tensorsolve_xpu_complex128", - "test_fn_gradgrad_linalg_tensorsolve_xpu_float64", "test_fn_gradgrad_logdet_xpu_complex128", "test_fn_gradgrad_logdet_xpu_float64", "test_fn_gradgrad_lu_solve_xpu_complex128", @@ -2019,14 +1541,9 @@ "test_fn_gradgrad_matmul_xpu_complex128", "test_fn_gradgrad_matmul_xpu_float64", "test_fn_gradgrad_mm_xpu_complex128", - "test_fn_gradgrad_mm_xpu_float64", "test_fn_gradgrad_mv_xpu_complex128", "test_fn_gradgrad_mv_xpu_float64", - "test_fn_gradgrad_nn_functional_bilinear_xpu_float64", "test_fn_gradgrad_nn_functional_linear_xpu_complex128", - "test_fn_gradgrad_nn_functional_linear_xpu_float64", - "test_fn_gradgrad_nn_functional_multi_head_attention_forward_xpu_float64", - "test_fn_gradgrad_nn_functional_scaled_dot_product_attention_xpu_float64", "test_fn_gradgrad_norm_nuc_xpu_complex128", "test_fn_gradgrad_norm_nuc_xpu_float64", "test_fn_gradgrad_ormqr_xpu_complex128", @@ -2040,7 +1557,6 @@ "test_fn_gradgrad_svd_xpu_complex128", "test_fn_gradgrad_svd_xpu_float64", "test_fn_gradgrad_tensordot_xpu_complex128", - 
"test_fn_gradgrad_tensordot_xpu_float64", "test_fn_gradgrad_triangular_solve_xpu_complex128", "test_fn_gradgrad_triangular_solve_xpu_float64", "test_inplace_grad_addbmm_xpu_float64", @@ -2051,20 +1567,15 @@ "test_inplace_grad_addmv_xpu_complex128", "test_inplace_grad_addmv_xpu_float64", "test_inplace_grad_addr_xpu_complex128", - "test_inplace_grad_addr_xpu_float64", "test_inplace_grad_baddbmm_xpu_complex128", "test_inplace_grad_baddbmm_xpu_float64", - "test_inplace_gradgrad_addbmm_xpu_float64", "test_inplace_gradgrad_addmm_decomposed_xpu_complex128", - "test_inplace_gradgrad_addmm_decomposed_xpu_float64", "test_inplace_gradgrad_addmm_xpu_complex128", - "test_inplace_gradgrad_addmm_xpu_float64", "test_inplace_gradgrad_addmv_xpu_complex128", "test_inplace_gradgrad_addmv_xpu_float64", "test_inplace_gradgrad_addr_xpu_complex128", "test_inplace_gradgrad_addr_xpu_float64", "test_inplace_gradgrad_baddbmm_xpu_complex128", - "test_inplace_gradgrad_baddbmm_xpu_float64", "test_fn_grad_pca_lowrank_xpu_complex128", "test_fn_grad_svd_lowrank_xpu_complex128", "test_fn_gradgrad_pca_lowrank_xpu_complex128", @@ -2088,9 +1599,6 @@ "test_fn_gradgrad_index_reduce_prod_xpu_float64", "test_inplace_gradgrad_index_reduce_mean_xpu_float64", "test_inplace_gradgrad_index_reduce_prod_xpu_float64", - # issue: https://github.com/intel/torch-xpu-ops/issues/809 - "test_fn_gradgrad_nn_functional_conv3d_xpu_complex128", - "test_fn_gradgrad_nn_functional_conv3d_xpu_float64", ), "test_torch_xpu.py": ( # 'torch.xpu' has no attribute ... @@ -2164,72 +1672,10 @@ # internally uses index_put deterministic implementation # dependent on "test_index_put_non_accumulate_deterministic" "test_index_copy_deterministic", - # scatter_add needs handle XPU deterministic - # https://github.com/intel/torch-xpu-ops/issues/906 - "test_gather_backward_deterministic_path_xpu", - "test_scatter_add_one_dim_deterministic_xpu", - # Precision error - # Fail occasionally - # Mismatched elements: 1 / 60 (1.7%) - # Greatest absolute difference: 0.0625 at index (2, 1, 4) (up to 1e-05 allowed) - # Greatest relative difference: 0.001125335693359375 at index (2, 1, 4) (up to 0.001 allowed) - "test_index_reduce_reduce_mean_xpu_bfloat16", - "test_index_reduce_reduce_mean_xpu_float16", - "test_index_reduce_reduce_prod_xpu_float16", - ), - "nn/test_multihead_attention_xpu.py": ( - # known oneDNN issue - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_multihead_attention_dtype_batch_first_xpu_float64", - "test_multihead_attention_dtype_xpu_float64", - "test_multihead_attn_fast_path_query_and_bias_have_different_dtypes_xpu_float64", - "test_multihead_attn_fast_path_small_test_xpu_float64", - "test_multihead_attn_in_proj_bias_none_xpu_float64", - "test_multihead_attn_in_proj_weight_none_xpu_float64", ), + "nn/test_multihead_attention_xpu.py": None, "test_native_mha_xpu.py": ( - # NestedTensorXPU related OPs - # NotImplementedError: Could not run 'aten::_native_multi_head_attention' with arguments from the 'NestedTensorXPU' backend. 
- "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - 
"test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", + # AssertionError: Scalars are not equal! 
"test_transform_bias_rescale_qkv_nested_xpu_float32", ), "test_comparison_utils_xpu.py": None, @@ -2284,29 +1730,16 @@ "test_dispatch_symbolic_meta_outplace_addbmm_xpu_complex", "test_meta_inplace_addbmm_xpu_complex", "test_meta_outplace_addbmm_xpu_complex", - "test_dispatch_meta_inplace_addbmm_xpu_float64", "test_dispatch_meta_inplace_addmm_decomposed_xpu_complex", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_float64", "test_dispatch_meta_inplace_addmm_xpu_complex", - "test_dispatch_meta_inplace_addmm_xpu_float64", "test_dispatch_meta_inplace_addmv_xpu_complex", - "test_dispatch_meta_inplace_addmv_xpu_float64", "test_dispatch_meta_inplace_baddbmm_xpu_complex", - "test_dispatch_meta_inplace_baddbmm_xpu_float64", "test_dispatch_meta_outplace___rmatmul___xpu_complex", - "test_dispatch_meta_outplace___rmatmul___xpu_float64", - "test_dispatch_meta_outplace_addbmm_xpu_float64", "test_dispatch_meta_outplace_addmm_decomposed_xpu_complex", - "test_dispatch_meta_outplace_addmm_decomposed_xpu_float64", "test_dispatch_meta_outplace_addmm_xpu_complex", - "test_dispatch_meta_outplace_addmm_xpu_float64", "test_dispatch_meta_outplace_addmv_xpu_complex", - "test_dispatch_meta_outplace_addmv_xpu_float64", "test_dispatch_meta_outplace_baddbmm_xpu_complex", - "test_dispatch_meta_outplace_baddbmm_xpu_float64", "test_dispatch_meta_outplace_bmm_xpu_complex", - "test_dispatch_meta_outplace_bmm_xpu_float64", - "test_dispatch_meta_outplace_cdist_xpu_float64", "test_dispatch_meta_outplace_cholesky_inverse_xpu_complex", "test_dispatch_meta_outplace_cholesky_inverse_xpu_float64", "test_dispatch_meta_outplace_cholesky_solve_xpu_complex", @@ -2314,15 +1747,10 @@ "test_dispatch_meta_outplace_cholesky_xpu_complex", "test_dispatch_meta_outplace_cholesky_xpu_float64", "test_dispatch_meta_outplace_corrcoef_xpu_complex", - "test_dispatch_meta_outplace_corrcoef_xpu_float64", "test_dispatch_meta_outplace_cov_xpu_complex", - "test_dispatch_meta_outplace_cov_xpu_float64", "test_dispatch_meta_outplace_einsum_xpu_complex", - "test_dispatch_meta_outplace_einsum_xpu_float64", "test_dispatch_meta_outplace_geqrf_xpu_complex", "test_dispatch_meta_outplace_geqrf_xpu_float64", - "test_dispatch_meta_outplace_inner_xpu_complex", - "test_dispatch_meta_outplace_inner_xpu_float64", "test_dispatch_meta_outplace_linalg_cholesky_ex_xpu_complex", "test_dispatch_meta_outplace_linalg_cholesky_ex_xpu_float64", "test_dispatch_meta_outplace_linalg_cholesky_xpu_complex", @@ -2358,7 +1786,6 @@ "test_dispatch_meta_outplace_linalg_matrix_rank_xpu_complex", "test_dispatch_meta_outplace_linalg_matrix_rank_xpu_float64", "test_dispatch_meta_outplace_linalg_multi_dot_xpu_complex", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_float64", "test_dispatch_meta_outplace_linalg_pinv_hermitian_xpu_complex", "test_dispatch_meta_outplace_linalg_pinv_hermitian_xpu_float64", "test_dispatch_meta_outplace_linalg_pinv_singular_xpu_complex", @@ -2380,16 +1807,9 @@ "test_dispatch_meta_outplace_lu_xpu_complex", "test_dispatch_meta_outplace_lu_xpu_float64", "test_dispatch_meta_outplace_matmul_xpu_complex", - "test_dispatch_meta_outplace_matmul_xpu_float64", "test_dispatch_meta_outplace_mm_xpu_complex", - "test_dispatch_meta_outplace_mm_xpu_float64", "test_dispatch_meta_outplace_mv_xpu_complex", - "test_dispatch_meta_outplace_mv_xpu_float64", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_float64", "test_dispatch_meta_outplace_nn_functional_linear_xpu_complex", - "test_dispatch_meta_outplace_nn_functional_linear_xpu_float64", - 
"test_dispatch_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64", - "test_dispatch_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64", "test_dispatch_meta_outplace_pca_lowrank_xpu_complex", "test_dispatch_meta_outplace_pca_lowrank_xpu_float64", "test_dispatch_meta_outplace_pinverse_xpu_complex", @@ -2401,32 +1821,18 @@ "test_dispatch_meta_outplace_svd_xpu_complex", "test_dispatch_meta_outplace_svd_xpu_float64", "test_dispatch_meta_outplace_tensordot_xpu_complex", - "test_dispatch_meta_outplace_tensordot_xpu_float64", "test_dispatch_meta_outplace_triangular_solve_xpu_complex", "test_dispatch_meta_outplace_triangular_solve_xpu_float64", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_float64", "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_float64", "test_dispatch_symbolic_meta_inplace_addmm_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_float64", "test_dispatch_symbolic_meta_inplace_addmv_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_float64", "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_complex", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_float64", "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_complex", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_float64", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float64", "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_float64", "test_dispatch_symbolic_meta_outplace_addmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_float64", "test_dispatch_symbolic_meta_outplace_addmv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_float64", "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_float64", "test_dispatch_symbolic_meta_outplace_bmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_float64", - "test_dispatch_symbolic_meta_outplace_cdist_xpu_float64", "test_dispatch_symbolic_meta_outplace_cholesky_inverse_xpu_complex", "test_dispatch_symbolic_meta_outplace_cholesky_inverse_xpu_float64", "test_dispatch_symbolic_meta_outplace_cholesky_solve_xpu_complex", @@ -2434,15 +1840,10 @@ "test_dispatch_symbolic_meta_outplace_cholesky_xpu_complex", "test_dispatch_symbolic_meta_outplace_cholesky_xpu_float64", "test_dispatch_symbolic_meta_outplace_corrcoef_xpu_complex", - "test_dispatch_symbolic_meta_outplace_corrcoef_xpu_float64", "test_dispatch_symbolic_meta_outplace_cov_xpu_complex", - "test_dispatch_symbolic_meta_outplace_cov_xpu_float64", "test_dispatch_symbolic_meta_outplace_einsum_xpu_complex", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_float64", "test_dispatch_symbolic_meta_outplace_geqrf_xpu_complex", "test_dispatch_symbolic_meta_outplace_geqrf_xpu_float64", - "test_dispatch_symbolic_meta_outplace_inner_xpu_complex", - "test_dispatch_symbolic_meta_outplace_inner_xpu_float64", "test_dispatch_symbolic_meta_outplace_linalg_cholesky_ex_xpu_complex", "test_dispatch_symbolic_meta_outplace_linalg_cholesky_ex_xpu_float64", "test_dispatch_symbolic_meta_outplace_linalg_cholesky_xpu_complex", @@ -2478,7 +1879,6 @@ "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_xpu_complex", "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_xpu_float64", "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_float64", 
"test_dispatch_symbolic_meta_outplace_linalg_pinv_hermitian_xpu_complex", "test_dispatch_symbolic_meta_outplace_linalg_pinv_hermitian_xpu_float64", "test_dispatch_symbolic_meta_outplace_linalg_pinv_singular_xpu_complex", @@ -2500,16 +1900,9 @@ "test_dispatch_symbolic_meta_outplace_lu_xpu_complex", "test_dispatch_symbolic_meta_outplace_lu_xpu_float64", "test_dispatch_symbolic_meta_outplace_matmul_xpu_complex", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_float64", "test_dispatch_symbolic_meta_outplace_mm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_mm_xpu_float64", "test_dispatch_symbolic_meta_outplace_mv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_mv_xpu_float64", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_float64", "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_complex", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_float64", - "test_dispatch_symbolic_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64", - "test_dispatch_symbolic_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64", "test_dispatch_symbolic_meta_outplace_pca_lowrank_xpu_complex", "test_dispatch_symbolic_meta_outplace_pca_lowrank_xpu_float64", "test_dispatch_symbolic_meta_outplace_pinverse_xpu_complex", @@ -2521,32 +1914,18 @@ "test_dispatch_symbolic_meta_outplace_svd_xpu_complex", "test_dispatch_symbolic_meta_outplace_svd_xpu_float64", "test_dispatch_symbolic_meta_outplace_tensordot_xpu_complex", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_float64", "test_dispatch_symbolic_meta_outplace_triangular_solve_xpu_complex", "test_dispatch_symbolic_meta_outplace_triangular_solve_xpu_float64", - "test_meta_inplace_addbmm_xpu_float64", "test_meta_inplace_addmm_decomposed_xpu_complex", - "test_meta_inplace_addmm_decomposed_xpu_float64", "test_meta_inplace_addmm_xpu_complex", - "test_meta_inplace_addmm_xpu_float64", "test_meta_inplace_addmv_xpu_complex", - "test_meta_inplace_addmv_xpu_float64", "test_meta_inplace_baddbmm_xpu_complex", - "test_meta_inplace_baddbmm_xpu_float64", "test_meta_outplace___rmatmul___xpu_complex", - "test_meta_outplace___rmatmul___xpu_float64", - "test_meta_outplace_addbmm_xpu_float64", "test_meta_outplace_addmm_decomposed_xpu_complex", - "test_meta_outplace_addmm_decomposed_xpu_float64", "test_meta_outplace_addmm_xpu_complex", - "test_meta_outplace_addmm_xpu_float64", "test_meta_outplace_addmv_xpu_complex", - "test_meta_outplace_addmv_xpu_float64", "test_meta_outplace_baddbmm_xpu_complex", - "test_meta_outplace_baddbmm_xpu_float64", "test_meta_outplace_bmm_xpu_complex", - "test_meta_outplace_bmm_xpu_float64", - "test_meta_outplace_cdist_xpu_float64", "test_meta_outplace_cholesky_inverse_xpu_complex", "test_meta_outplace_cholesky_inverse_xpu_float64", "test_meta_outplace_cholesky_solve_xpu_complex", @@ -2554,15 +1933,10 @@ "test_meta_outplace_cholesky_xpu_complex", "test_meta_outplace_cholesky_xpu_float64", "test_meta_outplace_corrcoef_xpu_complex", - "test_meta_outplace_corrcoef_xpu_float64", "test_meta_outplace_cov_xpu_complex", - "test_meta_outplace_cov_xpu_float64", "test_meta_outplace_einsum_xpu_complex", - "test_meta_outplace_einsum_xpu_float64", "test_meta_outplace_geqrf_xpu_complex", "test_meta_outplace_geqrf_xpu_float64", - "test_meta_outplace_inner_xpu_complex", - "test_meta_outplace_inner_xpu_float64", "test_meta_outplace_linalg_cholesky_ex_xpu_complex", "test_meta_outplace_linalg_cholesky_ex_xpu_float64", "test_meta_outplace_linalg_cholesky_xpu_complex", @@ -2598,7 +1972,6 
@@ "test_meta_outplace_linalg_matrix_rank_xpu_complex", "test_meta_outplace_linalg_matrix_rank_xpu_float64", "test_meta_outplace_linalg_multi_dot_xpu_complex", - "test_meta_outplace_linalg_multi_dot_xpu_float64", "test_meta_outplace_linalg_pinv_hermitian_xpu_complex", "test_meta_outplace_linalg_pinv_hermitian_xpu_float64", "test_meta_outplace_linalg_pinv_singular_xpu_complex", @@ -2620,16 +1993,9 @@ "test_meta_outplace_lu_xpu_complex", "test_meta_outplace_lu_xpu_float64", "test_meta_outplace_matmul_xpu_complex", - "test_meta_outplace_matmul_xpu_float64", "test_meta_outplace_mm_xpu_complex", - "test_meta_outplace_mm_xpu_float64", "test_meta_outplace_mv_xpu_complex", - "test_meta_outplace_mv_xpu_float64", - "test_meta_outplace_nn_functional_bilinear_xpu_float64", "test_meta_outplace_nn_functional_linear_xpu_complex", - "test_meta_outplace_nn_functional_linear_xpu_float64", - "test_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64", - "test_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64", "test_meta_outplace_pca_lowrank_xpu_complex", "test_meta_outplace_pca_lowrank_xpu_float64", "test_meta_outplace_pinverse_xpu_complex", @@ -2641,7 +2007,6 @@ "test_meta_outplace_svd_xpu_complex", "test_meta_outplace_svd_xpu_float64", "test_meta_outplace_tensordot_xpu_complex", - "test_meta_outplace_tensordot_xpu_float64", "test_meta_outplace_triangular_solve_xpu_complex", "test_meta_outplace_triangular_solve_xpu_float64", # RuntimeError: Short is not supported in oneDNN! @@ -2658,7 +2023,6 @@ "test_dispatch_meta_outplace_baddbmm_xpu_int16", "test_dispatch_meta_outplace_bmm_xpu_int16", "test_dispatch_meta_outplace_einsum_xpu_int16", - "test_dispatch_meta_outplace_inner_xpu_int16", "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int16", "test_dispatch_meta_outplace_matmul_xpu_int16", "test_dispatch_meta_outplace_mm_xpu_int16", @@ -2678,7 +2042,6 @@ "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int16", "test_dispatch_symbolic_meta_outplace_bmm_xpu_int16", "test_dispatch_symbolic_meta_outplace_einsum_xpu_int16", - "test_dispatch_symbolic_meta_outplace_inner_xpu_int16", "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int16", "test_dispatch_symbolic_meta_outplace_matmul_xpu_int16", "test_dispatch_symbolic_meta_outplace_mm_xpu_int16", @@ -2698,7 +2061,6 @@ "test_meta_outplace_baddbmm_xpu_int16", "test_meta_outplace_bmm_xpu_int16", "test_meta_outplace_einsum_xpu_int16", - "test_meta_outplace_inner_xpu_int16", "test_meta_outplace_linalg_multi_dot_xpu_int16", "test_meta_outplace_matmul_xpu_int16", "test_meta_outplace_mm_xpu_int16", @@ -2707,131 +2069,65 @@ "test_meta_outplace_tensordot_xpu_int16", # RuntimeError: could not create a primitive descriptor for a matmul primitive "test_dispatch_meta_inplace_addbmm_xpu_int32", - "test_dispatch_meta_inplace_addbmm_xpu_uint8", "test_dispatch_meta_inplace_addmm_decomposed_xpu_int32", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_uint8", "test_dispatch_meta_inplace_addmm_xpu_int32", - "test_dispatch_meta_inplace_addmm_xpu_uint8", "test_dispatch_meta_inplace_addmv_xpu_int32", - "test_dispatch_meta_inplace_addmv_xpu_uint8", "test_dispatch_meta_inplace_baddbmm_xpu_int32", - "test_dispatch_meta_inplace_baddbmm_xpu_uint8", "test_dispatch_meta_outplace___rmatmul___xpu_int32", - "test_dispatch_meta_outplace___rmatmul___xpu_uint8", "test_dispatch_meta_outplace_addbmm_xpu_int32", - "test_dispatch_meta_outplace_addbmm_xpu_uint8", "test_dispatch_meta_outplace_addmm_decomposed_xpu_int32", - 
"test_dispatch_meta_outplace_addmm_decomposed_xpu_uint8", "test_dispatch_meta_outplace_addmm_xpu_int32", - "test_dispatch_meta_outplace_addmm_xpu_uint8", "test_dispatch_meta_outplace_addmv_xpu_int32", - "test_dispatch_meta_outplace_addmv_xpu_uint8", "test_dispatch_meta_outplace_baddbmm_xpu_int32", - "test_dispatch_meta_outplace_baddbmm_xpu_uint8", "test_dispatch_meta_outplace_bmm_xpu_int32", - "test_dispatch_meta_outplace_bmm_xpu_uint8", "test_dispatch_meta_outplace_einsum_xpu_int32", - "test_dispatch_meta_outplace_einsum_xpu_uint8", - "test_dispatch_meta_outplace_inner_xpu_int32", - "test_dispatch_meta_outplace_inner_xpu_uint8", "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int32", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_uint8", "test_dispatch_meta_outplace_matmul_xpu_int32", - "test_dispatch_meta_outplace_matmul_xpu_uint8", "test_dispatch_meta_outplace_mm_xpu_int32", - "test_dispatch_meta_outplace_mm_xpu_uint8", "test_dispatch_meta_outplace_mv_xpu_int32", - "test_dispatch_meta_outplace_mv_xpu_uint8", "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int32", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_uint8", "test_dispatch_meta_outplace_nn_functional_linear_xpu_int32", - "test_dispatch_meta_outplace_nn_functional_linear_xpu_uint8", "test_dispatch_meta_outplace_tensordot_xpu_int32", - "test_dispatch_meta_outplace_tensordot_xpu_uint8", "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_uint8", "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_uint8", "test_dispatch_symbolic_meta_inplace_addmm_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_uint8", "test_dispatch_symbolic_meta_inplace_addmv_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_uint8", "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int32", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_uint8", "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int32", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_uint8", "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_uint8", "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_uint8", "test_dispatch_symbolic_meta_outplace_addmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_uint8", "test_dispatch_symbolic_meta_outplace_addmv_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_uint8", "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_uint8", "test_dispatch_symbolic_meta_outplace_bmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_uint8", "test_dispatch_symbolic_meta_outplace_einsum_xpu_int32", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_inner_xpu_int32", - "test_dispatch_symbolic_meta_outplace_inner_xpu_uint8", "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int32", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_uint8", "test_dispatch_symbolic_meta_outplace_matmul_xpu_int32", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_uint8", "test_dispatch_symbolic_meta_outplace_mm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_mm_xpu_uint8", "test_dispatch_symbolic_meta_outplace_mv_xpu_int32", - "test_dispatch_symbolic_meta_outplace_mv_xpu_uint8", 
"test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int32", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_uint8", "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int32", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_uint8", "test_dispatch_symbolic_meta_outplace_tensordot_xpu_int32", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_uint8", "test_meta_inplace_addbmm_xpu_int32", - "test_meta_inplace_addbmm_xpu_uint8", "test_meta_inplace_addmm_decomposed_xpu_int32", - "test_meta_inplace_addmm_decomposed_xpu_uint8", "test_meta_inplace_addmm_xpu_int32", - "test_meta_inplace_addmm_xpu_uint8", "test_meta_inplace_addmv_xpu_int32", - "test_meta_inplace_addmv_xpu_uint8", "test_meta_inplace_baddbmm_xpu_int32", - "test_meta_inplace_baddbmm_xpu_uint8", "test_meta_outplace___rmatmul___xpu_int32", - "test_meta_outplace___rmatmul___xpu_uint8", "test_meta_outplace_addbmm_xpu_int32", - "test_meta_outplace_addbmm_xpu_uint8", "test_meta_outplace_addmm_decomposed_xpu_int32", - "test_meta_outplace_addmm_decomposed_xpu_uint8", "test_meta_outplace_addmm_xpu_int32", - "test_meta_outplace_addmm_xpu_uint8", "test_meta_outplace_addmv_xpu_int32", - "test_meta_outplace_addmv_xpu_uint8", "test_meta_outplace_baddbmm_xpu_int32", - "test_meta_outplace_baddbmm_xpu_uint8", "test_meta_outplace_bmm_xpu_int32", - "test_meta_outplace_bmm_xpu_uint8", "test_meta_outplace_einsum_xpu_int32", - "test_meta_outplace_einsum_xpu_uint8", - "test_meta_outplace_inner_xpu_int32", - "test_meta_outplace_inner_xpu_uint8", "test_meta_outplace_linalg_multi_dot_xpu_int32", - "test_meta_outplace_linalg_multi_dot_xpu_uint8", "test_meta_outplace_matmul_xpu_int32", - "test_meta_outplace_matmul_xpu_uint8", "test_meta_outplace_mm_xpu_int32", - "test_meta_outplace_mm_xpu_uint8", "test_meta_outplace_mv_xpu_int32", - "test_meta_outplace_mv_xpu_uint8", "test_meta_outplace_nn_functional_bilinear_xpu_int32", - "test_meta_outplace_nn_functional_bilinear_xpu_uint8", "test_meta_outplace_nn_functional_linear_xpu_int32", - "test_meta_outplace_nn_functional_linear_xpu_uint8", "test_meta_outplace_tensordot_xpu_int32", - "test_meta_outplace_tensordot_xpu_uint8", # RuntimeError: Long is not supported in oneDNN! 
"test_dispatch_meta_inplace_addbmm_xpu_int64", "test_dispatch_meta_inplace_addmm_decomposed_xpu_int64", @@ -2846,7 +2142,6 @@ "test_dispatch_meta_outplace_baddbmm_xpu_int64", "test_dispatch_meta_outplace_bmm_xpu_int64", "test_dispatch_meta_outplace_einsum_xpu_int64", - "test_dispatch_meta_outplace_inner_xpu_int64", "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int64", "test_dispatch_meta_outplace_matmul_xpu_int64", "test_dispatch_meta_outplace_mm_xpu_int64", @@ -2872,7 +2167,6 @@ "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int64", "test_dispatch_symbolic_meta_outplace_bmm_xpu_int64", "test_dispatch_symbolic_meta_outplace_einsum_xpu_int64", - "test_dispatch_symbolic_meta_outplace_inner_xpu_int64", "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int64", "test_dispatch_symbolic_meta_outplace_matmul_xpu_int64", "test_dispatch_symbolic_meta_outplace_mm_xpu_int64", @@ -2898,7 +2192,6 @@ "test_meta_outplace_baddbmm_xpu_int64", "test_meta_outplace_bmm_xpu_int64", "test_meta_outplace_einsum_xpu_int64", - "test_meta_outplace_inner_xpu_int64", "test_meta_outplace_linalg_multi_dot_xpu_int64", "test_meta_outplace_matmul_xpu_int64", "test_meta_outplace_mm_xpu_int64", @@ -2911,46 +2204,6 @@ "test_meta_outplace_nn_functional_conv_transpose2d_xpu_int64", "test_meta_outplace_nn_functional_conv_transpose3d_xpu_int64", "test_meta_outplace_tensordot_xpu_int64", - # RuntimeError: could not create a primitive - "test_dispatch_meta_outplace_addbmm_xpu_bfloat16", - "test_dispatch_meta_outplace_addbmm_xpu_float16", - "test_dispatch_meta_outplace_addbmm_xpu_float32", - "test_dispatch_meta_outplace_addbmm_xpu_int8", - "test_dispatch_meta_outplace_addmm_xpu_bfloat16", - "test_dispatch_meta_outplace_addmm_xpu_float16", - "test_dispatch_meta_outplace_addmm_xpu_float32", - "test_dispatch_meta_outplace_addmm_xpu_int8", - "test_dispatch_meta_outplace_addmv_xpu_bfloat16", - "test_dispatch_meta_outplace_addmv_xpu_float16", - "test_dispatch_meta_outplace_addmv_xpu_float32", - "test_dispatch_meta_outplace_addmv_xpu_int8", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float16", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int8", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_float16", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_int8", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_float16", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_float32", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_int8", - "test_dispatch_symbolic_meta_outplace_all_strides_addbmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_all_strides_addmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_all_strides_addmv_xpu_float32", - "test_meta_outplace_addbmm_xpu_bfloat16", - "test_meta_outplace_addbmm_xpu_float16", - "test_meta_outplace_addbmm_xpu_float32", - "test_meta_outplace_addbmm_xpu_int8", - "test_meta_outplace_addmm_xpu_bfloat16", - "test_meta_outplace_addmm_xpu_float16", - "test_meta_outplace_addmm_xpu_float32", - "test_meta_outplace_addmm_xpu_int8", - "test_meta_outplace_addmv_xpu_bfloat16", - "test_meta_outplace_addmv_xpu_float16", - "test_meta_outplace_addmv_xpu_float32", - "test_meta_outplace_addmv_xpu_int8", # RuntimeError: could not create a primitive descriptor for a deconvolution 
"test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16",
"test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_complex",
@@ -2978,56 +2231,10 @@
"test_meta_outplace_vdot_xpu_complex",
# Unexpected success:
"test_dispatch_symbolic_meta_outplace_all_strides_narrow_copy_xpu_float32",
- # New added case in 2.7
- "test_nonzero_xpu",
- # https://github.com/intel/torch-xpu-ops/issues/1569
- # RuntimeError: output 0: meta disagrees with real impl
- "test_dispatch_meta_outplace_norm_fro_xpu_bfloat16",
- "test_dispatch_meta_outplace_norm_fro_xpu_complex128",
- "test_dispatch_meta_outplace_norm_fro_xpu_complex64",
- "test_dispatch_meta_outplace_norm_fro_xpu_float",
- "test_dispatch_symbolic_meta_outplace_all_strides_norm_fro_xpu_float32",
- "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_bfloat16",
- "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_complex128",
- "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_complex64",
- "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_float",
),
"test_type_promotion_xpu.py": None,
"test_distributions_xpu.py": None,
- "test_optim_xpu.py": (
- # oneDNN issues
- # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
- "test_foreach_matches_forloop_ASGD_xpu_float64",
- "test_foreach_matches_forloop_Adadelta_xpu_float64",
- "test_foreach_matches_forloop_Adafactor_xpu_float64",
- "test_foreach_matches_forloop_Adagrad_xpu_float64",
- "test_foreach_matches_forloop_AdamW_xpu_float64",
- "test_foreach_matches_forloop_Adam_xpu_float64",
- "test_foreach_matches_forloop_Adamax_xpu_float64",
- "test_foreach_matches_forloop_NAdam_xpu_float64",
- "test_foreach_matches_forloop_RAdam_xpu_float64",
- "test_foreach_matches_forloop_RMSprop_xpu_float64",
- "test_foreach_matches_forloop_Rprop_xpu_float64",
- "test_foreach_matches_forloop_SGD_xpu_float64",
- "test_fused_cpu_matches_cuda_AdamW_xpu_float64",
- "test_fused_cpu_matches_cuda_Adam_xpu_float64",
- "test_fused_cpu_matches_cuda_SGD_xpu_float64",
- "test_fused_matches_forloop_AdamW_xpu_float64",
- "test_fused_matches_forloop_Adam_xpu_float64",
- "test_fused_matches_forloop_SGD_xpu_float64",
- "test_set_default_dtype_works_with_foreach_ASGD_xpu_float64",
- "test_set_default_dtype_works_with_foreach_Adadelta_xpu_float64",
- "test_set_default_dtype_works_with_foreach_Adafactor_xpu_float64",
- "test_set_default_dtype_works_with_foreach_Adagrad_xpu_float64",
- "test_set_default_dtype_works_with_foreach_AdamW_xpu_float64",
- "test_set_default_dtype_works_with_foreach_Adam_xpu_float64",
- "test_set_default_dtype_works_with_foreach_Adamax_xpu_float64",
- "test_set_default_dtype_works_with_foreach_NAdam_xpu_float64",
- "test_set_default_dtype_works_with_foreach_RAdam_xpu_float64",
- "test_set_default_dtype_works_with_foreach_RMSprop_xpu_float64",
- "test_set_default_dtype_works_with_foreach_Rprop_xpu_float64",
- "test_set_default_dtype_works_with_foreach_SGD_xpu_float64",
- ),
+ "test_optim_xpu.py": None,
"test_spectral_ops_xpu.py": (
# CUDA specific case
"test_cufft_plan_cache_xpu_float64",