From 8623b3472fa03d996358514049c896ca99086493 Mon Sep 17 00:00:00 2001
From: "Cheng, Penghui"
Date: Fri, 30 May 2025 02:07:21 +0000
Subject: [PATCH] Merge from main branch

Signed-off-by: Cheng, Penghui
---
 test/xpu/skip_list_common.py      | 163 +-----------------------------
 test/xpu/test_transformers_xpu.py |  30 ++++++
 2 files changed, 32 insertions(+), 161 deletions(-)

diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index da15d41ab9..e4e5ac87c1 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -4,20 +4,6 @@
         # XPU implementation doesn't claimn FP8 now
         # https://github.com/intel/torch-xpu-ops/issues/461
         "float8",
-        # workarounds for the following tests
-        # https://github.com/intel/torch-xpu-ops/issues/1214
-        "test_python_ref__refs_exp_xpu_complex128",
-        "test_python_ref__refs_sigmoid_xpu_complex128",
-        "test_python_ref_executor__refs_log2_executor_aten_xpu_complex128",
-        "test_python_ref_executor__refs_exp_executor_aten_xpu_complex128",
-        "test_python_ref_torch_fallback__refs_log2_xpu_complex128",
-        "test_python_ref_torch_fallback__refs_log10_xpu_complex128",
-        "test_python_ref_torch_fallback__refs_sigmoid_xpu_complex128",
-        "test_python_ref_executor__refs_log10_executor_aten_xpu_complex128",
-        "test_noncontiguous_samples_histogram_xpu_float32",
-        "test_python_ref_executor__refs_sigmoid_executor_aten_xpu_complex128",
-        # TODO: Fix the following tests
-        "test_out_warning_torch__scaled_mm_xpu",
         # To be removed from this file.
         # CUDA and XPU both XFAIL now.
         "test_out_narrow_copy_xpu_float32",
@@ -60,10 +46,6 @@
         "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_int64",
         "test_noncontiguous_samples_nn_functional_conv1d_xpu_int64",
         "test_noncontiguous_samples_nn_functional_conv2d_xpu_int64",
-        # Linalg OPs not supported
-        # RuntimeError: mode only supports CPU AND CUDA device type, got: xpu
-        # Issue https://github.com/intel/torch-xpu-ops/issues/327
-        "test_numpy_ref_linalg_tensorinv_xpu_float64",
         # RuntimeError: could not create a primitive descriptor for a deconvolution
         # https://github.com/intel/torch-xpu-ops/issues/253
         "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_complex64",
@@ -106,9 +88,6 @@
         # Jiterator is only supported on CUDA and ROCm GPUs, none are available.
         # https://github.com/intel/torch-xpu-ops/issues/584
         "_jiterator_",
-        # https://github.com/intel/torch-xpu-ops/issues/157
-        # Segfault:
-        "test_dtypes_nn_functional_multi_head_attention_forward_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
         # Linalg OPs not supported
         "test_dtypes_pca_lowrank_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
         "test_dtypes_svd_lowrank_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
@@ -161,25 +140,20 @@
         "test_dtypes_lu_solve_xpu",
         "test_dtypes_lu_xpu",
         "test_dtypes_mv_xpu",
-        "test_dtypes_nn_functional_scaled_dot_product_attention_xpu",
         "test_dtypes_norm_nuc_xpu",
         "test_dtypes_pinverse_xpu",
         "test_dtypes_qr_xpu",
         "test_dtypes_svd_xpu",
-        "test_dtypes_tensordot_xpu",
         "test_dtypes_triangular_solve_xpu",
         "test_noncontiguous_samples___rmatmul___xpu_complex64",
         "test_noncontiguous_samples___rmatmul___xpu_int64",
         "test_noncontiguous_samples_addbmm_xpu_complex64",
-        "test_noncontiguous_samples_addbmm_xpu_float32",
         "test_noncontiguous_samples_addbmm_xpu_int64",
         "test_noncontiguous_samples_addmm_decomposed_xpu_complex64",
         "test_noncontiguous_samples_addmm_decomposed_xpu_int64",
         "test_noncontiguous_samples_addmm_xpu_complex64",
-        "test_noncontiguous_samples_addmm_xpu_float32",
         "test_noncontiguous_samples_addmm_xpu_int64",
         "test_noncontiguous_samples_addmv_xpu_complex64",
-        "test_noncontiguous_samples_addmv_xpu_float32",
         "test_noncontiguous_samples_addmv_xpu_int64",
         "test_noncontiguous_samples_addr_xpu_complex64",
         "test_noncontiguous_samples_baddbmm_xpu_complex64",
@@ -194,8 +168,6 @@
         "test_noncontiguous_samples_einsum_xpu_complex64",
         "test_noncontiguous_samples_einsum_xpu_int64",
         "test_noncontiguous_samples_geqrf_xpu_complex64",
-        "test_noncontiguous_samples_inner_xpu_complex64",
-        "test_noncontiguous_samples_inner_xpu_int64",
         "test_noncontiguous_samples_linalg_cholesky_ex_xpu_complex64",
         "test_noncontiguous_samples_linalg_cholesky_xpu_complex64",
         "test_noncontiguous_samples_linalg_cond_xpu_complex64",
@@ -258,11 +230,7 @@
         "test_numpy_ref_addbmm_xpu_float64",
         "test_numpy_ref_addbmm_xpu_int64",
         "test_numpy_ref_linalg_tensorinv_xpu_complex128",
-        "test_out_addbmm_xpu_float32",
-        "test_out_addmm_xpu_float32",
         "test_out_addmv_xpu_float32",
-        "test_out_baddbmm_xpu_float32",
-        "test_out_mm_xpu_float32",
         "test_out_mv_xpu_float32",
         "test_out_requires_grad_error_addbmm_xpu_complex64",
         "test_out_requires_grad_error_addmm_decomposed_xpu_complex64",
@@ -273,7 +241,6 @@
         "test_out_requires_grad_error_cholesky_inverse_xpu_complex64",
         "test_out_requires_grad_error_cholesky_solve_xpu_complex64",
         "test_out_requires_grad_error_cholesky_xpu_complex64",
-        "test_out_requires_grad_error_inner_xpu_complex64",
         "test_out_requires_grad_error_linalg_cholesky_ex_xpu_complex64",
         "test_out_requires_grad_error_linalg_cholesky_xpu_complex64",
         "test_out_requires_grad_error_linalg_eig_xpu_complex64",
@@ -300,38 +267,23 @@
         "test_out_requires_grad_error_qr_xpu_complex64",
         "test_out_requires_grad_error_tensordot_xpu_complex64",
         "test_out_requires_grad_error_triangular_solve_xpu_complex64",
-        "test_out_warning_addmm_decomposed_xpu",
-        "test_out_warning_addmm_xpu",
-        "test_out_warning_addmv_xpu",
-        "test_out_warning_baddbmm_xpu",
-        "test_out_warning_bmm_xpu",
-        "test_out_warning_matmul_xpu",
-        "test_out_warning_mm_xpu",
-        "test_out_warning_mv_xpu",
-        "test_out_warning_nn_functional_linear_xpu",
         "test_python_ref__refs_linalg_svd_xpu_complex128",
         "test_python_ref__refs_linalg_svd_xpu_complex64",
         "test_python_ref__refs_linalg_svd_xpu_float64",
"test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex128", "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex64", "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_float64", - "test_python_ref_executor__refs_nn_functional_pdist_executor_aten_xpu_float64", "test_python_ref_meta__refs_linalg_svd_xpu_complex128", "test_python_ref_meta__refs_linalg_svd_xpu_complex64", "test_python_ref_meta__refs_linalg_svd_xpu_float64", - "test_python_ref_meta__refs_nn_functional_pdist_xpu_float64", "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex128", "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex64", "test_python_ref_torch_fallback__refs_linalg_svd_xpu_float64", - "test_python_ref_torch_fallback__refs_nn_functional_pdist_xpu_float64", "test_variant_consistency_eager___rmatmul___xpu_complex64", "test_variant_consistency_eager_addmm_decomposed_xpu_complex64", "test_variant_consistency_eager_addmm_xpu_complex64", - "test_variant_consistency_eager_addmm_xpu_float32", "test_variant_consistency_eager_addmv_xpu_complex64", - "test_variant_consistency_eager_addmv_xpu_float32", "test_variant_consistency_eager_baddbmm_xpu_complex64", - "test_variant_consistency_eager_baddbmm_xpu_float32", "test_variant_consistency_eager_bmm_xpu_complex64", "test_variant_consistency_eager_cholesky_inverse_xpu_complex64", "test_variant_consistency_eager_cholesky_solve_xpu_complex64", @@ -340,7 +292,6 @@ "test_variant_consistency_eager_cov_xpu_complex64", "test_variant_consistency_eager_einsum_xpu_complex64", "test_variant_consistency_eager_geqrf_xpu_complex64", - "test_variant_consistency_eager_inner_xpu_complex64", "test_variant_consistency_eager_linalg_cholesky_ex_xpu_complex64", "test_variant_consistency_eager_linalg_cholesky_xpu_complex64", "test_variant_consistency_eager_linalg_cond_xpu_complex64", @@ -414,7 +365,6 @@ "test_conj_view_cov_xpu_complex64", "test_conj_view_einsum_xpu_complex64", "test_conj_view_geqrf_xpu_complex64", - "test_conj_view_inner_xpu_complex64", "test_conj_view_linalg_cholesky_ex_xpu_complex64", "test_conj_view_linalg_cholesky_xpu_complex64", "test_conj_view_linalg_cond_xpu_complex64", @@ -478,7 +428,6 @@ "test_neg_conj_view_corrcoef_xpu_complex128", "test_neg_conj_view_cov_xpu_complex128", "test_neg_conj_view_geqrf_xpu_complex128", - "test_neg_conj_view_inner_xpu_complex128", "test_neg_conj_view_linalg_cholesky_ex_xpu_complex128", "test_neg_conj_view_linalg_cholesky_xpu_complex128", "test_neg_conj_view_linalg_cond_xpu_complex128", @@ -520,73 +469,11 @@ "test_neg_conj_view_qr_xpu_complex128", "test_neg_conj_view_tensordot_xpu_complex128", "test_neg_conj_view_triangular_solve_xpu_complex128", - "test_neg_view___rmatmul___xpu_float64", "test_neg_view__refs_linalg_svd_xpu_float64", - "test_neg_view__refs_nn_functional_pdist_xpu_float64", - "test_neg_view_addbmm_xpu_float64", "test_neg_view_addmm_decomposed_xpu_float64", "test_neg_view_addmm_xpu_float64", "test_neg_view_addmv_xpu_float64", - "test_neg_view_addr_xpu_float64", "test_neg_view_baddbmm_xpu_float64", - "test_neg_view_bmm_xpu_float64", - "test_neg_view_cdist_xpu_float64", - "test_neg_view_cholesky_inverse_xpu_float64", - "test_neg_view_cholesky_solve_xpu_float64", - "test_neg_view_cholesky_xpu_float64", - "test_neg_view_corrcoef_xpu_float64", - "test_neg_view_cov_xpu_float64", - "test_neg_view_einsum_xpu_float64", - "test_neg_view_geqrf_xpu_float64", - "test_neg_view_inner_xpu_float64", - "test_neg_view_linalg_cholesky_ex_xpu_float64", - "test_neg_view_linalg_cholesky_xpu_float64", - 
"test_neg_view_linalg_cond_xpu_float64", - "test_neg_view_linalg_eig_xpu_float64", - "test_neg_view_linalg_eigh_xpu_float64", - "test_neg_view_linalg_eigvalsh_xpu_float64", - "test_neg_view_linalg_householder_product_xpu_float64", - "test_neg_view_linalg_inv_ex_xpu_float64", - "test_neg_view_linalg_inv_xpu_float64", - "test_neg_view_linalg_ldl_factor_ex_xpu_float64", - "test_neg_view_linalg_ldl_factor_xpu_float64", - "test_neg_view_linalg_ldl_solve_xpu_float64", - "test_neg_view_linalg_lstsq_grad_oriented_xpu_float64", - "test_neg_view_linalg_lstsq_xpu_float64", - "test_neg_view_linalg_matrix_norm_xpu_float64", - "test_neg_view_linalg_matrix_power_xpu_float64", - "test_neg_view_linalg_matrix_rank_hermitian_xpu_float64", - "test_neg_view_linalg_matrix_rank_xpu_float64", - "test_neg_view_linalg_multi_dot_xpu_float64", - "test_neg_view_linalg_norm_subgradients_at_zero_xpu_float64", - "test_neg_view_linalg_norm_xpu_float64", - "test_neg_view_linalg_pinv_hermitian_xpu_float64", - "test_neg_view_linalg_pinv_singular_xpu_float64", - "test_neg_view_linalg_pinv_xpu_float64", - "test_neg_view_linalg_qr_xpu_float64", - "test_neg_view_linalg_solve_triangular_xpu_float64", - "test_neg_view_linalg_svd_xpu_float64", - "test_neg_view_linalg_svdvals_xpu_float64", - "test_neg_view_linalg_tensorinv_xpu_float64", - "test_neg_view_linalg_tensorsolve_xpu_float64", - "test_neg_view_logdet_xpu_float64", - "test_neg_view_lu_xpu_float64", - "test_neg_view_matmul_xpu_float64", - "test_neg_view_mm_xpu_float64", - "test_neg_view_mv_xpu_float64", - "test_neg_view_nn_functional_bilinear_xpu_float64", - "test_neg_view_nn_functional_linear_xpu_float64", - "test_neg_view_nn_functional_multi_head_attention_forward_xpu_float64", - "test_neg_view_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_neg_view_norm_nuc_xpu_float64", - "test_neg_view_ormqr_xpu_float64", - "test_neg_view_pca_lowrank_xpu_float64", - "test_neg_view_pinverse_xpu_float64", - "test_neg_view_qr_xpu_float64", - "test_neg_view_svd_lowrank_xpu_float64", - "test_neg_view_svd_xpu_float64", - "test_neg_view_tensordot_xpu_float64", - "test_neg_view_triangular_solve_xpu_float64", "test_noncontiguous_samples_pca_lowrank_xpu_complex64", "test_noncontiguous_samples_svd_lowrank_xpu_complex64", "test_variant_consistency_eager_pca_lowrank_xpu_complex64", @@ -607,35 +494,10 @@ "test_dtypes_histogram_xpu", # Unexpected success, CUDA got XFAIL because CUDA does not have historgramadd supported "test_errors_histogramdd_xpu", - # 2025 bundle std::pow complex result is different on host and device - "test_python_ref__refs_square_xpu_complex64", - "test_python_ref_torch_fallback__refs_square_xpu_complex64", - "test_python_ref_torch_fallback__refs_exp_xpu_complex128", # Failed on rolling driver, passed on preci "test_python_ref__refs_div_trunc_rounding_xpu_float64", "test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64", "test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64", - # TODO: passed from source code building version, investigate - "test_python_ref__refs_log2_xpu_complex128", - # The following dtypes did not work in backward but are listed by the OpInfo: {torch.bfloat16}. 
- "test_dtypes_fft_fft2_xpu", - "test_dtypes_fft_fft_xpu", - "test_dtypes_fft_fftn_xpu", - "test_dtypes_fft_hfft2_xpu", - "test_dtypes_fft_hfft_xpu", - "test_dtypes_fft_hfftn_xpu", - "test_dtypes_fft_ifft2_xpu", - "test_dtypes_fft_ifft_xpu", - "test_dtypes_fft_ifftn_xpu", - "test_dtypes_fft_ihfft2_xpu", - "test_dtypes_fft_ihfft_xpu", - "test_dtypes_fft_ihfftn_xpu", - "test_dtypes_fft_irfft2_xpu", - "test_dtypes_fft_irfft_xpu", - "test_dtypes_fft_irfftn_xpu", - "test_dtypes_fft_rfft2_xpu", - "test_dtypes_fft_rfft_xpu", - "test_dtypes_fft_rfftn_xpu", ), "test_binary_ufuncs_xpu.py": ( "test_fmod_remainder_by_zero_integral_xpu_int64", # zero division is an undefined behavior: different handles on different backends @@ -718,25 +580,11 @@ # oneDNN issues # Double and complex datatype matmul is not supported in oneDNN # https://github.com/intel/torch-xpu-ops/issues/253 - "test_sdp_math_gradcheck_contiguous_inputs_False_xpu", - "test_sdp_math_gradcheck_contiguous_inputs_True_xpu", - "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_False_xpu", - "test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_False_xpu", - "test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_False_xpu", - "test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_False_xpu", "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_0_xpu", "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_2_xpu", "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_0_xpu", "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu", "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu", @@ -745,19 +593,12 @@ "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_0_xpu", "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_0_xpu", "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_2_xpu", - 
"test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_0_xpu", "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu", "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_5_xpu", "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu", # https://github.com/intel/torch-xpu-ops/issues/1432 "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu", "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_3_key_padding_mask_dim_2_bool_xpu", @@ -765,13 +606,13 @@ "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_True_d_model_12_xpu", "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_False_d_model_12_xpu", "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_True_d_model_12_xpu", + # XPU didn't support torch._scaled_dot_product_efficient_attention and F.scaled_dot_product_attention + "test_mem_eff_attention_fail_with_batch_size_geq_65536", ), "test_complex_xpu.py": None, "test_modules_xpu.py": ( # oneDNN issues # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_cpu_gpu_parity_nn_Bilinear_xpu_float64", - "test_cpu_gpu_parity_nn_GRUCell_xpu_float64", "test_cpu_gpu_parity_nn_GRU_eval_mode_xpu_float64", "test_cpu_gpu_parity_nn_GRU_train_mode_xpu_float64", "test_cpu_gpu_parity_nn_LSTMCell_xpu_float64", diff --git a/test/xpu/test_transformers_xpu.py b/test/xpu/test_transformers_xpu.py index 24409cf1c1..de6e43122e 100644 --- a/test/xpu/test_transformers_xpu.py +++ b/test/xpu/test_transformers_xpu.py @@ -3,6 +3,8 @@ import contextlib import torch +import torch.nn.functional as F +from torch.nn.attention import sdpa_kernel, SDPBackend from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_utils import parametrize, run_tests @@ -51,6 +53,34 @@ def mha_native_args(self, nb_heads, bias): mha(query=x, key=x, value=x, key_padding_mask=pad_mask) + def _test_mem_eff_attention_fail_with_batch_size_geq_65536(self): + query = torch.rand([2**16, 2, 2, 8], device="xpu", dtype=torch.float16) + key = torch.rand([2**16, 2, 2, 8], device="xpu", dtype=torch.float16) + value = torch.rand([2**16, 2, 2, 8], device="xpu", dtype=torch.float16) + with sdpa_kernel(backends=SDPBackend.EFFICIENT_ATTENTION): + out = F.scaled_dot_product_attention(query, key, value) + out_cpu = F.scaled_dot_product_attention(query.cpu(), key.cpu(), value.cpu()) + self.assertEqual(out, out_cpu, atol=1e-3, rtol=1e-4) + + def _test_mem_eff_attention_fail_with_batch_size_geq_65536_error(self): + query = torch.rand([2**16, 2, 2, 8], device="xpu", dtype=torch.float16) + key = torch.rand([2**16, 2, 2, 8], device="xpu", dtype=torch.float16) + value = torch.rand([2**16, 2, 2, 8], device="xpu", dtype=torch.float16) + error_str = ( + r"Efficient attention cannot produce valid seed, " + r"logsumexp and offset outputs when the batch size exceeds \(65535\)\." 
+        )
+        with self.assertRaisesRegex(RuntimeError, error_str):
+            torch._scaled_dot_product_efficient_attention(
+                query, key, value, attn_bias=None, compute_log_sumexp=True
+            )
+
+    TestSDPAFailureModes.test_mem_eff_attention_fail_with_batch_size_geq_65536 = (
+        _test_mem_eff_attention_fail_with_batch_size_geq_65536
+    )
+    TestSDPAFailureModes.test_mem_eff_attention_fail_with_batch_size_geq_65536_error = (
+        _test_mem_eff_attention_fail_with_batch_size_geq_65536_error
+    )
     TestTransformers.test_mha_native_args = mha_native_args
 
 instantiate_device_type_tests(