From 2df7a29766f3af17b55ffc3d06a136c0a75c2b7a Mon Sep 17 00:00:00 2001
From: mengfei25
Date: Thu, 27 Feb 2025 21:23:38 +0800
Subject: [PATCH 1/2] Fix build for checking build wheel (#1417)

---
 .github/workflows/_linux_build.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/_linux_build.yml b/.github/workflows/_linux_build.yml
index fbaf5e5c5..6abcab211 100644
--- a/.github/workflows/_linux_build.yml
+++ b/.github/workflows/_linux_build.yml
@@ -120,7 +120,8 @@ jobs:
           echo "Wheel build successful, update last commit in the issue https://github.com/intel/torch-xpu-ops/issues/1280"
           gh --repo $repo issue view $commit_issue --json body -q .body | sed "s;${last_commit};${current_commit};g" | sed '/^$/d' > new_body.txt
           gh --repo $repo issue edit $commit_issue --body-file new_body.txt
-        else
+        fi
+        if [ ! -f dist/torch*.whl ]; then
           echo "Wheel build failed, use last commit in the issue https://github.com/intel/torch-xpu-ops/issues/1280"
           gh --repo $repo issue comment $commit_issue -b "Wheel build failed with commit [${current_commit}](https://github.com/pytorch/pytorch/tree/${current_commit}), refer ${build_url}. CC @intel/torch-xpu-ops-maintain @EikanWang @riverliuintel @fengyuan14 @xytintel @etaf @chuanqi129 @mengfei25"
           git clean -df .

From de783140c74d81e87c24fc6839a56420737241e1 Mon Sep 17 00:00:00 2001
From: "Cheng, Penghui"
Date: Fri, 28 Feb 2025 10:09:01 +0800
Subject: [PATCH 2/2] Skip unsupported UT for tunable ops (#1415)

Skip unsupported UT for tunable ops

Signed-off-by: Cheng
---
 test/xpu/skip_list_common.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 19a96c71d..271b50968 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -1421,6 +1421,9 @@
         "test_numeric_check_leak_tunableop_rocm_xpu_float32",
         "test_dump_results_on_exit_tunableop_xpu_float32",
         "test_rotating_buffer_tunableop_xpu_float32",
+        "test_gemm_bias_tunableop_xpu_bfloat16",
+        "test_scaled_gemm_tunableop_xpu_float8_e4m3fnuz",
+        "test_scaled_gemm_tunableop_xpu_float8_e5m2fnuz",
         # CUDA bias cases added in latest PyTorch
         # AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable'
         "test_matmul_check_entries_tunableop_xpu_float16",
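
Note on PATCH 1/2: below is a minimal bash sketch of the control flow the change produces in the wheel-check step of .github/workflows/_linux_build.yml. The condition that guards the success branch lies outside the hunk, so it is replaced by a stand-in variable here; the values of repo and commit_issue and the commented-out gh/git commands mirror the workflow but are illustrative only, not the exact step.

    #!/usr/bin/env bash
    # After the patch, the failure branch is no longer the `else` arm of the
    # success check: it fires whenever no wheel is present in dist/, so the
    # failure handling keys off the actual build artifact.
    repo="intel/torch-xpu-ops"   # assumed value, for illustration only
    commit_issue=1280            # issue that tracks the last known-good commit
    build_ok=true                # stand-in for the success condition outside the hunk

    if [ "$build_ok" = true ]; then
        echo "Wheel build successful, update last commit in issue #${commit_issue} of ${repo}"
        # gh --repo "$repo" issue edit "$commit_issue" --body-file new_body.txt
    fi
    # Check added by the patch; the glob assumes at most one wheel in dist/.
    if [ ! -f dist/torch*.whl ]; then
        echo "Wheel build failed, keep last commit recorded in issue #${commit_issue}"
        # gh --repo "$repo" issue comment "$commit_issue" -b "Wheel build failed ..."
        # git clean -df .   # the workflow also resets the working tree here
    fi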