Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[AArch64] Implement NEON vscale intrinsics #100347

Merged
merged 3 commits into from
Sep 26, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions clang/include/clang/Basic/arm_neon.td
Original file line number Diff line number Diff line change
Expand Up @@ -2126,3 +2126,9 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "neon,faminmax" in {
def FAMIN : WInst<"vamin", "...", "fhQdQfQh">;
def FAMAX : WInst<"vamax", "...", "fhQdQfQh">;
}

let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8,neon" in {
// fscale
def FSCALE_V128 : WInst<"vscale", "..(.S)", "QdQfQh">;
def FSCALE_V64 : WInst<"vscale", "(.q)(.q)(.qS)", "fh">;
}
8 changes: 8 additions & 0 deletions clang/lib/CodeGen/CGBuiltin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13573,6 +13573,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_famax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "famax");
}
case NEON::BI__builtin_neon_vscale_f16:
case NEON::BI__builtin_neon_vscaleq_f16:
case NEON::BI__builtin_neon_vscale_f32:
case NEON::BI__builtin_neon_vscaleq_f32:
case NEON::BI__builtin_neon_vscaleq_f64: {
Int = Intrinsic::aarch64_neon_fp8_fscale;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fscale");
}
}
}

Expand Down
58 changes: 58 additions & 0 deletions clang/test/CodeGen/aarch64-neon-fp8-intrinsics/acle_neon_fscale.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
#include <arm_neon.h>

// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon -target-feature +fp8 -O3 -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon -target-feature +fp8 -S -O3 -o /dev/null %s

// CHECK-LABEL: define dso_local <4 x half> @test_vscale_f16(
// CHECK-SAME: <4 x half> noundef [[VN:%.*]], <4 x i16> noundef [[VM:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FSCALE2_I:%.*]] = tail call <4 x half> @llvm.aarch64.neon.fp8.fscale.v4f16(<4 x half> [[VN]], <4 x i16> [[VM]])
// CHECK-NEXT: ret <4 x half> [[FSCALE2_I]]
//
float16x4_t test_vscale_f16(float16x4_t vn, int16x4_t vm) {
return vscale_f16(vn, vm);
}

// CHECK-LABEL: define dso_local <8 x half> @test_vscaleq_f16(
// CHECK-SAME: <8 x half> noundef [[VN:%.*]], <8 x i16> noundef [[VM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FSCALE2_I:%.*]] = tail call <8 x half> @llvm.aarch64.neon.fp8.fscale.v8f16(<8 x half> [[VN]], <8 x i16> [[VM]])
// CHECK-NEXT: ret <8 x half> [[FSCALE2_I]]
//
float16x8_t test_vscaleq_f16(float16x8_t vn, int16x8_t vm) {
return vscaleq_f16(vn, vm);

}

// CHECK-LABEL: define dso_local <2 x float> @test_vscale_f32(
// CHECK-SAME: <2 x float> noundef [[VN:%.*]], <2 x i32> noundef [[VM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FSCALE2_I:%.*]] = tail call <2 x float> @llvm.aarch64.neon.fp8.fscale.v2f32(<2 x float> [[VN]], <2 x i32> [[VM]])
// CHECK-NEXT: ret <2 x float> [[FSCALE2_I]]
//
float32x2_t test_vscale_f32(float32x2_t vn, int32x2_t vm) {
return vscale_f32(vn, vm);

}

// CHECK-LABEL: define dso_local <4 x float> @test_vscaleq_f32(
// CHECK-SAME: <4 x float> noundef [[VN:%.*]], <4 x i32> noundef [[VM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FSCALE2_I:%.*]] = tail call <4 x float> @llvm.aarch64.neon.fp8.fscale.v4f32(<4 x float> [[VN]], <4 x i32> [[VM]])
// CHECK-NEXT: ret <4 x float> [[FSCALE2_I]]
//
float32x4_t test_vscaleq_f32(float32x4_t vn, int32x4_t vm) {
return vscaleq_f32(vn, vm);

}

// CHECK-LABEL: define dso_local <2 x double> @test_vscale_f64(
// CHECK-SAME: <2 x double> noundef [[VN:%.*]], <2 x i64> noundef [[VM:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FSCALE2_I:%.*]] = tail call <2 x double> @llvm.aarch64.neon.fp8.fscale.v2f64(<2 x double> [[VN]], <2 x i64> [[VM]])
// CHECK-NEXT: ret <2 x double> [[FSCALE2_I]]
//
float64x2_t test_vscale_f64(float64x2_t vn, int64x2_t vm) {
return vscaleq_f64(vn, vm);
}
7 changes: 7 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsAArch64.td
Original file line number Diff line number Diff line change
Expand Up @@ -563,6 +563,13 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
def int_aarch64_neon_vcmla_rot90 : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_neon_vcmla_rot180 : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_neon_vcmla_rot270 : AdvSIMD_3VectorArg_Intrinsic;

// FP8 fscale
def int_aarch64_neon_fp8_fscale : DefaultAttrsIntrinsic<
[llvm_anyvector_ty],
[LLVMMatchType<0>,
LLVMVectorOfBitcastsToInt<0>],
[IntrNoMem]>;
}

let TargetPrefix = "aarch64" in {
Expand Down
20 changes: 20 additions & 0 deletions llvm/lib/Target/AArch64/AArch64InstrFormats.td
Original file line number Diff line number Diff line change
Expand Up @@ -6243,6 +6243,26 @@ multiclass SIMDThreeSameVectorDOT4<string asm> {
V128, v4f32, v16i8, null_frag>;
}

let mayRaiseFPException = 1, Uses = [FPCR] in
multiclass SIMDThreeVectorFscale<bit U, bit S, bits<3> opc,
string asm, SDPatternOperator OpNode> {
def v4f16 : BaseSIMDThreeSameVector<0, U, {S,0b10}, {0b00,opc}, V64,
asm, ".4h",
[(set (v4f16 V64:$Rd), (OpNode (v4f16 V64:$Rn), (v4i16 V64:$Rm)))]>;
def v8f16 : BaseSIMDThreeSameVector<1, U, {S,0b10}, {0b00,opc}, V128,
asm, ".8h",
[(set (v8f16 V128:$Rd), (OpNode (v8f16 V128:$Rn), (v8i16 V128:$Rm)))]>;
def v2f32 : BaseSIMDThreeSameVector<0, U, {S,0b01}, {0b11,opc}, V64,
asm, ".2s",
[(set (v2f32 V64:$Rd), (OpNode (v2f32 V64:$Rn), (v2i32 V64:$Rm)))]>;
def v4f32 : BaseSIMDThreeSameVector<1, U, {S,0b01}, {0b11,opc}, V128,
asm, ".4s",
[(set (v4f32 V128:$Rd), (OpNode (v4f32 V128:$Rn), (v4i32 V128:$Rm)))]>;
def v2f64 : BaseSIMDThreeSameVector<1, U, {S,0b11}, {0b11,opc}, V128,
asm, ".2d",
[(set (v2f64 V128:$Rd), (OpNode (v2f64 V128:$Rn), (v2i64 V128:$Rm)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD two register vector instructions.
//----------------------------------------------------------------------------
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64InstrInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -10136,7 +10136,7 @@ let Uses = [FPMR, FPCR], Predicates = [HasFP8] in {
defm BF2CVTL : SIMDMixedTwoVectorFP8<0b11, "bf2cvtl">;
defm FCVTN_F16_F8 : SIMDThreeSameSizeVectorCvt<"fcvtn">;
defm FCVTN_F32_F8 : SIMDThreeVectorCvt<"fcvtn">;
defm FSCALE : SIMDThreeSameVectorFP<0b1, 0b1, 0b111, "fscale", null_frag>;
defm FSCALE : SIMDThreeVectorFscale<0b1, 0b1, 0b111, "fscale", int_aarch64_neon_fp8_fscale>;
} // End let Predicates = [HasFP8]

// fminimum(abs(a), abs(b)) -> famin(a, b)
Expand Down
54 changes: 54 additions & 0 deletions llvm/test/CodeGen/AArch64/neon-fp8-fscale.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux -mattr=+neon,+fp8 < %s | FileCheck %s


define <4 x half> @test_fscale_f16(<4 x half> %vn, <4 x i16> %vm) {
; CHECK-LABEL: test_fscale_f16:
; CHECK: // %bb.0:
; CHECK-NEXT: fscale v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
%res = tail call <4 x half> @llvm.aarch64.neon.fp8.fscale.v4f16(<4 x half> %vn, <4 x i16> %vm)
ret <4 x half> %res
}

define <8 x half> @test_fscaleq_f16(<8 x half> %vn, <8 x i16> %vm) {
; CHECK-LABEL: test_fscaleq_f16:
; CHECK: // %bb.0:
; CHECK-NEXT: fscale v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%res = tail call <8 x half> @llvm.aarch64.neon.fp8.fscale.v8f16(<8 x half> %vn, <8 x i16> %vm)
ret <8 x half> %res
}

define <2 x float> @test_fscale_f32(<2 x float> %vn, <2 x i32> %vm) {
; CHECK-LABEL: test_fscale_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: fscale v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
%res = tail call <2 x float> @llvm.aarch64.neon.fp8.fscale.v2f32(<2 x float> %vn, <2 x i32> %vm)
ret <2 x float> %res
}

define <4 x float> @test_fscaleq_f32(<4 x float> %vn, <4 x i32> %vm) {
; CHECK-LABEL: test_fscaleq_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: fscale v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%res = tail call <4 x float> @llvm.aarch64.neon.fp8.fscale.v4f32(<4 x float> %vn, <4 x i32> %vm)
ret <4 x float> %res
}

define <2 x double> @test_fscaleq_f64(<2 x double> %vn, <2 x i64> %vm) {
; CHECK-LABEL: test_fscaleq_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: fscale v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
%res = tail call <2 x double> @llvm.aarch64.neon.fp8.fscale.v2f64(<2 x double> %vn, <2 x i64> %vm)
ret <2 x double> %res
}

declare <4 x half> @llvm.aarch64.neon.fp8.fscale.v4f16(<4 x half>, <4 x i16>)
declare <8 x half> @llvm.aarch64.neon.fp8.fscale.v8f16(<8 x half>, <8 x i16>)
declare <2 x float> @llvm.aarch64.neon.fp8.fscale.v2f32(<2 x float>, <2 x i32>)
declare <4 x float> @llvm.aarch64.neon.fp8.fscale.v4f32(<4 x float>, <4 x i32>)
declare <2 x double> @llvm.aarch64.neon.fp8.fscale.v2f64(<2 x double>, <2 x i64>)
Loading