Reimplement constrained 'trunc' using operand bundles #118253

Open · wants to merge 6 commits into main
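Under a strict floating-point environment, clang no longer selects the separate @llvm.experimental.constrained.trunc intrinsic. It emits the regular @llvm.trunc intrinsic and attaches the exception behavior to the call site as an "fpe.except" operand bundle plus a strictfp attribute group. A minimal before/after sketch in LLVM IR, distilled from the FileCheck updates below (the #0 group number is illustrative; its contents are taken from the new tests):

; before: strictness encoded in a dedicated intrinsic
%r0 = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict")

; after: the plain intrinsic, annotated at the call site
%r1 = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]

attributes #0 = { strictfp memory(inaccessiblemem: readwrite) }

Only trunc migrates here; ceil, floor, round, rint and the other rounding intrinsics keep their experimental.constrained forms, which is why the intrinsic checks in EmitNeonCall are renamed to ask for "legacy" constrained intrinsics explicitly.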
52 changes: 26 additions & 26 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -657,6 +657,17 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
}
}

+ // Emit a simple mangled intrinsic that has 1 argument and a return type
+ // matching the argument type.
+ static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                  unsigned IntrinsicID) {
+   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+   Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+   return CGF.Builder.CreateCall(F, Src0);
+ }
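Note: the helper emits the same llvm.trunc intrinsic in both FP modes. The CGFPOptionsRAII object scopes the FP options of the call expression, so under -ffp-exception-behavior=strict the IRBuilder annotates the call instead of switching intrinsics. Expected IR for __builtin_truncf(f), a sketch mirroring the UNCONSTRAINED/CONSTRAINED lines in the tests below:

; default environment
%0 = call float @llvm.trunc.f32(float %f)

; strict environment: same intrinsic, annotated call site
%1 = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]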

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -3238,9 +3249,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_truncf16:
case Builtin::BI__builtin_truncl:
case Builtin::BI__builtin_truncf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                                       Intrinsic::trunc,
-                                                       Intrinsic::experimental_constrained_trunc));
+ return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc));

case Builtin::BIlround:
case Builtin::BIlroundf:
@@ -6827,7 +6836,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j) {
- if (F->isConstrainedFPIntrinsic())
+ if (F->isLegacyConstrainedIntrinsic())
if (ai->getType()->isMetadataTy())
continue;
if (shift > 0 && shift == j)
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
}

- if (F->isConstrainedFPIntrinsic())
+ if (F->isLegacyConstrainedIntrinsic())
return Builder.CreateConstrainedFPCall(F, Ops, name);
else
return Builder.CreateCall(F, Ops, name);
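The rename above is what keeps the metadata-skipping loop correct: legacy constrained intrinsics carry their rounding and exception modes as trailing metadata parameters in the intrinsic signature, while the bundle form leaves the signature unchanged. Compare the two declaration shapes (a sketch, signatures as in the declare checks of constrained-math-builtins.c):

; legacy constrained intrinsic: modes are extra metadata parameters
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)

; bundle form: plain signature; strictness lives on each call site
declare double @llvm.trunc.f64(double)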
@@ -12989,13 +12998,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
- case NEON::BI__builtin_neon_vrndh_f16: {
+ case NEON::BI__builtin_neon_vrndh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
-   Int = Builder.getIsFPConstrained()
-             ? Intrinsic::experimental_constrained_trunc
-             : Intrinsic::trunc;
-   return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
- }
+   return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, HalfTy), Ops,
+                       "vrndz");

case NEON::BI__builtin_neon_vrnd32x_f32:
case NEON::BI__builtin_neon_vrnd32xq_f32:
case NEON::BI__builtin_neon_vrnd32x_f64:
@@ -13029,12 +13036,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
}
  case NEON::BI__builtin_neon_vrnd_v:
- case NEON::BI__builtin_neon_vrndq_v: {
-   Int = Builder.getIsFPConstrained()
-             ? Intrinsic::experimental_constrained_trunc
-             : Intrinsic::trunc;
-   return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
- }
+ case NEON::BI__builtin_neon_vrndq_v:
+   return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, Ty), Ops, "vrndz");

case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -18251,9 +18255,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
: Intrinsic::ceil;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
-   ID = Builder.getIsFPConstrained()
-            ? Intrinsic::experimental_constrained_trunc
-            : Intrinsic::trunc;
+   return emitUnaryFPBuiltin(*this, E, Intrinsic::trunc);

llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
: Builder.CreateCall(F, X);
@@ -18754,9 +18757,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
.getScalarVal();
case PPC::BI__builtin_ppc_friz:
case PPC::BI__builtin_ppc_frizs:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
-     *this, E, Intrinsic::trunc,
-     Intrinsic::experimental_constrained_trunc))
+ return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc))
.getScalarVal();
case PPC::BI__builtin_ppc_fsqrt:
case PPC::BI__builtin_ppc_fsqrts:
@@ -20536,8 +20537,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
CI = Intrinsic::experimental_constrained_nearbyint; break;
case 1: ID = Intrinsic::round;
CI = Intrinsic::experimental_constrained_round; break;
- case 5: ID = Intrinsic::trunc;
-   CI = Intrinsic::experimental_constrained_trunc; break;
+ case 5: ID = Intrinsic::trunc; break;
case 6: ID = Intrinsic::ceil;
CI = Intrinsic::experimental_constrained_ceil; break;
case 7: ID = Intrinsic::floor;
break;
}
if (ID != Intrinsic::not_intrinsic) {
- if (Builder.getIsFPConstrained()) {
+ if (Builder.getIsFPConstrained() && ID != Intrinsic::trunc) {
Function *F = CGM.getIntrinsic(CI, ResultType);
return Builder.CreateConstrainedFPCall(F, X);
} else {
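Of the __builtin_s390_vfidb lowerings, only the trunc case (last argument 5) now bypasses CreateConstrainedFPCall; the other rounding modes keep the legacy constrained intrinsics. An illustrative sketch, matching the SystemZ test updates below:

; __builtin_s390_vfidb(vd, 4, 1): still the legacy constrained round
%r0 = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %vd, metadata !"fpexcept.strict")

; __builtin_s390_vfidb(vd, 4, 5): plain trunc with the exception bundle
%r1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %vd) #0 [ "fpe.except"(metadata !"strict") ]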
2 changes: 1 addition & 1 deletion clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
@@ -792,7 +792,7 @@ float64x1_t test_vrndx_f64(float64x1_t a) {
// COMMON-LABEL: test_vrnd_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
- // CONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+ // CONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// COMMONIR: ret <1 x double> [[VRNDZ1_I]]
float64x1_t test_vrnd_f64(float64x1_t a) {
return vrnd_f64(a);
@@ -150,7 +150,7 @@ uint64_t test_vcvth_u64_f16 (float16_t a) {

// COMMON-LABEL: test_vrndh_f16
// UNCONSTRAINED: [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
- // CONSTRAINED: [[RND:%.*]] = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict")
+ // CONSTRAINED: [[RND:%.*]] = call half @llvm.trunc.f16(half %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// COMMONIR: ret half [[RND]]
float16_t test_vrndh_f16(float16_t a) {
return vrndh_f16(a);
@@ -298,3 +298,5 @@ float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
return vfmsh_f16(a, b, c);
}

+ // CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }

6 changes: 4 additions & 2 deletions clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
@@ -85,13 +85,13 @@ void test_float(void) {
vf = __builtin_vsx_xvrspiz(vf);
// CHECK-LABEL: try-xvrspiz
// CHECK-UNCONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}})
- // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !"fpexcept.strict")
+ // CHECK-CONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: xvrspiz

vd = __builtin_vsx_xvrdpiz(vd);
// CHECK-LABEL: try-xvrdpiz
// CHECK-UNCONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}})
// CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !"fpexcept.strict")
// CHECK-CONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: xvrdpiz

vf = __builtin_vsx_xvmaddasp(vf, vf, vf);
@@ -156,3 +156,5 @@ void test_float(void) {
// CHECK-CONSTRAINED: fneg <2 x double> [[RESULT1]]
// CHECK-ASM: xvnmsubadp
}

+ // CHECK-CONSTRAINED: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
@@ -45,11 +45,13 @@ void test_float(void) {
vd = __builtin_s390_vfidb(vd, 4, 1);
// CHECK: call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %{{.*}})
vd = __builtin_s390_vfidb(vd, 4, 5);
- // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}})
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
vd = __builtin_s390_vfidb(vd, 4, 6);
// CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}})
vd = __builtin_s390_vfidb(vd, 4, 7);
// CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}})
vd = __builtin_s390_vfidb(vd, 4, 4);
// CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
}

+ // CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
@@ -60,10 +60,11 @@ void test_float(void) {
vf = __builtin_s390_vfisb(vf, 4, 1);
// CHECK: call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
vf = __builtin_s390_vfisb(vf, 4, 5);
- // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
vf = __builtin_s390_vfisb(vf, 4, 6);
// CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
vf = __builtin_s390_vfisb(vf, 4, 7);
// CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
}

+ // CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
@@ -303,10 +303,10 @@ void test_float(void) {
// CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
vd = vec_roundz(vd);
- // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
vd = vec_trunc(vd);
- // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
vd = vec_roundc(vd);
// CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
vd = vec_round(vd);
}

+ // CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
@@ -495,16 +495,16 @@ void test_float(void) {
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7

vf = vec_roundz(vf);
- // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
vf = vec_trunc(vf);
- // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
vd = vec_roundz(vd);
- // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
vd = vec_trunc(vd);
- // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5

vf = vec_roundc(vf);
@@ -541,3 +541,5 @@ void test_float(void) {
// CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
// CHECK-ASM: vftcidb
}

+ // CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
6 changes: 3 additions & 3 deletions clang/test/CodeGen/X86/strictfp_builtins.c
@@ -27,7 +27,7 @@ void p(char *str, int x) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
- // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR3]]
+ // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
- // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR3]]
+ // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR4]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
- // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR3]]
+ // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR4]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
4 changes: 3 additions & 1 deletion clang/test/CodeGen/arm64-vrnd-constrained.c
@@ -14,7 +14,7 @@
float64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }
// COMMON-LABEL: rnd5
// UNCONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double>
- // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>
+ // CONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-ASM: frintz.2d v{{[0-9]+}}, v{{[0-9]+}}

float64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }
@@ -41,3 +41,5 @@ float64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }
// CONSTRAINED: call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>
// CHECK-ASM: frintx.2d v{{[0-9]+}}, v{{[0-9]+}}

+ // CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }

19 changes: 11 additions & 8 deletions clang/test/CodeGen/constrained-math-builtins.c
@@ -242,10 +242,10 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);

__builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f); __builtin_truncf128(f);

- // CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
- // CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
- // CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
- // CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+ // CHECK: call double @llvm.trunc.f64(double %{{.*}}) #[[ATTR_CALL:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
+ // CHECK: call float @llvm.trunc.f32(float %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+ // CHECK: call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+ // CHECK: call fp128 @llvm.trunc.f128(fp128 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
};

// CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
@@ -377,10 +377,10 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);
// CHECK: declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)

- // CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
- // CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
- // CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
- // CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
+ // CHECK: declare double @llvm.trunc.f64(double) #[[ATTR_FUNC:[0-9]+]]
+ // CHECK: declare float @llvm.trunc.f32(float) #[[ATTR_FUNC]]
+ // CHECK: declare x86_fp80 @llvm.trunc.f80(x86_fp80) #[[ATTR_FUNC]]
+ // CHECK: declare fp128 @llvm.trunc.f128(fp128) #[[ATTR_FUNC]]

#pragma STDC FP_CONTRACT ON
void bar(float f) {
// CHECK: fneg
// CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
};

+ // CHECK: attributes #[[ATTR_FUNC]] = { {{.*}} memory(none) }
+ // CHECK: attributes #[[ATTR_CALL]] = { strictfp memory(inaccessiblemem: readwrite) }