[AMDGPU] InstCombine llvm.amdgcn.ds.bpermute with uniform arguments #129895
Conversation
@llvm/pr-subscribers-llvm-transforms @llvm/pr-subscribers-backend-amdgpu

Author: Jay Foad (jayfoad)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/129895.diff

2 Files Affected:
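For context when reading the diff below: llvm.amdgcn.ds.bpermute(addr, src) is a wave-wide backwards permute in which each lane reads src from the lane selected by its addr operand, and addr is a byte address (lane index * 4). The patch exploits two special cases: if src is trivially uniform the gather is a no-op and the call folds to src, and if addr is trivially uniform every lane reads from one and the same lane, which is exactly what readlane does once the byte address is shifted right by 2. A rough before/after sketch in IR (illustrative only, not text from the patch; the committed tests are authoritative):

  ; uniform data operand: the call folds to the data value itself
  %v0 = call i32 @llvm.amdgcn.ds.bpermute(i32 %addr, i32 7)
  ; ... all uses of %v0 simply become i32 7

  ; uniform lane operand: rewrite to readlane on the same data
  %u  = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %x)
  %v1 = call i32 @llvm.amdgcn.ds.bpermute(i32 %u, i32 %src)
  ; ... becomes:
  ;   %idx = lshr i32 %u, 2
  ;   %v1  = call i32 @llvm.amdgcn.readlane.i32(i32 %src, i32 %idx)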
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index ebe740f884ea6..8925f2e974efa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1118,9 +1118,11 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
}
case Intrinsic::amdgcn_permlane64:
case Intrinsic::amdgcn_readfirstlane:
- case Intrinsic::amdgcn_readlane: {
- // If the first argument is uniform these intrinsics return it unchanged.
- const Use &Src = II.getArgOperandUse(0);
+ case Intrinsic::amdgcn_readlane:
+ case Intrinsic::amdgcn_ds_bpermute: {
+ // If the data argument is uniform these intrinsics return it unchanged.
+ unsigned SrcIdx = IID == Intrinsic::amdgcn_ds_bpermute ? 1 : 0;
+ const Use &Src = II.getArgOperandUse(SrcIdx);
if (isTriviallyUniform(Src))
return IC.replaceInstUsesWith(II, Src.get());
@@ -1152,6 +1154,22 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
}
}
+ // If the lane argument of bpermute is uniform, change it to readlane. This
+ // generates better code and can enable further optimizations because
+ // readlane is AlwaysUniform.
+ if (IID == Intrinsic::amdgcn_ds_bpermute) {
+ const Use &Lane = II.getArgOperandUse(0);
+ if (isTriviallyUniform(Lane)) {
+ Value *NewLane = IC.Builder.CreateLShr(Lane, 2);
+ Function *NewDecl = Intrinsic::getOrInsertDeclaration(
+ II.getModule(), Intrinsic::amdgcn_readlane, II.getType());
+ II.setCalledFunction(NewDecl);
+ II.setOperand(0, Src);
+ II.setOperand(1, NewLane);
+ return &II;
+ }
+ }
+
return std::nullopt;
}
case Intrinsic::amdgcn_writelane: {
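A note on the first hunk above: permlane64, readfirstlane and readlane already folded away when their data operand was trivially uniform; the change simply makes ds.bpermute take part, using operand 1 as its data operand instead of operand 0. A minimal sketch of that pre-existing fold (illustrative, not one of the committed tests):

  %u = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %x)
  %r = call i32 @llvm.amdgcn.readlane.i32(i32 %u, i32 %lane)
  ; %u is trivially uniform (readfirstlane is AlwaysUniform), so the readlane
  ; call is redundant and uses of %r fold to %u.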
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
index 3605c4a929c58..843b436aa1b0f 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
@@ -66,7 +66,7 @@ define double @test_constant_fold_rcp_f64_43() nounwind {
define float @test_constant_fold_rcp_f32_43_strictfp() nounwind strictfp {
; CHECK-LABEL: @test_constant_fold_rcp_f32_43_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.rcp.f32(float 4.300000e+01) #[[ATTR14:[0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.rcp.f32(float 4.300000e+01) #[[ATTR17:[0-9]+]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.rcp.f32(float 4.300000e+01) strictfp nounwind readnone
@@ -115,7 +115,7 @@ define half @test_constant_fold_sqrt_f16_0() nounwind {
define float @test_constant_fold_sqrt_f32_0() nounwind {
; CHECK-LABEL: @test_constant_fold_sqrt_f32_0(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 0.000000e+00) #[[ATTR15:[0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 0.000000e+00) #[[ATTR18:[0-9]+]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.sqrt.f32(float 0.0) nounwind readnone
@@ -124,7 +124,7 @@ define float @test_constant_fold_sqrt_f32_0() nounwind {
define double @test_constant_fold_sqrt_f64_0() nounwind {
; CHECK-LABEL: @test_constant_fold_sqrt_f64_0(
-; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double 0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double 0.000000e+00) #[[ATTR18]]
; CHECK-NEXT: ret double [[VAL]]
;
%val = call double @llvm.amdgcn.sqrt.f64(double 0.0) nounwind readnone
@@ -141,7 +141,7 @@ define half @test_constant_fold_sqrt_f16_neg0() nounwind {
define float @test_constant_fold_sqrt_f32_neg0() nounwind {
; CHECK-LABEL: @test_constant_fold_sqrt_f32_neg0(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float -0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float -0.000000e+00) #[[ATTR18]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.sqrt.f32(float -0.0) nounwind readnone
@@ -150,7 +150,7 @@ define float @test_constant_fold_sqrt_f32_neg0() nounwind {
define double @test_constant_fold_sqrt_f64_neg0() nounwind {
; CHECK-LABEL: @test_constant_fold_sqrt_f64_neg0(
-; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double -0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double -0.000000e+00) #[[ATTR18]]
; CHECK-NEXT: ret double [[VAL]]
;
%val = call double @llvm.amdgcn.sqrt.f64(double -0.0) nounwind readnone
@@ -718,7 +718,7 @@ define i1 @test_class_isnan_f32(float %x) nounwind {
define i1 @test_class_isnan_f32_strict(float %x) nounwind strictfp {
; CHECK-LABEL: @test_class_isnan_f32_strict(
-; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X:%.*]], i32 3) #[[ATTR16:[0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X:%.*]], i32 3) #[[ATTR19:[0-9]+]]
; CHECK-NEXT: ret i1 [[VAL]]
;
%val = call i1 @llvm.amdgcn.class.f32(float %x, i32 3) strictfp
@@ -736,7 +736,7 @@ define i1 @test_class_is_p0_n0_f32(float %x) nounwind {
define i1 @test_class_is_p0_n0_f32_strict(float %x) nounwind strictfp {
; CHECK-LABEL: @test_class_is_p0_n0_f32_strict(
-; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X:%.*]], i32 96) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X:%.*]], i32 96) #[[ATTR19]]
; CHECK-NEXT: ret i1 [[VAL]]
;
%val = call i1 @llvm.amdgcn.class.f32(float %x, i32 96) strictfp
@@ -2000,7 +2000,7 @@ define i64 @icmp_constant_inputs_false() {
define i64 @icmp_constant_inputs_true() {
; CHECK-LABEL: @icmp_constant_inputs_true(
-; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0:![0-9]+]]) #[[ATTR17:[0-9]+]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0:![0-9]+]]) #[[ATTR20:[0-9]+]]
; CHECK-NEXT: ret i64 [[RESULT]]
;
%result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 9, i32 8, i32 34)
@@ -2707,7 +2707,7 @@ define i64 @fcmp_constant_inputs_false() {
define i64 @fcmp_constant_inputs_true() {
; CHECK-LABEL: @fcmp_constant_inputs_true(
-; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0]]) #[[ATTR17]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0]]) #[[ATTR20]]
; CHECK-NEXT: ret i64 [[RESULT]]
;
%result = call i64 @llvm.amdgcn.fcmp.i64.f32(float 2.0, float 4.0, i32 4)
@@ -5845,7 +5845,7 @@ define double @trig_preop_constfold_neg32_segment() {
define double @trig_preop_constfold_strictfp() strictfp {
; CHECK-LABEL: @trig_preop_constfold_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.trig.preop.f64(double 3.454350e+02, i32 5) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.trig.preop.f64(double 3.454350e+02, i32 5) #[[ATTR19]]
; CHECK-NEXT: ret double [[VAL]]
;
%val = call double @llvm.amdgcn.trig.preop.f64(double 3.454350e+02, i32 5) strictfp
@@ -6214,7 +6214,7 @@ define half @test_constant_fold_log_f16_neg10() {
define float @test_constant_fold_log_f32_qnan_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_log_f32_qnan_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float 0x7FF8000000000000) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float 0x7FF8000000000000) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.log.f32(float 0x7FF8000000000000) strictfp
@@ -6223,7 +6223,7 @@ define float @test_constant_fold_log_f32_qnan_strictfp() strictfp {
define float @test_constant_fold_log_f32_0_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_log_f32_0_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float 0.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float 0.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.log.f32(float 0.0) strictfp
@@ -6232,7 +6232,7 @@ define float @test_constant_fold_log_f32_0_strictfp() strictfp {
define float @test_constant_fold_log_f32_neg0_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_log_f32_neg0_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float -0.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float -0.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.log.f32(float -0.0) strictfp
@@ -6241,7 +6241,7 @@ define float @test_constant_fold_log_f32_neg0_strictfp() strictfp {
define float @test_constant_fold_log_f32_neg_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_log_f32_neg_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float -1.000000e+01) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float -1.000000e+01) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.log.f32(float -10.0) strictfp
@@ -6258,7 +6258,7 @@ define float @test_constant_fold_log_f32_pinf_strictfp() strictfp {
define float @test_constant_fold_log_f32_ninf_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_log_f32_ninf_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float 0xFFF0000000000000) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.log.f32(float 0xFFF0000000000000) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.log.f32(float 0xFFF0000000000000) strictfp
@@ -6460,7 +6460,7 @@ define half @test_constant_fold_exp2_f16_neg10() {
define float @test_constant_fold_exp2_f32_qnan_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_qnan_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 0x7FF8000000000000) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 0x7FF8000000000000) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float 0x7FF8000000000000) strictfp
@@ -6469,7 +6469,7 @@ define float @test_constant_fold_exp2_f32_qnan_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_0_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_0_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 0.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 0.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float 0.0) strictfp
@@ -6478,7 +6478,7 @@ define float @test_constant_fold_exp2_f32_0_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_neg0_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_neg0_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -0.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -0.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float -0.0) strictfp
@@ -6487,7 +6487,7 @@ define float @test_constant_fold_exp2_f32_neg0_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_1_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_1_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 1.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 1.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float 1.0) strictfp
@@ -6496,7 +6496,7 @@ define float @test_constant_fold_exp2_f32_1_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_neg1_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_neg1_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -1.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -1.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float -1.0) strictfp
@@ -6505,7 +6505,7 @@ define float @test_constant_fold_exp2_f32_neg1_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_2_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_2_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 2.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float 2.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float 2.0) strictfp
@@ -6514,7 +6514,7 @@ define float @test_constant_fold_exp2_f32_2_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_neg2_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_neg2_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -2.000000e+00) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -2.000000e+00) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float -2.0) strictfp
@@ -6523,7 +6523,7 @@ define float @test_constant_fold_exp2_f32_neg2_strictfp() strictfp {
define float @test_constant_fold_exp2_f32_neg_strictfp() strictfp {
; CHECK-LABEL: @test_constant_fold_exp2_f32_neg_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -1.000000e+01) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.exp2.f32(float -1.000000e+01) #[[ATTR19]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.amdgcn.exp2.f32(float -10.0) strictfp
@@ -6571,6 +6571,7 @@ declare i32 @llvm.amdgcn.prng.b32(i32)
define i32 @prng_undef_i32() {
; CHECK-LABEL: @prng_undef_i32(
; CHECK-NEXT: ret i32 undef
+;
%prng = call i32 @llvm.amdgcn.prng.b32(i32 undef)
ret i32 %prng
}
@@ -6578,6 +6579,46 @@ define i32 @prng_undef_i32() {
define i32 @prng_poison_i32() {
; CHECK-LABEL: @prng_poison_i32(
; CHECK-NEXT: ret i32 poison
+;
%prng = call i32 @llvm.amdgcn.prng.b32(i32 poison)
ret i32 %prng
}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.ds.bpermute
+; --------------------------------------------------------------------
+
+define amdgpu_kernel void @ds_bpermute_uniform_src(ptr addrspace(1) %out, i32 %lane) {
+; CHECK-LABEL: @ds_bpermute_uniform_src(
+; CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.ds.bpermute(i32 %lane, i32 7)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @ds_bpermute_constant_lane(ptr addrspace(1) %out, i32 %src) {
+; CHECK-LABEL: @ds_bpermute_constant_lane(
+; CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC:%.*]], i32 7)
+; CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.ds.bpermute(i32 28, i32 %src)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @ds_bpermute_uniform_lane(ptr addrspace(1) %out, i32 %lanearg, i32 %src) {
+; CHECK-LABEL: @ds_bpermute_uniform_lane(
+; CHECK-NEXT: [[LANE:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[LANEARG:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[LANE]], 2
+; CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC:%.*]], i32 [[TMP1]])
+; CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+ %lane = call i32 @llvm.amdgcn.readfirstlane(i32 %lanearg)
+ %v = call i32 @llvm.amdgcn.ds.bpermute(i32 %lane, i32 %src)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
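For contrast, here is a shape the combine is expected to leave untouched (an illustrative sketch, not part of the committed tests): when neither operand is trivially uniform there is nothing to fold and the call stays as ds.bpermute.

declare i32 @llvm.amdgcn.ds.bpermute(i32, i32)
declare i32 @llvm.amdgcn.workitem.id.x()

define amdgpu_kernel void @ds_bpermute_divergent(ptr addrspace(1) %out, ptr addrspace(1) %in) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  ; divergent byte address: each lane asks for lane id * 4
  %addr = shl i32 %tid, 2
  ; divergent data: a per-lane load
  %gep = getelementptr i32, ptr addrspace(1) %in, i32 %tid
  %data = load i32, ptr addrspace(1) %gep, align 4
  %v = call i32 @llvm.amdgcn.ds.bpermute(i32 %addr, i32 %data)
  store i32 %v, ptr addrspace(1) %out, align 4
  ret void
}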
All the test attribute renumbering is annoying. Does it go away with a regenerate of the base checks?
> All the test attribute renumbering is annoying.

You're telling me! We should switch to printing attributes inline.

> Does it go away with a regenerate of the base checks?

Yes. Done.
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/203/builds/3742

LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/205/builds/2533

LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/204/builds/2554

LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/30/builds/17032
Reland llvm#129895 with a fix to avoid trying to combine bpermute of bitcast.
…uments (llvm#129895)" This reverts commit be5149a. It caused build failures in the openmp-offload-amdgpu-runtime buildbot and others.
…lvm#130133) Reland llvm#129895 with a fix to avoid trying to combine bpermute of bitcast.
No description provided.