-
Notifications
You must be signed in to change notification settings - Fork 13.5k
[GISel][RISCV] Anyextend before copying f16 -> i32/i64 #94993
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
@llvm/pr-subscribers-llvm-globalisel @llvm/pr-subscribers-backend-risc-v Author: Yingwei Zheng (dtcxzyw). Changes: Fixes the type check failure reported in #94110 (comment). Patch is 37.37 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/94993.diff — 2 Files Affected:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index beee9405de02a..b9bcfd8fb79a6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -102,9 +102,16 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
void assignValueToReg(Register ValVReg, Register PhysReg,
const CCValAssign &VA) override {
- // If we're passing an f32 value into an i64, anyextend before copying.
- if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
- ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(64), ValVReg).getReg(0);
+ // If we're passing a smaller fp value into a larger integer register,
+ // anyextend before copying.
+ if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
+ ((VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::i64) &&
+ VA.getValVT() == MVT::f16))
+ ValVReg =
+ MIRBuilder
+ .buildAnyExt(LLT::scalar(VA.getLocVT().getScalarSizeInBits()),
+ ValVReg)
+ .getReg(0);
Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
index 0a0828e51893f..04fa62b195076 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
@@ -1,10 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator < %s \
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
-; RUN: llc -mtriple=riscv32 -mattr=+f -global-isel -stop-after=irtranslator < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+f -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IF %s
-; RUN: llc -mtriple=riscv32 -mattr=+zfh -global-isel -stop-after=irtranslator < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+zfh -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IZFH %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfh -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IZFH %s
define half @callee_half_in_regs(half %x) nounwind {
; RV32I-LABEL: name: callee_half_in_regs
@@ -34,6 +40,34 @@ define half @callee_half_in_regs(half %x) nounwind {
; RV32IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
+ ;
+ ; RV64I-LABEL: name: callee_half_in_regs
+ ; RV64I: bb.1 (%ir-block.0):
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IF-LABEL: name: callee_half_in_regs
+ ; RV64IF: bb.1 (%ir-block.0):
+ ; RV64IF-NEXT: liveins: $f10_f
+ ; RV64IF-NEXT: {{ $}}
+ ; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: PseudoRET implicit $f10_f
+ ;
+ ; RV64IZFH-LABEL: name: callee_half_in_regs
+ ; RV64IZFH: bb.1 (%ir-block.0):
+ ; RV64IZFH-NEXT: liveins: $f10_h
+ ; RV64IZFH-NEXT: {{ $}}
+ ; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
+ ; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
@@ -84,6 +118,53 @@ define half @caller_half_in_regs(half %x) nounwind {
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
+ ;
+ ; RV64I-LABEL: name: caller_half_in_regs
+ ; RV64I: bb.1 (%ir-block.0):
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit-def $x10
+ ; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT1]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IF-LABEL: name: caller_half_in_regs
+ ; RV64IF: bb.1 (%ir-block.0):
+ ; RV64IF-NEXT: liveins: $f10_f
+ ; RV64IF-NEXT: {{ $}}
+ ; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
+ ; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
+ ; RV64IF-NEXT: PseudoRET implicit $f10_f
+ ;
+ ; RV64IZFH-LABEL: name: caller_half_in_regs
+ ; RV64IZFH: bb.1 (%ir-block.0):
+ ; RV64IZFH-NEXT: liveins: $f10_h
+ ; RV64IZFH-NEXT: {{ $}}
+ ; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
+ ; RV64IZFH-NEXT: PseudoCALL target-flags(riscv-call) @caller_half_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit-def $f10_h
+ ; RV64IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
+ ; RV64IZFH-NEXT: PseudoRET implicit $f10_h
%y = call half @caller_half_in_regs(half %x)
ret half %y
}
@@ -119,6 +200,40 @@ define half @callee_half_mixed_with_int(i32 %x0, half %x) nounwind {
; RV32IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
+ ;
+ ; RV64I-LABEL: name: callee_half_mixed_with_int
+ ; RV64I: bb.1 (%ir-block.0):
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IF-LABEL: name: callee_half_mixed_with_int
+ ; RV64IF: bb.1 (%ir-block.0):
+ ; RV64IF-NEXT: liveins: $x10, $f10_f
+ ; RV64IF-NEXT: {{ $}}
+ ; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: PseudoRET implicit $f10_f
+ ;
+ ; RV64IZFH-LABEL: name: callee_half_mixed_with_int
+ ; RV64IZFH: bb.1 (%ir-block.0):
+ ; RV64IZFH-NEXT: liveins: $x10, $f10_h
+ ; RV64IZFH-NEXT: {{ $}}
+ ; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY1]](s16)
+ ; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
@@ -175,6 +290,65 @@ define half @caller_half_mixed_with_int(half %x, i32 %x0) nounwind {
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
+ ;
+ ; RV64I-LABEL: name: caller_half_mixed_with_int
+ ; RV64I: bb.1 (%ir-block.0):
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s32)
+ ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
+ ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT2]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IF-LABEL: name: caller_half_mixed_with_int
+ ; RV64IF: bb.1 (%ir-block.0):
+ ; RV64IF-NEXT: liveins: $x10, $f10_f
+ ; RV64IF-NEXT: {{ $}}
+ ; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s32)
+ ; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64IF-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT1]](s32)
+ ; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $f10_f
+ ; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT2]](s32)
+ ; RV64IF-NEXT: PseudoRET implicit $f10_f
+ ;
+ ; RV64IZFH-LABEL: name: caller_half_mixed_with_int
+ ; RV64IZFH: bb.1 (%ir-block.0):
+ ; RV64IZFH-NEXT: liveins: $x10, $f10_h
+ ; RV64IZFH-NEXT: {{ $}}
+ ; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64IZFH-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IZFH-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
+ ; RV64IZFH-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY]](s16)
+ ; RV64IZFH-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_mixed_with_int, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_h, implicit-def $f10_h
+ ; RV64IZFH-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
+ ; RV64IZFH-NEXT: PseudoRET implicit $f10_h
%y = call half @callee_half_mixed_with_int(i32 %x0, half %x)
ret half %y
}
@@ -232,6 +406,83 @@ define half @callee_half_return_stack1(i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %
; RV32IZFH-NEXT: [[COPY8:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY8]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
+ ;
+ ; RV64I-LABEL: name: callee_half_return_stack1
+ ; RV64I: bb.1 (%ir-block.0):
+ ; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+ ; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+ ; RV64I-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
+ ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64I-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+ ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64I-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
+ ; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64I-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
+ ; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64I-NEXT: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY7]](s64)
+ ; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
+ ; RV64I-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s64)
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC8]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64IF-LABEL: name: callee_half_return_stack1
+ ; RV64IF: bb.1 (%ir-block.0):
+ ; RV64IF-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_f
+ ; RV64IF-NEXT: {{ $}}
+ ; RV64IF-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64IF-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64IF-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IF-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+ ; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+ ; RV64IF-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+ ; RV64IF-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
+ ; RV64IF-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64IF-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+ ; RV64IF-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64IF-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
+ ; RV64IF-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64IF-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
+ ; RV64IF-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64IF-NEXT: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY7]](s64)
+ ; RV64IF-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
+ ; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: PseudoRET implicit $f10_f
+ ;
+ ; RV64IZFH-LABEL: name: callee_half_return_stack1
+ ; RV64IZFH: bb.1 (%ir-block.0):
+ ; RV64IZFH-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_h
+ ; RV64IZFH-NEXT: {{ $}}
+ ; RV64IZFH-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64IZFH-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64IZFH-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64IZFH-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+ ; RV64IZFH-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+ ; RV64IZFH-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+ ; RV64IZFH-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
+ ; RV64IZFH-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64IZFH-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+ ; RV64IZFH-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64IZFH-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
+ ; RV64IZFH-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64IZFH-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
+ ; RV64IZFH-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64IZFH-NEXT: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY7]](s64)
+ ; RV64IZFH-NEXT: [[COPY8:%[0-9]+]]:_(s16) = COPY $f10_h
+ ; RV64IZFH-NEXT: $f10_h = COPY [[COPY8]](s16)
+ ; RV64IZFH-NEXT: PseudoRET implicit $f10_h
ret half %x
}
@@ -333,6 +584,131 @@ define half @caller_half_return_stack1(i32 %v1, half %x) nounwind {
; RV32IZFH-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
; RV32IZFH-NEXT: $f10_h = COPY [[COPY2]](s16)
; RV32IZFH-NEXT: PseudoRET implicit $f10_h
+ ;
+ ; RV64I-LABEL: name: caller_half_return_stack1
+ ; RV64I: bb.1 (%ir-block.0):
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+ ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+ ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
+ ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
+ ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
+ ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
+ ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+ ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C7]](s64)
+ ; RV64I-NEXT: G_STORE [[ANYEXT8]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+ ; RV64I-NEXT: $x12 = COPY [[ANYEXT2]](s64)
+ ; RV64I-NEXT: $x13 = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: $x14 = COPY [[ANYEXT4]](s64)
+ ; RV64I-NEXT: $x15 = COPY [[ANYEXT5]](s64)
+ ; RV64I-NEXT: $x16 = COPY [[ANYEXT6]](s64)
+ ; RV64I-NEXT: $x17 = COPY [[ANYEXT7]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_st...
[truncated]
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
Fixes type check failure in llvm#94110 (comment)
Fixes type check failure in #94110 (comment)