
Commit 73a75d3

Merge pull request #6 from Disasm/rustc-pick-min
Apply patches for RISC-V 64-bit support
2 parents 683d352 + 70b9bc4 · commit 73a75d3

35 files changed: +7693 −269 lines

llvm/include/llvm/IR/IntrinsicsRISCV.td

+25
@@ -41,4 +41,29 @@ def int_riscv_masked_cmpxchg_i32
                      llvm_i32_ty, llvm_i32_ty],
                     [IntrArgMemOnly, NoCapture<0>]>;
 
+class MaskedAtomicRMW64Intrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+class MaskedAtomicRMW64WithSextIntrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty,
+                 llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+def int_riscv_masked_atomicrmw_xchg_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_add_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_sub_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_nand_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_max_i64 : MaskedAtomicRMW64WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_min_i64 : MaskedAtomicRMW64WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_umax_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_umin_i64 : MaskedAtomicRMW64Intrinsic;
+
+def int_riscv_masked_cmpxchg_i64
+    : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty,
+                                llvm_i64_ty, llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
 } // TargetPrefix = "riscv"
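
For orientation, here is a small hypothetical C++ example (not part of the commit) of source-level atomics that plausibly exercise the paths this patch adds when compiled for riscv64 with the A extension; which statement lowers to which intrinsic or pseudo-instruction is an assumption based on the definitions in this diff, not something the diff itself states.

#include <atomic>
#include <cstdint>

void rv64_atomic_examples(uint64_t *p64, std::atomic<uint8_t> &x8,
                          std::atomic<uint64_t> &x64) {
  // 64-bit nand has no single AMO instruction, so it presumably goes through
  // the new PseudoAtomicLoadNand64 lr.d/sc.d expansion added below.
  __atomic_fetch_nand(p64, uint64_t{0xff}, __ATOMIC_SEQ_CST);

  // Sub-word atomics are performed on the containing aligned word with a
  // mask; on a 64-bit target the masked helpers carry i64 operands, which is
  // what the int_riscv_masked_atomicrmw_*_i64 definitions above provide.
  x8.fetch_add(1, std::memory_order_acq_rel);

  // A full-width 64-bit compare-and-swap would use the PseudoCmpXchg64
  // expansion added in RISCVExpandPseudoInsts.cpp below.
  uint64_t expected = 0;
  x64.compare_exchange_strong(expected, 42);
}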

llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp

+67-14
@@ -87,6 +87,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case RISCV::PseudoAtomicLoadNand32:
     return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                              NextMBBI);
+  case RISCV::PseudoAtomicLoadNand64:
+    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
+                             NextMBBI);
   case RISCV::PseudoMaskedAtomicSwap32:
     return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                              NextMBBI);
@@ -111,6 +114,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                 NextMBBI);
   case RISCV::PseudoCmpXchg32:
     return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
+  case RISCV::PseudoCmpXchg64:
+    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
   case RISCV::PseudoMaskedCmpXchg32:
     return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
   }
@@ -152,12 +157,61 @@ static unsigned getSCForRMW32(AtomicOrdering Ordering) {
   }
 }
 
+static unsigned getLRForRMW64(AtomicOrdering Ordering) {
+  switch (Ordering) {
+  default:
+    llvm_unreachable("Unexpected AtomicOrdering");
+  case AtomicOrdering::Monotonic:
+    return RISCV::LR_D;
+  case AtomicOrdering::Acquire:
+    return RISCV::LR_D_AQ;
+  case AtomicOrdering::Release:
+    return RISCV::LR_D;
+  case AtomicOrdering::AcquireRelease:
+    return RISCV::LR_D_AQ;
+  case AtomicOrdering::SequentiallyConsistent:
+    return RISCV::LR_D_AQ_RL;
+  }
+}
+
+static unsigned getSCForRMW64(AtomicOrdering Ordering) {
+  switch (Ordering) {
+  default:
+    llvm_unreachable("Unexpected AtomicOrdering");
+  case AtomicOrdering::Monotonic:
+    return RISCV::SC_D;
+  case AtomicOrdering::Acquire:
+    return RISCV::SC_D;
+  case AtomicOrdering::Release:
+    return RISCV::SC_D_RL;
+  case AtomicOrdering::AcquireRelease:
+    return RISCV::SC_D_RL;
+  case AtomicOrdering::SequentiallyConsistent:
+    return RISCV::SC_D_AQ_RL;
+  }
+}
+
+static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
+  if (Width == 32)
+    return getLRForRMW32(Ordering);
+  if (Width == 64)
+    return getLRForRMW64(Ordering);
+  llvm_unreachable("Unexpected LR width\n");
+}
+
+static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
+  if (Width == 32)
+    return getSCForRMW32(Ordering);
+  if (Width == 64)
+    return getSCForRMW64(Ordering);
+  llvm_unreachable("Unexpected SC width\n");
+}
+
 static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                    DebugLoc DL, MachineBasicBlock *ThisMBB,
                                    MachineBasicBlock *LoopMBB,
                                    MachineBasicBlock *DoneMBB,
                                    AtomicRMWInst::BinOp BinOp, int Width) {
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
   unsigned DestReg = MI.getOperand(0).getReg();
   unsigned ScratchReg = MI.getOperand(1).getReg();
   unsigned AddrReg = MI.getOperand(2).getReg();
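
As a rough cross-reference (not from the patch), the two new switch statements can be restated over C++ memory orders; the helper names below are hypothetical and only the suffix choices are taken from getLRForRMW64/getSCForRMW64 above.

#include <atomic>

// Suffix the expansion picks for the load-reserved half of an RV64 loop.
const char *lrDSuffix(std::memory_order mo) {
  switch (mo) {
  case std::memory_order_relaxed: return "lr.d";      // Monotonic
  case std::memory_order_acquire:
  case std::memory_order_acq_rel: return "lr.d.aq";   // Acquire / AcquireRelease
  case std::memory_order_seq_cst: return "lr.d.aqrl"; // SequentiallyConsistent
  case std::memory_order_release: return "lr.d";      // no .aq on the load side
  default:                        return "lr.d";
  }
}

// Suffix for the matching store-conditional.
const char *scDSuffix(std::memory_order mo) {
  switch (mo) {
  case std::memory_order_relaxed:
  case std::memory_order_acquire: return "sc.d";      // no release semantics needed
  case std::memory_order_release:
  case std::memory_order_acq_rel: return "sc.d.rl";
  case std::memory_order_seq_cst: return "sc.d.aqrl";
  default:                        return "sc.d";
  }
}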
@@ -166,11 +220,11 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
       static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
 
   // .loop:
-  //   lr.w dest, (addr)
+  //   lr.[w|d] dest, (addr)
   //   binop scratch, dest, val
-  //   sc.w scratch, scratch, (addr)
+  //   sc.[w|d] scratch, scratch, (addr)
   //   bnez scratch, loop
-  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
       .addReg(AddrReg);
   switch (BinOp) {
   default:
@@ -184,7 +238,7 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
         .addImm(-1);
     break;
   }
-  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
       .addReg(AddrReg)
       .addReg(ScratchReg);
   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
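
A minimal sketch (mine, not emitted verbatim by this pass) of the loop shape the .loop comments above describe, for a sequentially consistent 64-bit atomicrmw nand on an RV64 target with the A extension. Register roles mirror DestReg/ScratchReg/AddrReg, and the .aqrl suffixes follow the getLRForRMW64/getSCForRMW64 tables.

#include <cstdint>

uint64_t atomic_nand64_seq_cst(uint64_t *addr, uint64_t val) {
  uint64_t dest;
  uint64_t scratch;
  asm volatile("1:\n\t"                     // .loop:
               "lr.d.aqrl %0, (%2)\n\t"     //   lr.d dest, (addr)
               "and       %1, %0, %3\n\t"   //   binop scratch, dest, val
               "not       %1, %1\n\t"       //   (nand = and, then invert)
               "sc.d.aqrl %1, %1, (%2)\n\t" //   sc.d scratch, scratch, (addr)
               "bnez      %1, 1b"           //   bnez scratch, loop
               : "=&r"(dest), "=&r"(scratch)
               : "r"(addr), "r"(val)
               : "memory");
  return dest; // the value before the update, as atomicrmw returns
}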
@@ -219,7 +273,7 @@ static void doMaskedAtomicBinOpExpansion(
     const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
     MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
     MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
   unsigned DestReg = MI.getOperand(0).getReg();
   unsigned ScratchReg = MI.getOperand(1).getReg();
   unsigned AddrReg = MI.getOperand(2).getReg();
@@ -333,7 +387,7 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp(
     MachineBasicBlock::iterator &NextMBBI) {
   assert(IsMasked == true &&
          "Should only need to expand masked atomic max/min");
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
 
   MachineInstr &MI = *MBBI;
   DebugLoc DL = MI.getDebugLoc();
@@ -451,7 +505,6 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp(
 bool RISCVExpandPseudo::expandAtomicCmpXchg(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
     int Width, MachineBasicBlock::iterator &NextMBBI) {
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
   MachineInstr &MI = *MBBI;
   DebugLoc DL = MI.getDebugLoc();
   MachineFunction *MF = MBB.getParent();
@@ -483,18 +536,18 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
 
   if (!IsMasked) {
     // .loophead:
-    //   lr.w dest, (addr)
+    //   lr.[w|d] dest, (addr)
     //   bne dest, cmpval, done
-    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
         .addReg(AddrReg);
     BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
         .addReg(DestReg)
         .addReg(CmpValReg)
        .addMBB(DoneMBB);
     // .looptail:
-    //   sc.w scratch, newval, (addr)
+    //   sc.[w|d] scratch, newval, (addr)
     //   bnez scratch, loophead
-    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
         .addReg(AddrReg)
         .addReg(NewValReg);
     BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
@@ -507,7 +560,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
     //   and scratch, dest, mask
     //   bne scratch, cmpval, done
     unsigned MaskReg = MI.getOperand(5).getReg();
-    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
         .addReg(AddrReg);
     BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
         .addReg(DestReg)
@@ -525,7 +578,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
     //   bnez scratch, loophead
     insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                       MaskReg, ScratchReg);
-    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
         .addReg(AddrReg)
         .addReg(ScratchReg);
     BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
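
And a corresponding sketch (again not part of the patch) of the unmasked 64-bit cmpxchg expansion that the .loophead/.looptail comments above describe, assuming sequentially consistent ordering and an RV64 target with the A extension; register roles mirror DestReg/ScratchReg/AddrReg/CmpValReg/NewValReg.

#include <cstdint>

uint64_t cmpxchg64_seq_cst(uint64_t *addr, uint64_t cmpval, uint64_t newval) {
  uint64_t dest;
  uint64_t scratch;
  asm volatile("1:\n\t"                     // .loophead:
               "lr.d.aqrl %0, (%2)\n\t"     //   lr.d dest, (addr)
               "bne       %0, %3, 2f\n\t"   //   bne dest, cmpval, done
               "sc.d.aqrl %1, %4, (%2)\n\t" // .looptail: sc.d scratch, newval, (addr)
               "bnez      %1, 1b\n\t"       //   bnez scratch, loophead
               "2:"                         // .done:
               : "=&r"(dest), "=&r"(scratch)
               : "r"(addr), "r"(cmpval), "r"(newval)
               : "memory");
  return dest; // value observed at addr (equals cmpval on success)
}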
