[AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bits=128. #134068

Merged
Changes from 2 commits
48 changes: 47 additions & 1 deletion llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -87,6 +87,10 @@ static cl::opt<unsigned> LdStConstLimit("aarch64-load-store-const-scan-limit",
static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
cl::init(true), cl::Hidden);

// Enable SVE fill/spill pairing for VLS 128.
static cl::opt<bool> EnableSVEFillSpillPairing("aarch64-sve-fill-spill-pairing",
cl::init(true), cl::Hidden);
Collaborator:

Given that the transformation is little-endian specific and the option's primary use case is debugging, perhaps it's worth reversing the polarity and implementing DisableSVEFillSpillPairing -> aarch64-disable-sve-fill-pairing?
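
A minimal sketch of what the suggested reversed-polarity option could look like (the name and flag string are the reviewer's proposal, not code from this PR):

    // Pairing is on by default; the flag exists only to switch the
    // transformation off, e.g. for debugging (hypothetical, per the
    // suggestion above).
    static cl::opt<bool>
        DisableSVEFillSpillPairing("aarch64-disable-sve-fill-pairing",
                                   cl::init(false), cl::Hidden);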

Contributor Author:

Thanks, that makes sense. I've renamed this.

Collaborator (@paulwalker-arm, Apr 7, 2025):

Just going to throw this out there, but when inspecting tryToPairLdStInst I see there is already provision to disable the use of pair instructions via the target features disable-ldp and disable-stp, so perhaps we don't need a dedicated flag after all? That might make the implementation easier, because then the "should we do this" check only requires the subtarget.
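
For reference, a hedged sketch of the subtarget-only check being suggested, assuming the disable-ldp/disable-stp features expose hasDisableLdp()/hasDisableStp() predicates (predicate names assumed, not taken from this PR):

    // Inside tryToPairLdStInst: respect the existing target features that
    // suppress pair formation, avoiding a dedicated SVE flag.
    if (MI.mayLoad() && Subtarget->hasDisableLdp())
      return false;
    if (MI.mayStore() && Subtarget->hasDisableStp())
      return false;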

Contributor Author:

That sounds good to me. I thought having the option to disable just SVE pairing could be useful for debugging, but I'm happy to remove it if you don't think so.


#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {
@@ -97,6 +101,9 @@ using LdStPairFlags = struct LdStPairFlags {
// a pair-wise insn, and false if the reverse is true.
bool MergeForward = false;

// Set to true when pairing SVE fill/spill instructions.
bool SVEFillSpillPair = false;

// SExtIdx gives the index of the result of the load pair that must be
// extended. The value of SExtIdx assumes that the paired load produces the
// value in this order: (I, returned iterator), i.e., -1 means no value has
@@ -113,6 +120,9 @@ using LdStPairFlags = struct LdStPairFlags {
void setMergeForward(bool V = true) { MergeForward = V; }
bool getMergeForward() const { return MergeForward; }

void setSVEFillSpillPair(bool V = true) { SVEFillSpillPair = V; }
bool getSVEFillSpillPair() const { return SVEFillSpillPair; }

void setSExtIdx(int V) { SExtIdx = V; }
int getSExtIdx() const { return SExtIdx; }

@@ -300,6 +310,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
case AArch64::STRXui:
case AArch64::STRXpre:
case AArch64::STURXi:
case AArch64::STR_ZXI:
case AArch64::LDRDui:
case AArch64::LDURDi:
case AArch64::LDRDpre:
Expand All @@ -318,6 +329,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
case AArch64::LDRSui:
case AArch64::LDURSi:
case AArch64::LDRSpre:
case AArch64::LDR_ZXI:
return Opc;
case AArch64::LDRSWui:
return AArch64::LDRWui;
@@ -363,6 +375,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
return AArch64::STPDpre;
case AArch64::STRQui:
case AArch64::STURQi:
case AArch64::STR_ZXI:
return AArch64::STPQi;
case AArch64::STRQpre:
return AArch64::STPQpre;
Expand All @@ -388,6 +401,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
return AArch64::LDPDpre;
case AArch64::LDRQui:
case AArch64::LDURQi:
case AArch64::LDR_ZXI:
return AArch64::LDPQi;
case AArch64::LDRQpre:
return AArch64::LDPQpre;
@@ -833,6 +847,12 @@ static bool isMergeableIndexLdSt(MachineInstr &MI, int &Scale) {
}
}

// Return true if MI is an SVE fill/spill instruction.
static bool isPairableFillSpillInst(const MachineInstr &MI) {
auto const Opc = MI.getOpcode();
return Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI;
}

static bool isRewritableImplicitDef(unsigned Opc) {
switch (Opc) {
default:
@@ -1227,6 +1247,15 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
(void)MIBSXTW;
LLVM_DEBUG(dbgs() << " Extend operand:\n ");
LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
} else if (Flags.getSVEFillSpillPair()) {
// We are combining SVE fill/spill to LDP/STP, so we need to get the Q
// variant of the registers.
MachineOperand &MOp0 = MIB->getOperand(0);
MachineOperand &MOp1 = MIB->getOperand(1);
assert(AArch64::ZPRRegClass.contains(MOp0.getReg()) &&
AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
} else {
LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
}
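
The renaming above relies on the generated AArch64 register enums laying out Z0..Z31 and Q0..Q31 contiguously, with Qn aliasing the low 128 bits of Zn; at a fixed 128-bit vector length the two registers hold exactly the same bits. A helper restating the computation (illustrative only, not part of the PR):

    // Map a Z register to the Q register that aliases its low 128 bits.
    static unsigned getQRegForZReg(unsigned ZReg) {
      assert(ZReg >= AArch64::Z0 && ZReg <= AArch64::Z31 &&
             "expected a Z register");
      return AArch64::Q0 + (ZReg - AArch64::Z0);
    }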
@@ -1829,6 +1858,9 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,

Flags.clearRenameReg();

if (isPairableFillSpillInst(FirstMI))
Flags.setSVEFillSpillPair();

// Track which register units have been modified and used between the first
// insn (inclusive) and the second insn.
ModifiedRegUnits.clear();
@@ -2661,7 +2693,8 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
// Get the needed alignments to check them if
// ldp-aligned-only/stp-aligned-only features are opted.
uint64_t MemAlignment = MemOp->getAlign().value();
uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
uint64_t TypeAlignment =
Align(MemOp->getSize().getValue().getKnownMinValue()).value();

if (MemAlignment < 2 * TypeAlignment) {
NumFailedAlignmentCheck++;
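
The getKnownMinValue() call is needed because an SVE fill/spill's memory size is a scalable TypeSize (a multiple of vscale) rather than a fixed integer, which Align() cannot take directly. A small illustration, assuming LLVM's TypeSize API:

    // "16 bytes * vscale"; under this pass's gating (vector length pinned
    // to 128 bits, i.e. vscale == 1) the known minimum is also the exact
    // size, so the alignment check stays correct.
    TypeSize SVESize = TypeSize::getScalable(16);
    uint64_t MinBytes = SVESize.getKnownMinValue(); // 16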
@@ -2782,6 +2815,9 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
bool EnableNarrowZeroStOpt) {
AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
bool const CanPairFillSpill = EnableSVEFillSpillPairing &&
Subtarget->isSVEorStreamingSVEAvailable() &&
Subtarget->getSVEVectorSizeInBits() == 128;

bool Modified = false;
// Four transformations to do here:
@@ -2822,11 +2858,18 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
}
// 3) Find loads and stores that can be merged into a single load or store
// pair instruction.
// When compiling for SVE 128, also try to combine SVE fill/spill
// instructions into LDP/STP.
// e.g.,
// ldr x0, [x2]
// ldr x1, [x2, #8]
// ; becomes
// ldp x0, x1, [x2]
// e.g.,
// ldr z0, [x2]
// ldr z1, [x2, #1, mul vl]
// ; becomes
// ldp q0, q1, [x2]

if (MBB.getParent()->getRegInfo().tracksLiveness()) {
DefinedInBB.clear();
@@ -2840,6 +2883,9 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
Modified = true;
else if (CanPairFillSpill && isPairableFillSpillInst(*MBBI) &&
tryToPairLdStInst(MBBI))
Modified = true;
Collaborator:

Is it necessary to have a separate path for SVE rather than extending isPairableLdStInst? I can see isPairableLdStInst is also used to group related instructions together, which is something we might want in the future, assuming it doesn't already come for free.

Contributor Author:

I believe isPairableLdStInst is static, but I needed to query the subtarget to perform the checks that CanPairFillSpill is doing. I thought it would be better to add the new path to reduce the number of changes, but I'm happy to revisit this if you have a better idea.

Collaborator:

It's worth trying. I wasn't thinking of extending isPairableLdStInst specifically, but wondered whether the extra checks could be pushed into the tryTo.. function or perhaps isCandidateToMergeOrPair.

Contributor Author:

I think they can. I tried this at first but found myself having to check for the fill/spill opcodes in more than one place, so I went with the separate paths in the end. But I'll give this a try so that we can see how it looks.

Contributor Author:

I've moved these checks into isPairableLdStInst and isCandidateToMergeOrPair - please let me know what you think of the new approach, otherwise I'm happy to revert it. :)

else
++MBBI;
}
218 changes: 218 additions & 0 deletions llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -0,0 +1,218 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 -aarch64-sve-fill-spill-pairing=0 < %s | FileCheck %s --check-prefixes=CHECK-OFF

define void @nxv16i8(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: stp q0, q1, [x1]
; CHECK-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%vl = shl nuw nsw i64 %vscale, 4
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
%ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}

define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_max_range:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0, #-1024]
; CHECK-NEXT: stp q0, q1, [x1, #1008]
; CHECK-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1, #1008]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0, #-1024]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_max_range:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0, #-64, mul vl]
; CHECK-OFF-NEXT: ldr z1, [x0, #-63, mul vl]
; CHECK-OFF-NEXT: str z0, [x1, #63, mul vl]
; CHECK-OFF-NEXT: str z1, [x1, #64, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%ldoff1 = mul i64 %vscale, -1024
%ldoff2 = mul i64 %vscale, -1008
%stoff1 = mul i64 %vscale, 1008
%stoff2 = mul i64 %vscale, 1024
%ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
%stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
%ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}

define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_outside_range:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-NEXT: str z0, [x1, #64, mul vl]
; CHECK-NEXT: str z1, [x1, #65, mul vl]
; CHECK-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_outside_range:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-OFF-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-OFF-NEXT: str z0, [x1, #64, mul vl]
; CHECK-OFF-NEXT: str z1, [x1, #65, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%ldoff1 = mul i64 %vscale, -1040
%ldoff2 = mul i64 %vscale, -1024
%stoff1 = mul i64 %vscale, 1024
%stoff2 = mul i64 %vscale, 1040
%ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
%stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
%ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}

define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_2vl_stride:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: str z1, [x1, #2, mul vl]
; CHECK-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #2, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%vl = shl nuw nsw i64 %vscale, 5
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
%ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}

define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv2f64_32b_aligned:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: stp q0, q1, [x1]
; CHECK-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-STPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%vl = shl nuw nsw i64 %vscale, 4
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
%ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
%ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
store <vscale x 2 x double> %ld1, ptr %stptr, align 32
store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
ret void
}