
[AMDGPU][NFC] Remove _DEFERRED operands. #139123

Merged
merged 1 commit into from May 9, 2025
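For context (this scrape carries no PR description): FMAAK/FMAMK-style instructions take a mandatory 32-bit literal, and the `_DEFERRED` operand types existed so the disassembler could postpone decoding such a literal, leaving the `LITERAL_CONST` placeholder in the `MCInst` and patching it later in `convertFMAanyK`. As the diff below shows, the literal is now decoded immediately, and `convertFMAanyK` merely inserts it as the named `immX` operand. A minimal sketch of the old and new patterns, using illustrative names rather than the real LLVM APIs:

```cpp
#include <cstdint>
#include <vector>

// Encoding value meaning "a 32-bit literal follows the instruction word".
constexpr int64_t LITERAL_CONST = 255;

struct Operand {
  bool IsImm;
  int64_t Imm;
};

// Old pattern (what this PR removes): deferred operands kept the placeholder
// value, and a second pass patched each matching placeholder afterwards.
void patchDeferredLiterals(std::vector<Operand> &Ops, int64_t Literal) {
  for (Operand &Op : Ops)
    if (Op.IsImm && Op.Imm == LITERAL_CONST)
      Op.Imm = Literal;
}

// New pattern: decode the literal the moment the placeholder is seen, so no
// deferred operand type and no fix-up pass are needed.
Operand decodeImm(int64_t Enc, int64_t Literal) {
  return {true, Enc == LITERAL_CONST ? Literal : Enc};
}
```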
22 changes: 5 additions & 17 deletions llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1956,7 +1956,6 @@ static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
   case AMDGPU::OPERAND_REG_INLINE_C_INT16:
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -1975,14 +1974,12 @@ static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
   case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
     return &APFloat::IEEEdouble();
   case AMDGPU::OPERAND_REG_IMM_FP16:
-  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
   case AMDGPU::OPERAND_REG_IMM_V2FP16:
   case AMDGPU::OPERAND_KIMM16:
     return &APFloat::IEEEhalf();
   case AMDGPU::OPERAND_REG_IMM_BF16:
-  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_BF16:
   case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
   case AMDGPU::OPERAND_REG_IMM_V2BF16:
@@ -2304,7 +2301,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
     llvm_unreachable("fp literal in 64-bit integer instruction.");

   case AMDGPU::OPERAND_REG_IMM_BF16:
-  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_BF16:
   case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
   case AMDGPU::OPERAND_REG_IMM_V2BF16:
@@ -2321,14 +2317,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo

   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
   case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
   case AMDGPU::OPERAND_REG_IMM_INT16:
   case AMDGPU::OPERAND_REG_IMM_FP16:
-  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT16:
   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
@@ -2369,7 +2363,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
   switch (OpTy) {
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -2425,7 +2418,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo

   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
   case AMDGPU::OPERAND_REG_IMM_FP16:
-  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
     if (isSafeTruncation(Val, 16) &&
         AMDGPU::isInlinableLiteralFP16(static_cast<int16_t>(Val),
                                        AsmParser->hasInv2PiInlineImm())) {
@@ -2439,7 +2431,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
     return;

   case AMDGPU::OPERAND_REG_IMM_BF16:
-  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_BF16:
     if (isSafeTruncation(Val, 16) &&
         AMDGPU::isInlinableLiteralBF16(static_cast<int16_t>(Val),
@@ -3615,13 +3606,11 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
     return AMDGPU::isInlinableLiteralV2BF16(Val);

   if (OperandType == AMDGPU::OPERAND_REG_IMM_FP16 ||
-      OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP16 ||
-      OperandType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED)
+      OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP16)
     return AMDGPU::isInlinableLiteralFP16(Val, hasInv2PiInlineImm());

   if (OperandType == AMDGPU::OPERAND_REG_IMM_BF16 ||
-      OperandType == AMDGPU::OPERAND_REG_INLINE_C_BF16 ||
-      OperandType == AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED)
+      OperandType == AMDGPU::OPERAND_REG_INLINE_C_BF16)
     return AMDGPU::isInlinableLiteralBF16(Val, hasInv2PiInlineImm());

   llvm_unreachable("invalid operand type");
@@ -3671,15 +3660,14 @@ static OperandIndices getSrcOperandIndices(unsigned Opcode,
       AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::imm) : -1;

   if (isVOPD(Opcode)) {
-    int16_t ImmDeferredIdx =
-        AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::immDeferred)
-                             : -1;
+    int16_t ImmXIdx =
+        AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::immX) : -1;

     return {getNamedOperandIdx(Opcode, OpName::src0X),
             getNamedOperandIdx(Opcode, OpName::vsrc1X),
             getNamedOperandIdx(Opcode, OpName::src0Y),
             getNamedOperandIdx(Opcode, OpName::vsrc1Y),
-            ImmDeferredIdx,
+            ImmXIdx,
             ImmIdx};
   }

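One detail of the VOPD hunk just above: getSrcOperandIndices returns a fixed list of operand indices using -1 as the "absent" sentinel, and the mandatory literal of the X component is now looked up under the name immX rather than immDeferred. A rough sketch of the sentinel-index idiom, under the assumption that OperandIndices is a small fixed-size list (the real type is defined elsewhere in the parser):

```cpp
#include <array>
#include <cstdint>

// Hypothetical stand-in for OperandIndices as returned above:
// {src0X, vsrc1X, src0Y, vsrc1Y, immX, imm}; -1 means "operand not present".
using OperandIndices = std::array<int16_t, 6>;

inline bool hasOperandAt(const OperandIndices &Idx, unsigned Slot) {
  return Idx[Slot] != -1;
}
```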
62 changes: 7 additions & 55 deletions llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -238,13 +238,6 @@ static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
 }

-template <unsigned OpWidth>
-static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
-                                               uint64_t /* Addr */,
-                                               const MCDisassembler *Decoder) {
-  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
-}
-
 // Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
 // when RegisterClass is used as an operand. Most often used for destination
 // operands.
@@ -324,22 +317,6 @@ static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
 }

-template <unsigned OpWidth>
-static DecodeStatus
-decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
-                                     uint64_t /*Addr*/,
-                                     const MCDisassembler *Decoder) {
-  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
-  assert(isUInt<9>(Imm) && "9-bit encoding expected");
-
-  if (Imm & AMDGPU::EncValues::IS_VGPR) {
-    bool IsHi = Imm & (1 << 7);
-    unsigned RegIdx = Imm & 0x7f;
-    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
-  }
-  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
-}
-
 template <unsigned OpWidth>
 static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
                                           uint64_t /*Addr*/,
@@ -559,31 +536,21 @@ void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
     }

     if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
-      switch (OpDesc.OperandType) {
-      case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
-      case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
-      case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
-        Op = MCOperand::createImm(AMDGPU::EncValues::LITERAL_CONST);
-        continue;
-      default:
-        Op = decodeLiteralConstant(OpDesc.OperandType ==
-                                   AMDGPU::OPERAND_REG_IMM_FP64);
-        continue;
-      }
+      Op = decodeLiteralConstant(OpDesc.OperandType ==
+                                 AMDGPU::OPERAND_REG_IMM_FP64);
+      continue;
     }

     if (AMDGPU::EncValues::INLINE_FLOATING_C_MIN <= Imm &&
         Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX) {
       switch (OpDesc.OperandType) {
       case AMDGPU::OPERAND_REG_IMM_BF16:
-      case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
       case AMDGPU::OPERAND_REG_IMM_V2BF16:
       case AMDGPU::OPERAND_REG_INLINE_C_BF16:
       case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
         Imm = getInlineImmValBF16(Imm);
         break;
       case AMDGPU::OPERAND_REG_IMM_FP16:
-      case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
       case AMDGPU::OPERAND_REG_IMM_INT16:
       case AMDGPU::OPERAND_REG_IMM_V2FP16:
       case AMDGPU::OPERAND_REG_INLINE_C_FP16:
@@ -894,11 +861,9 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
     }
   }

-  int ImmLitIdx =
-      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
   bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
-  if (ImmLitIdx != -1 && !IsSOPK)
-    convertFMAanyK(MI, ImmLitIdx);
+  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm) && !IsSOPK)
+    convertFMAanyK(MI);

   // Some VOPC instructions, e.g., v_cmpx_f_f64, use VOP3 encoding and
   // have EXEC as implicit destination. Issue a warning if encoding for
@@ -1380,22 +1345,9 @@ void AMDGPUDisassembler::convertVOPC64DPPInst(MCInst &MI) const {
   }
 }

-void AMDGPUDisassembler::convertFMAanyK(MCInst &MI, int ImmLitIdx) const {
+void AMDGPUDisassembler::convertFMAanyK(MCInst &MI) const {
   assert(HasLiteral && "Should have decoded a literal");
-  const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
-  unsigned DescNumOps = Desc.getNumOperands();
-  insertNamedMCOperand(MI, MCOperand::createImm(Literal),
-                       AMDGPU::OpName::immDeferred);
-  assert(DescNumOps == MI.getNumOperands());
-  for (unsigned I = 0; I < DescNumOps; ++I) {
-    auto &Op = MI.getOperand(I);
-    auto OpType = Desc.operands()[I].OperandType;
-    bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
-                         OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
-    if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
-        IsDeferredOp)
-      Op.setImm(Literal);
-  }
+  insertNamedMCOperand(MI, MCOperand::createImm(Literal), AMDGPU::OpName::immX);
 }

 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
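The getInstruction hunk above also trades an index lookup for an existence check: since convertFMAanyK now inserts the literal by operand name, the call site only needs to know whether the opcode has an imm operand at all. Assuming hasNamedOperand is the usual thin wrapper over getNamedOperandIdx, the relationship is simply:

```cpp
#include <cstdint>

// Illustrative signatures; the real declarations live in the AMDGPU MC layer.
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx);

inline bool hasNamedOperand(uint16_t Opcode, uint16_t NamedIdx) {
  return getNamedOperandIdx(Opcode, NamedIdx) != -1;
}
```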
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -174,7 +174,7 @@ class AMDGPUDisassembler : public MCDisassembler {

   void convertEXPInst(MCInst &MI) const;
   void convertVINTERPInst(MCInst &MI) const;
-  void convertFMAanyK(MCInst &MI, int ImmLitIdx) const;
+  void convertFMAanyK(MCInst &MI) const;
   void convertSDWAInst(MCInst &MI) const;
   void convertMAIInst(MCInst &MI) const;
   void convertDPP8Inst(MCInst &MI) const;
3 changes: 0 additions & 3 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -715,7 +715,6 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
   switch (OpTy) {
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -741,12 +740,10 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
     break;
   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
   case AMDGPU::OPERAND_REG_IMM_FP16:
-  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
     printImmediateF16(Op.getImm(), STI, O);
     break;
   case AMDGPU::OPERAND_REG_INLINE_C_BF16:
   case AMDGPU::OPERAND_REG_IMM_BF16:
-  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
     printImmediateBF16(Op.getImm(), STI, O);
     break;
   case AMDGPU::OPERAND_REG_IMM_V2INT16:
3 changes: 0 additions & 3 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -273,7 +273,6 @@ AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
   switch (OpInfo.OperandType) {
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -295,14 +294,12 @@ AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
     return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);

   case AMDGPU::OPERAND_REG_IMM_FP16:
-  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
     // FIXME Is this correct? What do inline immediates do on SI for f16 src
     // which does not have f16 support?
     return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

   case AMDGPU::OPERAND_REG_IMM_BF16:
-  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_BF16:
     // We don't actually need to check Inv2Pi here because BF16 instructions can
     // only be emitted for targets that already support the feature.
3 changes: 0 additions & 3 deletions llvm/lib/Target/AMDGPU/SIDefines.h
@@ -204,9 +204,6 @@ enum OperandType : unsigned {
   OPERAND_REG_IMM_FP64,
   OPERAND_REG_IMM_BF16,
   OPERAND_REG_IMM_FP16,
-  OPERAND_REG_IMM_BF16_DEFERRED,
-  OPERAND_REG_IMM_FP16_DEFERRED,
-  OPERAND_REG_IMM_FP32_DEFERRED,
   OPERAND_REG_IMM_V2BF16,
   OPERAND_REG_IMM_V2FP16,
   OPERAND_REG_IMM_V2INT16,
4 changes: 0 additions & 4 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4376,7 +4376,6 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
   switch (OperandType) {
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
   case AMDGPU::OPERAND_REG_IMM_V2FP32:
@@ -4416,7 +4415,6 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
   case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
     return AMDGPU::isInlinableLiteralV2BF16(Imm);
   case AMDGPU::OPERAND_REG_IMM_FP16:
-  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
     if (isInt<16>(Imm) || isUInt<16>(Imm)) {
       // A few special case instructions have 16-bit operands on subtargets
@@ -4431,7 +4429,6 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
     return false;
   }
   case AMDGPU::OPERAND_REG_IMM_BF16:
-  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
   case AMDGPU::OPERAND_REG_INLINE_C_BF16: {
     if (isInt<16>(Imm) || isUInt<16>(Imm)) {
       int16_t Trunc = static_cast<int16_t>(Imm);
@@ -4842,7 +4839,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
     break;
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
-  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
   case AMDGPU::OPERAND_REG_IMM_V2FP32:
     break;
   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
6 changes: 2 additions & 4 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2701,13 +2701,11 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableClamp = 0> {
                      HasSDWAOMod, Src0ModSDWA, Src1ModSDWA,
                      DstVT>.ret;
   field dag InsVOPDX = (ins Src0RC32:$src0X, Src1RC32:$vsrc1X);
-  // It is a slight misnomer to use the deferred f32 operand type for non-float
+  // It is a slight misnomer to use the f32 operand type for non-float
   // operands, but this operand type will only be used if the other dual
   // component is FMAAK or FMAMK
-  field dag InsVOPDXDeferred = (ins !if(!eq(Src0VT.Size, 32), VSrc_f32_Deferred, VSrc_f16_Deferred):$src0X, VGPR_32:$vsrc1X);
+  field dag InsVOPDX_immX = (ins !if(!eq(Src0VT.Size, 32), VSrc_f32, VSrc_f16):$src0X, VGPR_32:$vsrc1X);
   field dag InsVOPDY = (ins Src0RC32:$src0Y, Src1RC32:$vsrc1Y);
-  field dag InsVOPDYDeferred = (ins !if(!eq(Src1VT.Size, 32), VSrc_f32_Deferred, VSrc_f16_Deferred):$src0Y, VGPR_32:$vsrc1Y);
-

   field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
   field string AsmDPP = !if(HasExtDPP,
36 changes: 1 addition & 35 deletions llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1100,7 +1100,7 @@ class RegOrImmOperand <RegisterClass RegClass, string OperandTypeName>
   : RegisterOperand<RegClass> {
   let OperandNamespace = "AMDGPU";
   let OperandType = OperandTypeName;
-  let ParserMatchClass = RegImmMatcher<!subst("_Deferred", "", NAME)>;
+  let ParserMatchClass = RegImmMatcher<NAME>;
 }

 //===----------------------------------------------------------------------===//
@@ -1128,19 +1128,6 @@ def SSrc_b64 : SrcRegOrImm9 <SReg_64, "OPERAND_REG_IMM_INT64">;

 def SSrcOrLds_b32 : SrcRegOrImm9 <SRegOrLds_32, "OPERAND_REG_IMM_INT32">;

-//===----------------------------------------------------------------------===//
-// SSrc_32_Deferred Operands with an SGPR or a 32-bit immediate for use with
-// FMAMK/FMAAK
-//===----------------------------------------------------------------------===//
-
-class SrcRegOrImmDeferred9<RegisterClass regClass, string operandType>
-  : RegOrImmOperand<regClass, operandType> {
-  string DecoderMethodName = "decodeSrcRegOrImmDeferred9";
-  let DecoderMethod = DecoderMethodName # "<" # regClass.Size # ">";
-}
-
-def SSrc_f32_Deferred : SrcRegOrImmDeferred9<SReg_32, "OPERAND_REG_IMM_FP32_DEFERRED">;
-
 //===----------------------------------------------------------------------===//
 // SCSrc_* Operands with an SGPR or a inline constant
 //===----------------------------------------------------------------------===//
@@ -1187,27 +1174,6 @@ def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_FP64"> {
 def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2INT32">;
 def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2FP32">;

-//===----------------------------------------------------------------------===//
-// VSrc_*_Deferred Operands with an SGPR, VGPR or a 32-bit immediate for use
-// with FMAMK/FMAAK
-//===----------------------------------------------------------------------===//
-
-def VSrc_bf16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPERAND_REG_IMM_BF16_DEFERRED">;
-def VSrc_f16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPERAND_REG_IMM_FP16_DEFERRED">;
-def VSrc_f32_Deferred : SrcRegOrImmDeferred9<VS_32, "OPERAND_REG_IMM_FP32_DEFERRED">;
-
-// True 16 Operands
-def VSrcT_f16_Lo128_Deferred : SrcRegOrImmDeferred9<VS_16_Lo128,
-                                                    "OPERAND_REG_IMM_FP16_DEFERRED"> {
-  let DecoderMethodName = "decodeOperand_VSrcT16_Lo128_Deferred";
-  let EncoderMethod = "getMachineOpValueT16Lo128";
-}
-
-def VSrcFake16_bf16_Lo128_Deferred
-  : SrcRegOrImmDeferred9<VS_32_Lo128, "OPERAND_REG_IMM_BF16_DEFERRED">;
-def VSrcFake16_f16_Lo128_Deferred
-  : SrcRegOrImmDeferred9<VS_32_Lo128, "OPERAND_REG_IMM_FP16_DEFERRED">;
-
 //===----------------------------------------------------------------------===//
 // VRegSrc_* Operands with a VGPR
 //===----------------------------------------------------------------------===//