
Commit 2d0ac85

[X86] Fix gcc warning about mix of enumeral and non-enumeral types. NFC
1 parent 35baff8 commit 2d0ac85


llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 5 additions & 4 deletions
@@ -3297,21 +3297,22 @@ unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
       // If the current setup has imm64 mask, then inverse will have
       // at least imm32 mask (or be zext i32 -> i64).
       if (VT == MVT::i64)
-        return AndMask->getSignificantBits() > 32 ? ISD::SRL : ShiftOpc;
+        return AndMask->getSignificantBits() > 32 ? (unsigned)ISD::SRL
+                                                  : ShiftOpc;
 
       // We can only benefit if req at least 7-bit for the mask. We
       // don't want to replace shl of 1,2,3 as they can be implemented
       // with lea/add.
-      return ShiftOrRotateAmt.uge(7) ? ISD::SRL : ShiftOpc;
+      return ShiftOrRotateAmt.uge(7) ? (unsigned)ISD::SRL : ShiftOpc;
     }
 
     if (VT == MVT::i64)
       // Keep exactly 32-bit imm64, this is zext i32 -> i64 which is
       // extremely efficient.
-      return AndMask->getSignificantBits() > 33 ? ISD::SHL : ShiftOpc;
+      return AndMask->getSignificantBits() > 33 ? (unsigned)ISD::SHL : ShiftOpc;
 
     // Keep small shifts as shl so we can generate add/lea.
-    return ShiftOrRotateAmt.ult(7) ? ISD::SHL : ShiftOpc;
+    return ShiftOrRotateAmt.ult(7) ? (unsigned)ISD::SHL : ShiftOpc;
   }
 
   // We prefer rotate for vectors of if we won't get a zext mask with SRL
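
For reference, below is a minimal standalone sketch of the GCC diagnostic this commit silences (hypothetical names, not code from the LLVM tree). With g++ -Wextra, a conditional expression whose arms mix an enumerator with a plain unsigned value triggers "enumerated and non-enumerated type in conditional expression"; casting the enumerator to unsigned gives both arms the same type, which is exactly the fix applied above.

// Hypothetical reproduction of the warning; Opcode and pick() are
// illustrative stand-ins for ISD::NodeType and the function patched above.
enum Opcode { SHL = 1, SRL = 2 };

unsigned pick(bool Wide, unsigned ShiftOpc) {
  // return Wide ? SRL : ShiftOpc;          // warns under g++ -Wextra
  return Wide ? (unsigned)SRL : ShiftOpc;   // cast makes both arms unsigned
}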
