1 file changed, +5 −4 lines changed
@@ -3297,21 +3297,22 @@ unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
       // If the current setup has imm64 mask, then inverse will have
       // at least imm32 mask (or be zext i32 -> i64).
       if (VT == MVT::i64)
-        return AndMask->getSignificantBits() > 32 ? ISD::SRL : ShiftOpc;
+        return AndMask->getSignificantBits() > 32 ? (unsigned)ISD::SRL
+                                                  : ShiftOpc;

       // We can only benefit if req at least 7-bit for the mask. We
       // don't want to replace shl of 1,2,3 as they can be implemented
       // with lea/add.
-      return ShiftOrRotateAmt.uge(7) ? ISD::SRL : ShiftOpc;
+      return ShiftOrRotateAmt.uge(7) ? (unsigned) ISD::SRL : ShiftOpc;
     }

     if (VT == MVT::i64)
       // Keep exactly 32-bit imm64, this is zext i32 -> i64 which is
       // extremely efficient.
-      return AndMask->getSignificantBits() > 33 ? ISD::SHL : ShiftOpc;
+      return AndMask->getSignificantBits() > 33 ? (unsigned) ISD::SHL : ShiftOpc;

     // Keep small shifts as shl so we can generate add/lea.
-    return ShiftOrRotateAmt.ult(7) ? ISD::SHL : ShiftOpc;
+    return ShiftOrRotateAmt.ult(7) ? (unsigned) ISD::SHL : ShiftOpc;
   }

   // We prefer rotate for vectors of if we won't get a zext mask with SRL