@@ -177,7 +177,7 @@ macro_rules! checked_op {
 
 // `Int` + `SignedInt` implemented for signed integers
 macro_rules! int_impl {
-    ($ActualT:ident, $UnsignedT:ty, $BITS:expr,
+    ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr,
      $add_with_overflow:path,
      $sub_with_overflow:path,
      $mul_with_overflow:path) => {
@@ -850,6 +850,17 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shl(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[inline(always)]
+        #[cfg(stage0)]
         pub fn wrapping_shl(self, rhs: u32) -> Self {
             self.overflowing_shl(rhs).0
         }
@@ -875,6 +886,17 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shr(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[inline(always)]
+        #[cfg(stage0)]
        pub fn wrapping_shr(self, rhs: u32) -> Self {
             self.overflowing_shr(rhs).0
         }
@@ -1089,6 +1111,15 @@ macro_rules! int_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
             (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
@@ -1111,6 +1142,15 @@ macro_rules! int_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
             (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
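
Aside (illustration only, not part of the patch): the hunks above change how the shift is computed, not what the stable methods return. The documented wrapping/overflowing shift semantics for a signed type can be checked with a small stable-Rust program:

```rust
fn main() {
    // wrapping_shl/shr reduce the shift amount modulo the bit width
    // (rhs & ($BITS - 1)), so out-of-range amounts wrap instead of panicking.
    assert_eq!(1i8.wrapping_shl(8), 1);           // 8 & 7 == 0
    assert_eq!(1i8.wrapping_shl(9), 2);           // 9 & 7 == 1
    // overflowing_* additionally reports whether rhs was out of range.
    assert_eq!(1i8.overflowing_shl(9), (2, true));
    assert_eq!((-128i8).wrapping_shr(7), -1);     // arithmetic shift for signed types
}
```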
@@ -1268,39 +1308,39 @@ macro_rules! int_impl {
 
 #[lang = "i8"]
 impl i8 {
-    int_impl! { i8, u8, 8,
+    int_impl! { i8, i8, u8, 8,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
 }
 
 #[lang = "i16"]
 impl i16 {
-    int_impl! { i16, u16, 16,
+    int_impl! { i16, i16, u16, 16,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
 }
 
 #[lang = "i32"]
 impl i32 {
-    int_impl! { i32, u32, 32,
+    int_impl! { i32, i32, u32, 32,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
 }
 
 #[lang = "i64"]
 impl i64 {
-    int_impl! { i64, u64, 64,
+    int_impl! { i64, i64, u64, 64,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
 }
 
 #[lang = "i128"]
 impl i128 {
-    int_impl! { i128, u128, 128,
+    int_impl! { i128, i128, u128, 128,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1309,7 +1349,7 @@ impl i128 {
 #[cfg(target_pointer_width = "16")]
 #[lang = "isize"]
 impl isize {
-    int_impl! { i16, u16, 16,
+    int_impl! { isize, i16, u16, 16,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1318,7 +1358,7 @@ impl isize {
 #[cfg(target_pointer_width = "32")]
 #[lang = "isize"]
 impl isize {
-    int_impl! { i32, u32, 32,
+    int_impl! { isize, i32, u32, 32,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1327,15 +1367,15 @@ impl isize {
 #[cfg(target_pointer_width = "64")]
 #[lang = "isize"]
 impl isize {
-    int_impl! { i64, u64, 64,
+    int_impl! { isize, i64, u64, 64,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
 }
 
 // `Int` + `UnsignedInt` implemented for unsigned integers
 macro_rules! uint_impl {
-    ($ActualT:ty, $BITS:expr,
+    ($SelfT:ty, $ActualT:ty, $BITS:expr,
      $ctpop:path,
      $ctlz:path,
      $cttz:path,
@@ -1978,6 +2018,17 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shl(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[inline(always)]
+        #[cfg(stage0)]
         pub fn wrapping_shl(self, rhs: u32) -> Self {
             self.overflowing_shl(rhs).0
         }
@@ -2003,6 +2054,17 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shr(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[inline(always)]
+        #[cfg(stage0)]
         pub fn wrapping_shr(self, rhs: u32) -> Self {
             self.overflowing_shr(rhs).0
         }
@@ -2170,6 +2232,15 @@ macro_rules! uint_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
             (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
@@ -2192,6 +2263,16 @@ macro_rules! uint_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
             (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
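
Aside (illustration only, not part of the patch): the new `not(stage0)` bodies mask the shift amount before passing it to the unchecked intrinsic, presumably because the intrinsic takes a right operand of the same type as `self` — which is why `$SelfT` is threaded through both macros and used in the `as $SelfT` cast. A stand-alone sketch of the same masked-shift pattern, using only public operators and a hypothetical helper name:

```rust
// `wrapping_shl_u8` is an illustrative name, not something defined by the patch.
fn wrapping_shl_u8(x: u8, rhs: u32) -> u8 {
    let masked = rhs & (8 - 1); // keep the amount in 0..8, like rhs & ($BITS - 1)
    x << masked                 // in range, so the plain shift cannot overflow
}

fn main() {
    assert_eq!(wrapping_shl_u8(1, 8), 1);
    assert_eq!(wrapping_shl_u8(1, 9), 2);
    assert_eq!(1u8.wrapping_shl(9), 2); // agrees with the std method
}
```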
@@ -2292,7 +2373,7 @@ macro_rules! uint_impl {
 
 #[lang = "u8"]
 impl u8 {
-    uint_impl! { u8, 8,
+    uint_impl! { u8, u8, 8,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2304,7 +2385,7 @@ impl u8 {
 
 #[lang = "u16"]
 impl u16 {
-    uint_impl! { u16, 16,
+    uint_impl! { u16, u16, 16,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2316,7 +2397,7 @@ impl u16 {
 
 #[lang = "u32"]
 impl u32 {
-    uint_impl! { u32, 32,
+    uint_impl! { u32, u32, 32,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2328,7 +2409,7 @@ impl u32 {
 
 #[lang = "u64"]
 impl u64 {
-    uint_impl! { u64, 64,
+    uint_impl! { u64, u64, 64,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2340,7 +2421,7 @@ impl u64 {
 
 #[lang = "u128"]
 impl u128 {
-    uint_impl! { u128, 128,
+    uint_impl! { u128, u128, 128,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2353,7 +2434,7 @@ impl u128 {
 #[cfg(target_pointer_width = "16")]
 #[lang = "usize"]
 impl usize {
-    uint_impl! { u16, 16,
+    uint_impl! { usize, u16, 16,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2365,7 +2446,7 @@ impl usize {
 #[cfg(target_pointer_width = "32")]
 #[lang = "usize"]
 impl usize {
-    uint_impl! { u32, 32,
+    uint_impl! { usize, u32, 32,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2378,7 +2459,7 @@ impl usize {
 #[cfg(target_pointer_width = "64")]
 #[lang = "usize"]
 impl usize {
-    uint_impl! { u64, 64,
+    uint_impl! { usize, u64, 64,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,