@@ -326,8 +326,8 @@ where
                 unsafe {
                     // Branchless comparison.
                     *end_l = i as u8;
-                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
-                    elem = elem.offset(1);
+                    end_l = end_l.add(!is_less(&*elem, pivot) as usize);
+                    elem = elem.add(1);
                 }
             }
         }
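
The loop above collects the offsets of left-block elements that are not less than the pivot without branching: the candidate offset is written unconditionally, and the write cursor advances only when the comparison flags the element as out of order, since `bool as usize` is exactly 0 or 1. A minimal sketch of the same idiom with safe indexing (the function name and example data are illustrative, not from this commit):

// Collect offsets of elements that are NOT less than `pivot`,
// mirroring the branchless loop above with safe indexing.
fn collect_out_of_order(block: &[u8], pivot: u8) -> Vec<u8> {
    let mut offsets = vec![0u8; block.len()];
    let mut end = 0;
    for (i, &elem) in block.iter().enumerate() {
        // Write the candidate offset unconditionally...
        offsets[end] = i as u8;
        // ...and advance the cursor only for out-of-order elements:
        // `bool as usize` is 0 or 1, so no branch is needed.
        end += !(elem < pivot) as usize;
    }
    offsets.truncate(end);
    offsets
}

fn main() {
    // With pivot 5, elements >= 5 are out of order on the left side.
    assert_eq!(collect_out_of_order(&[3, 7, 1, 9, 4], 5), vec![1, 3]);
}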
@@ -352,9 +352,9 @@ where
                 // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
                 unsafe {
                     // Branchless comparison.
-                    elem = elem.offset(-1);
+                    elem = elem.sub(1);
                     *end_r = i as u8;
-                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+                    end_r = end_r.add(is_less(&*elem, pivot) as usize);
                 }
             }
         }
@@ -365,12 +365,12 @@ where
         if count > 0 {
             macro_rules! left {
                 () => {
-                    l.offset(*start_l as isize)
+                    l.add(*start_l as usize)
                 };
             }
             macro_rules! right {
                 () => {
-                    r.offset(-(*start_r as isize) - 1)
+                    r.sub((*start_r as usize) + 1)
                 };
             }
 
@@ -398,16 +398,16 @@ where
             ptr::copy_nonoverlapping(right!(), left!(), 1);
 
             for _ in 1..count {
-                start_l = start_l.offset(1);
+                start_l = start_l.add(1);
                 ptr::copy_nonoverlapping(left!(), right!(), 1);
-                start_r = start_r.offset(1);
+                start_r = start_r.add(1);
                 ptr::copy_nonoverlapping(right!(), left!(), 1);
             }
 
             ptr::copy_nonoverlapping(&tmp, right!(), 1);
             mem::forget(tmp);
-            start_l = start_l.offset(1);
-            start_r = start_r.offset(1);
+            start_l = start_l.add(1);
+            start_r = start_r.add(1);
         }
     }
 
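
The block above is not a sequence of independent swaps: one element is parked in `tmp` and the rest move along a single cycle, costing two moves per element instead of three. Which left element lands in which right slot does not matter, since every one of them belongs on the other side of the pivot. A sketch of that cycle with safe indexing (names and data are illustrative):

// Move each out-of-order left element to a right slot and vice versa,
// using one temporary and a chain of moves, as in the hunk above.
fn cyclic_swap(v: &mut [i32], left: &[usize], right: &[usize]) {
    let count = left.len();
    assert_eq!(count, right.len());
    if count == 0 {
        return;
    }
    let tmp = v[left[0]];
    v[left[0]] = v[right[0]];
    for k in 1..count {
        v[right[k - 1]] = v[left[k]];
        v[left[k]] = v[right[k]];
    }
    v[right[count - 1]] = tmp;
}

fn main() {
    let mut v = [9, 1, 8, 2, 7, 3];
    // Positions 0 and 2 are out of order on the left, 3 and 5 on the right.
    cyclic_swap(&mut v, &[0, 2], &[3, 5]);
    assert_eq!(v, [2, 1, 3, 8, 7, 9]);
}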
@@ -420,15 +420,15 @@ where
             // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
             // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
             // for the smaller number of remaining elements.
-            l = unsafe { l.offset(block_l as isize) };
+            l = unsafe { l.add(block_l) };
         }
 
         if start_r == end_r {
             // All out-of-order elements in the right block were moved. Move to the previous block.
 
             // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
             // or `block_r` has been adjusted for the last handful of elements.
-            r = unsafe { r.offset(-(block_r as isize)) };
+            r = unsafe { r.sub(block_r) };
         }
 
         if is_done {
@@ -457,9 +457,9 @@ where
             // - `offsets_l` contains valid offsets into `v` collected during the partitioning of
             //   the last block, so the `l.offset` calls are valid.
             unsafe {
-                end_l = end_l.offset(-1);
-                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
-                r = r.offset(-1);
+                end_l = end_l.sub(1);
+                ptr::swap(l.add(*end_l as usize), r.sub(1));
+                r = r.sub(1);
             }
         }
         width(v.as_mut_ptr(), r)
@@ -470,9 +470,9 @@ where
         while start_r < end_r {
             // SAFETY: See the reasoning in [remaining-elements-safety].
             unsafe {
-                end_r = end_r.offset(-1);
-                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
-                l = l.offset(1);
+                end_r = end_r.sub(1);
+                ptr::swap(l, r.sub((*end_r as usize) + 1));
+                l = l.add(1);
             }
         }
         width(v.as_mut_ptr(), l)
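
Every rewrite in this commit leans on the documented equivalences: `p.add(n)` behaves like `p.offset(n as isize)` and `p.sub(n)` like `p.offset(-(n as isize))` for counts that fit in `isize`, so behavior is unchanged while the sign casts disappear. A quick demonstration of the equivalence (example values are illustrative):

fn main() {
    let v = [10, 20, 30, 40];
    let p = v.as_ptr();
    // SAFETY: all offsets stay within the array (one past the end for `end`).
    unsafe {
        assert_eq!(*p.offset(2), *p.add(2));
        let end = p.add(v.len());
        assert_eq!(*end.offset(-1), *end.sub(1));
    }
}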