@@ -102,20 +102,20 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
         return;
     }
 
-    let mut i = 0;
-    if i + 3 < count {
+    let mut i = 0_usize;
+    if i.wrapping_add(3) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
+        i = i.wrapping_add(4);
     }
 
     if i + 1 < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
+        i = i.wrapping_add(2);
     }
 
     if i < count {
         *dst.add(i) = *src.add(i);
-        i += 1;
+        i = i.wrapping_add(1);
     }
 
     debug_assert_eq!(i, count);
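Every hunk in this commit makes the same substitution: plain `+`, `+=`, and `-` become `wrapping_add` and `wrapping_sub`. The apparent motivation (my reading, not stated in the diff): rustc is routinely built with overflow checks enabled, and checked arithmetic puts a test-and-panic branch on every operation along this hot hashing path, while the surrounding `debug_assert!`s already bound the operands so tightly that overflow is impossible. Under those invariants the wrapping forms are observably identical. A minimal standalone sketch of the equivalence:

```rust
fn main() {
    // `wrapping_add` never panics: on overflow it wraps modulo 2^N...
    assert_eq!(usize::MAX.wrapping_add(1), 0);

    // ...and for in-range operands it returns exactly what `+` returns,
    // which is why the substitution is sound wherever an invariant
    // proves overflow impossible.
    let i = 0_usize;
    assert_eq!(i.wrapping_add(4), i + 4);
}
```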
@@ -211,14 +211,14 @@ impl SipHasher128 {
         debug_assert!(nbuf < BUFFER_SIZE);
         debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
 
-        if nbuf + LEN < BUFFER_SIZE {
+        if nbuf.wrapping_add(LEN) < BUFFER_SIZE {
             unsafe {
                 // The memcpy call is optimized away because the size is known.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                 ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
             }
 
-            self.nbuf = nbuf + LEN;
+            self.nbuf = nbuf.wrapping_add(LEN);
 
             return;
         }
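The fast-path guard is sound because of the two `debug_assert!`s right above it. A sketch with buffer constants matching how they are defined in rustc's `sip128.rs` as far as I know (treat the exact values as illustrative; `LEN` is a small fixed write size of at most one element):

```rust
const ELEM_SIZE: usize = std::mem::size_of::<u64>(); // 8-byte elements
const BUFFER_SIZE: usize = 8 * ELEM_SIZE; // 64: the hashing buffer
const BUFFER_WITH_SPILL_SIZE: usize = 9 * ELEM_SIZE; // 72: buffer + spill element

fn main() {
    // With `nbuf < BUFFER_SIZE` and `LEN` at most one element, the sum is
    // at most 71 -- nowhere near usize::MAX -- so `nbuf.wrapping_add(LEN)`
    // can never wrap and the test behaves exactly like `nbuf + LEN`.
    for nbuf in 0..BUFFER_SIZE {
        for len in 0..=ELEM_SIZE {
            assert!(nbuf + len < BUFFER_WITH_SPILL_SIZE);
            assert_eq!(nbuf.wrapping_add(len), nbuf + len);
        }
    }
}
```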
@@ -265,8 +265,8 @@ impl SipHasher128 {
             // This function should only be called when the write fills the buffer.
             // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
             // LEN is statically known, so the branch is optimized away.
-            self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-            self.processed += BUFFER_SIZE;
+            self.nbuf = if LEN == 1 { 0 } else { nbuf.wrapping_add(LEN).wrapping_sub(BUFFER_SIZE) };
+            self.processed = self.processed.wrapping_add(BUFFER_SIZE);
         }
     }
 
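Two observations on this hunk (my reading of it, not anything the patch states): `LEN` is a const generic, so `LEN == 1` is decided per monomorphized instantiation and the branch costs nothing at runtime; and `wrapping_sub` keeps the else arm branch-free, since `nbuf + LEN - BUFFER_SIZE` cannot actually underflow here (the function runs only when the write fills the buffer) but a checked subtraction would still emit the test. A hypothetical reduction of the pattern:

```rust
// Hypothetical helper, not from the patch: each monomorphized copy of this
// function compiles the `if` down to a single one of its two arms.
fn leftover<const LEN: usize, const BUFFER_SIZE: usize>(nbuf: usize) -> usize {
    debug_assert!(nbuf + LEN >= BUFFER_SIZE); // caller fills the buffer
    if LEN == 1 {
        0 // nbuf + 1 == BUFFER_SIZE here, so nothing is left over
    } else {
        nbuf.wrapping_add(LEN).wrapping_sub(BUFFER_SIZE) // bytes past the end
    }
}

fn main() {
    assert_eq!(leftover::<1, 64>(63), 0);
    assert_eq!(leftover::<8, 64>(60), 4); // 60 + 8 - 64
}
```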
@@ -277,7 +277,7 @@ impl SipHasher128 {
         let nbuf = self.nbuf;
         debug_assert!(nbuf < BUFFER_SIZE);
 
-        if nbuf + length < BUFFER_SIZE {
+        if nbuf.wrapping_add(length) < BUFFER_SIZE {
             unsafe {
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
 
@@ -289,7 +289,7 @@ impl SipHasher128 {
                 }
             }
 
-            self.nbuf = nbuf + length;
+            self.nbuf = nbuf.wrapping_add(length);
 
             return;
         }
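Unlike the fixed-size writes above, `length` here is the caller's slice length, so it is fair to ask whether `nbuf.wrapping_add(length)` could wrap around and wrongly satisfy the fast-path test. It cannot, by the standard Rust guarantee that a slice spans at most `isize::MAX` bytes; a sketch of that bound:

```rust
fn main() {
    // `nbuf` is below the buffer size (assume 64 as above) and a slice's
    // byte length never exceeds isize::MAX, so their sum always fits in a
    // usize: the wrapping add is wrapping in name only.
    let nbuf = 63_usize;
    let max_slice_len = isize::MAX as usize;
    assert!(nbuf.checked_add(max_slice_len).is_some());
}
```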
@@ -327,7 +327,7 @@ impl SipHasher128 {
             // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
             // We know that is true, because last step ensured we have a full
             // element in the buffer.
-            let last = nbuf / ELEM_SIZE + 1;
+            let last = (nbuf / ELEM_SIZE).wrapping_add(1);
 
             for i in 0..last {
                 let elem = self.buf.get_unchecked(i).assume_init().to_le();
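The comment carries the real argument, and it is checkable: assuming `needed_in_elem` is `ELEM_SIZE - nbuf % ELEM_SIZE` (consistent with how it is used in the next hunk), the two expressions agree, but only the `... + 1` shape makes the nonzero lower bound syntactically obvious to the compiler:

```rust
fn main() {
    const ELEM_SIZE: usize = 8;
    for nbuf in 0..64_usize {
        let needed_in_elem = ELEM_SIZE - nbuf % ELEM_SIZE;
        // Both count the complete elements now sitting in the buffer...
        assert_eq!(nbuf / ELEM_SIZE + 1, (nbuf + needed_in_elem) / ELEM_SIZE);
        // ...but the left-hand form is trivially at least 1.
        assert!(nbuf / ELEM_SIZE + 1 >= 1);
    }
}
```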
@@ -338,7 +338,7 @@ impl SipHasher128 {
 
             // Process the remaining element-sized chunks of input.
             let mut processed = needed_in_elem;
-            let input_left = length - processed;
+            let input_left = length.wrapping_sub(processed);
             let elems_left = input_left / ELEM_SIZE;
             let extra_bytes_left = input_left % ELEM_SIZE;
 
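These three lines split the not-yet-hashed input into whole elements plus a tail, and the split accounts for every byte exactly once. A worked check with illustrative numbers:

```rust
fn main() {
    const ELEM_SIZE: usize = 8;
    let (length, needed_in_elem) = (45_usize, 3_usize); // illustrative write
    let input_left = length.wrapping_sub(needed_in_elem); // 42 bytes left
    let elems_left = input_left / ELEM_SIZE; // 5 whole elements
    let extra_bytes_left = input_left % ELEM_SIZE; // 2 tail bytes to buffer
    assert_eq!(needed_in_elem + elems_left * ELEM_SIZE + extra_bytes_left, length);
}
```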
@@ -347,7 +347,7 @@ impl SipHasher128 {
                 self.state.v3 ^= elem;
                 Sip13Rounds::c_rounds(&mut self.state);
                 self.state.v0 ^= elem;
-                processed += ELEM_SIZE;
+                processed = processed.wrapping_add(ELEM_SIZE);
             }
 
             // Copy remaining input into start of buffer.
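The loop body is the classic SipHash absorption step: xor the message word into `v3`, run the compression rounds, xor it into `v0`. The `Sip13Rounds` name suggests the SipHash-1-3 variant, i.e. one compression round per word. A self-contained sketch using the reference SIPROUND from the SipHash paper (stand-in code, not this file's implementation):

```rust
struct State { v0: u64, v1: u64, v2: u64, v3: u64 }

// The reference SIPROUND permutation.
fn sipround(s: &mut State) {
    s.v0 = s.v0.wrapping_add(s.v1); s.v1 = s.v1.rotate_left(13);
    s.v1 ^= s.v0; s.v0 = s.v0.rotate_left(32);
    s.v2 = s.v2.wrapping_add(s.v3); s.v3 = s.v3.rotate_left(16);
    s.v3 ^= s.v2;
    s.v0 = s.v0.wrapping_add(s.v3); s.v3 = s.v3.rotate_left(21);
    s.v3 ^= s.v0;
    s.v2 = s.v2.wrapping_add(s.v1); s.v1 = s.v1.rotate_left(17);
    s.v1 ^= s.v2; s.v2 = s.v2.rotate_left(32);
}

// Absorb one little-endian message word, one c-round per word (SipHash-1-x).
fn absorb(s: &mut State, elem: u64) {
    s.v3 ^= elem;
    sipround(s);
    s.v0 ^= elem;
}

fn main() {
    let mut s = State { v0: 1, v1: 2, v2: 3, v3: 4 };
    absorb(&mut s, 0xdead_beef);
    println!("{:016x} {:016x} {:016x} {:016x}", s.v0, s.v1, s.v2, s.v3);
}
```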
@@ -356,7 +356,7 @@ impl SipHasher128 {
             copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
             self.nbuf = extra_bytes_left;
-            self.processed += nbuf + processed;
+            self.processed = self.processed.wrapping_add(nbuf).wrapping_add(processed);
         }
     }
 
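The updated `self.processed` maintains the invariant that every byte seen so far is counted exactly once: either in `processed` (already folded into the state) or in `nbuf` (still buffered). The finalization hunk below depends on that sum. A toy model of the bookkeeping (the real code drains only when the buffer fills; this just exercises the invariant):

```rust
fn main() {
    const ELEM_SIZE: usize = 8;
    let (mut processed, mut nbuf, mut total) = (0_usize, 0_usize, 0_usize);
    for write in [5_usize, 64, 3, 100] {
        total += write;
        let available = nbuf + write;
        let leftover = available % ELEM_SIZE; // stays buffered
        processed += available - leftover; // folded into the hash state
        nbuf = leftover;
        assert_eq!(processed + nbuf, total); // the invariant
    }
}
```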
@@ -394,7 +394,7 @@ impl SipHasher128 {
         };
 
         // Finalize the hash.
-        let length = self.processed + self.nbuf;
+        let length = self.processed.wrapping_add(self.nbuf);
         let b: u64 = ((length as u64 & 0xff) << 56) | elem;
 
         state.v3 ^= b;
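`length` then feeds SipHash's standard length encoding: the low byte of the total input length becomes the top byte of the final message word, sitting above whatever partial element remained. A worked example of the packing:

```rust
fn main() {
    let length = 300_usize; // total bytes hashed; 300 & 0xff == 44 == 0x2c
    let elem = 0x41_u64;    // leftover partial element (little-endian)
    let b: u64 = ((length as u64 & 0xff) << 56) | elem;
    assert_eq!(b, 0x2c00_0000_0000_0041);
}
```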