
Commit c2a4946 (1 parent: e94ce10)

use wrapping_* in some places where rustc-perf complained

4 files changed: 27 additions, 26 deletions

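The change is mechanical: integer arithmetic that provably cannot overflow is switched from plain operators to the wrapping_* methods. The likely motivation, inferred from the commit message, is that a build with -C overflow-checks=on lowers every plain `+`/`-` to a checked operation with a panic branch, and rustc-perf flagged the cost; wrapping_add/wrapping_sub always use two's-complement wraparound and emit no such branch. A minimal standalone sketch of the semantic difference (not code from this repository):

    fn main() {
        let x: u32 = u32::MAX;

        // Defined wraparound; never panics in any build configuration.
        assert_eq!(x.wrapping_add(1), 0);

        // Plain `x + 1` would panic here under -C overflow-checks=on
        // (and in debug builds); checked_add makes the overflow explicit.
        assert_eq!(x.checked_add(1), None);
    }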

compiler/rustc_data_structures/src/sip128.rs (16 additions, 16 deletions)

@@ -102,20 +102,20 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
         return;
     }
 
-    let mut i = 0;
-    if i + 3 < count {
+    let mut i = 0_usize;
+    if i.wrapping_add(3) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
+        i = i.wrapping_add(4);
     }
 
     if i + 1 < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
+        i = i.wrapping_add(2);
     }
 
     if i < count {
         *dst.add(i) = *src.add(i);
-        i += 1;
+        i = i.wrapping_add(1);
     }
 
     debug_assert_eq!(i, count);
@@ -211,14 +211,14 @@ impl SipHasher128 {
         debug_assert!(nbuf < BUFFER_SIZE);
         debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
 
-        if nbuf + LEN < BUFFER_SIZE {
+        if nbuf.wrapping_add(LEN) < BUFFER_SIZE {
             unsafe {
                 // The memcpy call is optimized away because the size is known.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                 ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
             }
 
-            self.nbuf = nbuf + LEN;
+            self.nbuf = nbuf.wrapping_add(LEN);
 
             return;
         }
@@ -265,8 +265,8 @@ impl SipHasher128 {
         // This function should only be called when the write fills the buffer.
        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
         // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
+        self.nbuf = if LEN == 1 { 0 } else { nbuf.wrapping_add(LEN).wrapping_sub(BUFFER_SIZE) };
+        self.processed = self.processed.wrapping_add(BUFFER_SIZE);
     }
 }
 
@@ -277,7 +277,7 @@ impl SipHasher128 {
         let nbuf = self.nbuf;
         debug_assert!(nbuf < BUFFER_SIZE);
 
-        if nbuf + length < BUFFER_SIZE {
+        if nbuf.wrapping_add(length) < BUFFER_SIZE {
             unsafe {
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
 
@@ -289,7 +289,7 @@ impl SipHasher128 {
                 }
             }
 
-            self.nbuf = nbuf + length;
+            self.nbuf = nbuf.wrapping_add(length);
 
             return;
         }
@@ -327,7 +327,7 @@ impl SipHasher128 {
         // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
         // We know that is true, because last step ensured we have a full
         // element in the buffer.
-        let last = nbuf / ELEM_SIZE + 1;
+        let last = (nbuf / ELEM_SIZE).wrapping_add(1);
 
         for i in 0..last {
             let elem = self.buf.get_unchecked(i).assume_init().to_le();
@@ -338,7 +338,7 @@ impl SipHasher128 {
 
         // Process the remaining element-sized chunks of input.
         let mut processed = needed_in_elem;
-        let input_left = length - processed;
+        let input_left = length.wrapping_sub(processed);
         let elems_left = input_left / ELEM_SIZE;
         let extra_bytes_left = input_left % ELEM_SIZE;
 
@@ -347,7 +347,7 @@ impl SipHasher128 {
             self.state.v3 ^= elem;
             Sip13Rounds::c_rounds(&mut self.state);
             self.state.v0 ^= elem;
-            processed += ELEM_SIZE;
+            processed = processed.wrapping_add(ELEM_SIZE);
         }
 
         // Copy remaining input into start of buffer.
@@ -356,7 +356,7 @@ impl SipHasher128 {
         copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
         self.nbuf = extra_bytes_left;
-        self.processed += nbuf + processed;
+        self.processed = self.processed.wrapping_add(nbuf).wrapping_add(processed);
     }
 }
 
@@ -394,7 +394,7 @@ impl SipHasher128 {
         };
 
         // Finalize the hash.
-        let length = self.processed + self.nbuf;
+        let length = self.processed.wrapping_add(self.nbuf);
         let b: u64 = ((length as u64 & 0xff) << 56) | elem;
 
         state.v3 ^= b;

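The index arithmetic in copy_nonoverlapping_small cannot actually wrap: `i` stays below `count`, which is at most 8 on this path, so wrapping_add removes the overflow check without changing behavior. A hypothetical safe-Rust re-sketch of the same 4/2/1-byte stepwise copy, using slices in place of the raw pointers:

    // Hypothetical safe re-sketch; the real function operates on raw
    // pointers and is unsafe. `count` is assumed < 8 on this path.
    fn copy_small(src: &[u8], dst: &mut [u8], count: usize) {
        debug_assert!(count < 8 && src.len() >= count && dst.len() >= count);
        let mut i = 0_usize;
        if i.wrapping_add(3) < count {
            dst[i..i + 4].copy_from_slice(&src[i..i + 4]);
            i = i.wrapping_add(4);
        }
        if i + 1 < count {
            dst[i..i + 2].copy_from_slice(&src[i..i + 2]);
            i = i.wrapping_add(2);
        }
        if i < count {
            dst[i] = src[i];
            i = i.wrapping_add(1);
        }
        debug_assert_eq!(i, count);
    }

    fn main() {
        let src = [1u8, 2, 3, 4, 5];
        let mut dst = [0u8; 5];
        copy_small(&src, &mut dst, 5);
        assert_eq!(src, dst);
    }
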
compiler/rustc_span/src/caching_source_map_view.rs (3 additions, 3 deletions)

@@ -40,7 +40,7 @@ impl CacheEntry {
         let pos = self.file.relative_position(pos);
         let line_index = self.file.lookup_line(pos).unwrap();
         let line_bounds = self.file.line_bounds(line_index);
-        self.line_number = line_index + 1;
+        self.line_number = line_index.wrapping_add(1);
         self.line = line_bounds;
         self.touch(time_stamp);
     }
@@ -81,15 +81,15 @@ impl<'sm> CachingSourceMapView<'sm> {
         &mut self,
         pos: BytePos,
     ) -> Option<(Lrc<SourceFile>, usize, RelativeBytePos)> {
-        self.time_stamp += 1;
+        self.time_stamp = self.time_stamp.wrapping_add(1);
 
         // Check if the position is in one of the cached lines
         let cache_idx = self.cache_entry_index(pos);
         if cache_idx != -1 {
             let cache_entry = &mut self.line_cache[cache_idx as usize];
             cache_entry.touch(self.time_stamp);
 
-            let col = RelativeBytePos(pos.to_u32() - cache_entry.line.start.to_u32());
+            let col = RelativeBytePos(pos.to_u32().wrapping_sub(cache_entry.line.start.to_u32()));
             return Some((cache_entry.file.clone(), cache_entry.line_number, col));
         }

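`time_stamp` here is an LRU-style access counter: entries are touched on every hit and the stalest one is evicted. If the counter ever wrapped, the worst case would be one bad eviction choice rather than a wrong answer, which is presumably why the unchecked increment is acceptable. A simplified sketch of that pattern (field names follow the diff; everything else is illustrative):

    struct Entry {
        time_stamp: usize,
    }

    struct LineCache {
        time_stamp: usize,
        entries: Vec<Entry>,
    }

    impl LineCache {
        fn touch(&mut self, idx: usize) {
            // Wrapping increment: after usize::MAX lookups the clock wraps
            // to 0, which at worst perturbs which entry looks least recent.
            self.time_stamp = self.time_stamp.wrapping_add(1);
            self.entries[idx].time_stamp = self.time_stamp;
        }

        fn least_recently_used(&self) -> usize {
            // Candidate for eviction: the smallest (oldest) time stamp.
            (0..self.entries.len())
                .min_by_key(|&i| self.entries[i].time_stamp)
                .expect("cache is never empty")
        }
    }

    fn main() {
        let mut cache = LineCache {
            time_stamp: 0,
            entries: vec![Entry { time_stamp: 0 }, Entry { time_stamp: 0 }],
        };
        cache.touch(1);
        assert_eq!(cache.least_recently_used(), 0);
    }
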
compiler/rustc_span/src/lib.rs (6 additions, 5 deletions)

@@ -1746,7 +1746,7 @@ impl SourceFile {
 
     #[inline]
     pub fn relative_position(&self, pos: BytePos) -> RelativeBytePos {
-        RelativeBytePos::from_u32(pos.to_u32() - self.start_pos.to_u32())
+        RelativeBytePos::from_u32(pos.to_u32().wrapping_sub(self.start_pos.to_u32()))
     }
 
     #[inline]
@@ -1769,10 +1769,11 @@ impl SourceFile {
 
         let lines = self.lines();
         assert!(line_index < lines.len());
-        if line_index == (lines.len() - 1) {
+        if line_index == (lines.len().wrapping_sub(1)) {
             self.absolute_position(lines[line_index])..self.end_position()
         } else {
-            self.absolute_position(lines[line_index])..self.absolute_position(lines[line_index + 1])
+            self.absolute_position(lines[line_index])
+                ..self.absolute_position(lines[line_index.wrapping_add(1)])
         }
     }
 
@@ -2039,7 +2040,7 @@ macro_rules! impl_pos {
 
                 #[inline(always)]
                 fn add(self, rhs: $ident) -> $ident {
-                    $ident(self.0 + rhs.0)
+                    $ident(self.0.wrapping_add(rhs.0))
                 }
             }
 
@@ -2048,7 +2049,7 @@ macro_rules! impl_pos {
 
                 #[inline(always)]
                 fn sub(self, rhs: $ident) -> $ident {
-                    $ident(self.0 - rhs.0)
+                    $ident(self.0.wrapping_sub(rhs.0))
                 }
             }
         )*

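For context, impl_pos! generates the arithmetic impls for the position newtypes (BytePos, CharPos, RelativeBytePos), so these two hunks change Add and Sub for all of them at once. A standalone sketch of what the changed expansion amounts to for one newtype, reduced here to a bare u32 wrapper:

    use std::ops::{Add, Sub};

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct BytePos(u32);

    impl Add for BytePos {
        type Output = BytePos;

        #[inline(always)]
        fn add(self, rhs: BytePos) -> BytePos {
            // No overflow-check branch, even under -C overflow-checks=on.
            BytePos(self.0.wrapping_add(rhs.0))
        }
    }

    impl Sub for BytePos {
        type Output = BytePos;

        #[inline(always)]
        fn sub(self, rhs: BytePos) -> BytePos {
            BytePos(self.0.wrapping_sub(rhs.0))
        }
    }

    fn main() {
        assert_eq!(BytePos(10) + BytePos(5), BytePos(15));
        assert_eq!(BytePos(10) - BytePos(5), BytePos(5));
    }
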
compiler/rustc_span/src/span_encoding.rs (2 additions, 2 deletions)

@@ -166,7 +166,7 @@ impl Span {
                 debug_assert!(len <= MAX_LEN);
                 SpanData {
                     lo: BytePos(self.lo_or_index),
-                    hi: BytePos(self.lo_or_index + len),
+                    hi: BytePos(self.lo_or_index.wrapping_add(len)),
                     ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
                     parent: None,
                 }
@@ -179,7 +179,7 @@ impl Span {
                 };
                 SpanData {
                     lo: BytePos(self.lo_or_index),
-                    hi: BytePos(self.lo_or_index + len),
+                    hi: BytePos(self.lo_or_index.wrapping_add(len)),
                     ctxt: SyntaxContext::root(),
                     parent: Some(parent),
                 }

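In both hunks the encoder has already bounded `len` (note the debug_assert!(len <= MAX_LEN) in the surrounding code), so `lo_or_index + len` cannot overflow a u32 for any valid span; wrapping_add is purely a codegen change. A much-simplified, hypothetical version of the decode step:

    // Hypothetical simplification of reconstructing a span's `hi` from the
    // packed `lo` and length. The MAX_LEN value here is illustrative.
    const MAX_LEN: u32 = (1 << 15) - 1;

    fn span_hi(lo_or_index: u32, len: u32) -> u32 {
        // Invariant upheld by the span encoder, mirrored from the diff above.
        debug_assert!(len <= MAX_LEN);
        // Cannot wrap for valid spans; wrapping_add just drops the check.
        lo_or_index.wrapping_add(len)
    }

    fn main() {
        assert_eq!(span_hi(100, 25), 125);
    }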