Skip to content

Commit 5b9e886

Browse files
committed
Auto merge of #73453 - erikdesjardins:tuplayout, r=eddyb
Ignore ZST offsets when deciding whether to use Scalar/ScalarPair layout. This is important because Scalar/ScalarPair layout previously would not be used if any ZST had a nonzero offset. For example, before this change, only `((), u128)` would be laid out like `u128`, not `(u128, ())`. Fixes #63244.
2 parents b984ef6 + 24e0913 commit 5b9e886

File tree

7 files changed

+228
-74
lines changed

7 files changed

+228
-74
lines changed

compiler/rustc_codegen_ssa/src/mir/place.rs

+27-9
Original file line numberDiff line numberDiff line change
@@ -93,15 +93,33 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
9393
let effective_field_align = self.align.restrict_for_offset(offset);
9494

9595
let mut simple = || {
96-
// Unions and newtypes only use an offset of 0.
97-
let llval = if offset.bytes() == 0 {
98-
self.llval
99-
} else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
100-
// Offsets have to match either first or second field.
101-
assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
102-
bx.struct_gep(self.llval, 1)
103-
} else {
104-
bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
96+
let llval = match self.layout.abi {
97+
_ if offset.bytes() == 0 => {
98+
// Unions and newtypes only use an offset of 0.
99+
// Also handles the first field of Scalar, ScalarPair, and Vector layouts.
100+
self.llval
101+
}
102+
Abi::ScalarPair(ref a, ref b)
103+
if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
104+
{
105+
// Offset matches second field.
106+
bx.struct_gep(self.llval, 1)
107+
}
108+
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
109+
// ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
110+
let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
111+
bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
112+
}
113+
Abi::Scalar(_) | Abi::ScalarPair(..) => {
114+
// All fields of Scalar and ScalarPair layouts must have been handled by this point.
115+
// Vector layouts have additional fields for each element of the vector, so don't panic in that case.
116+
bug!(
117+
"offset of non-ZST field `{:?}` does not match layout `{:#?}`",
118+
field,
119+
self.layout
120+
);
121+
}
122+
_ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
105123
};
106124
PlaceRef {
107125
// HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.

compiler/rustc_middle/src/ty/layout.rs

+47-65
Original file line numberDiff line numberDiff line change
@@ -390,78 +390,60 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
390390

391391
// Unpack newtype ABIs and find scalar pairs.
392392
if sized && size.bytes() > 0 {
393-
// All other fields must be ZSTs, and we need them to all start at 0.
394-
let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
395-
if zst_offsets.all(|(_, o)| o.bytes() == 0) {
396-
let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
397-
398-
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
399-
// We have exactly one non-ZST field.
400-
(Some((i, field)), None, None) => {
401-
// Field fills the struct and it has a scalar or scalar pair ABI.
402-
if offsets[i].bytes() == 0
403-
&& align.abi == field.align.abi
404-
&& size == field.size
405-
{
406-
match field.abi {
407-
// For plain scalars, or vectors of them, we can't unpack
408-
// newtypes for `#[repr(C)]`, as that affects C ABIs.
409-
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
410-
abi = field.abi.clone();
411-
}
412-
// But scalar pairs are Rust-specific and get
413-
// treated as aggregates by C ABIs anyway.
414-
Abi::ScalarPair(..) => {
415-
abi = field.abi.clone();
416-
}
417-
_ => {}
393+
// All other fields must be ZSTs.
394+
let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
395+
396+
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
397+
// We have exactly one non-ZST field.
398+
(Some((i, field)), None, None) => {
399+
// Field fills the struct and it has a scalar or scalar pair ABI.
400+
if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
401+
{
402+
match field.abi {
403+
// For plain scalars, or vectors of them, we can't unpack
404+
// newtypes for `#[repr(C)]`, as that affects C ABIs.
405+
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
406+
abi = field.abi.clone();
418407
}
408+
// But scalar pairs are Rust-specific and get
409+
// treated as aggregates by C ABIs anyway.
410+
Abi::ScalarPair(..) => {
411+
abi = field.abi.clone();
412+
}
413+
_ => {}
419414
}
420415
}
416+
}
421417

422-
// Two non-ZST fields, and they're both scalars.
423-
(
424-
Some((
425-
i,
426-
&TyAndLayout {
427-
layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
428-
},
429-
)),
430-
Some((
431-
j,
432-
&TyAndLayout {
433-
layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
434-
},
435-
)),
436-
None,
437-
) => {
438-
// Order by the memory placement, not source order.
439-
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
440-
((i, a), (j, b))
441-
} else {
442-
((j, b), (i, a))
443-
};
444-
let pair = self.scalar_pair(a.clone(), b.clone());
445-
let pair_offsets = match pair.fields {
446-
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
447-
assert_eq!(memory_index, &[0, 1]);
448-
offsets
449-
}
450-
_ => bug!(),
451-
};
452-
if offsets[i] == pair_offsets[0]
453-
&& offsets[j] == pair_offsets[1]
454-
&& align == pair.align
455-
&& size == pair.size
456-
{
457-
// We can use `ScalarPair` only when it matches our
458-
// already computed layout (including `#[repr(C)]`).
459-
abi = pair.abi;
418+
// Two non-ZST fields, and they're both scalars.
419+
(
420+
Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
421+
Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
422+
None,
423+
) => {
424+
// Order by the memory placement, not source order.
425+
let ((i, a), (j, b)) =
426+
if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
427+
let pair = self.scalar_pair(a.clone(), b.clone());
428+
let pair_offsets = match pair.fields {
429+
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
430+
assert_eq!(memory_index, &[0, 1]);
431+
offsets
460432
}
433+
_ => bug!(),
434+
};
435+
if offsets[i] == pair_offsets[0]
436+
&& offsets[j] == pair_offsets[1]
437+
&& align == pair.align
438+
&& size == pair.size
439+
{
440+
// We can use `ScalarPair` only when it matches our
441+
// already computed layout (including `#[repr(C)]`).
442+
abi = pair.abi;
461443
}
462-
463-
_ => {}
464444
}
445+
446+
_ => {}
465447
}
466448
}
467449

src/test/codegen/tuple-layout-opt.rs

+36
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
// ignore-emscripten
2+
// compile-flags: -C no-prepopulate-passes
3+
4+
// Test that tuples get optimized layout, in particular with a ZST in the last field (#63244)
5+
6+
#![crate_type="lib"]
7+
8+
type ScalarZstLast = (u128, ());
9+
// CHECK: define i128 @test_ScalarZstLast(i128 %_1)
10+
#[no_mangle]
11+
pub fn test_ScalarZstLast(_: ScalarZstLast) -> ScalarZstLast { loop {} }
12+
13+
type ScalarZstFirst = ((), u128);
14+
// CHECK: define i128 @test_ScalarZstFirst(i128 %_1)
15+
#[no_mangle]
16+
pub fn test_ScalarZstFirst(_: ScalarZstFirst) -> ScalarZstFirst { loop {} }
17+
18+
type ScalarPairZstLast = (u8, u128, ());
19+
// CHECK: define { i128, i8 } @test_ScalarPairZstLast(i128 %_1.0, i8 %_1.1)
20+
#[no_mangle]
21+
pub fn test_ScalarPairZstLast(_: ScalarPairZstLast) -> ScalarPairZstLast { loop {} }
22+
23+
type ScalarPairZstFirst = ((), u8, u128);
24+
// CHECK: define { i8, i128 } @test_ScalarPairZstFirst(i8 %_1.0, i128 %_1.1)
25+
#[no_mangle]
26+
pub fn test_ScalarPairZstFirst(_: ScalarPairZstFirst) -> ScalarPairZstFirst { loop {} }
27+
28+
type ScalarPairLotsOfZsts = ((), u8, (), u128, ());
29+
// CHECK: define { i128, i8 } @test_ScalarPairLotsOfZsts(i128 %_1.0, i8 %_1.1)
30+
#[no_mangle]
31+
pub fn test_ScalarPairLotsOfZsts(_: ScalarPairLotsOfZsts) -> ScalarPairLotsOfZsts { loop {} }
32+
33+
type ScalarPairLottaNesting = (((), ((), u8, (), u128, ())), ());
34+
// CHECK: define { i128, i8 } @test_ScalarPairLottaNesting(i128 %_1.0, i8 %_1.1)
35+
#[no_mangle]
36+
pub fn test_ScalarPairLottaNesting(_: ScalarPairLottaNesting) -> ScalarPairLottaNesting { loop {} }

src/test/codegen/zst-offset.rs

+43
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
// compile-flags: -C no-prepopulate-passes
2+
3+
#![crate_type = "lib"]
4+
#![feature(repr_simd)]
5+
6+
// Hack to get the correct size for the length part in slices
7+
// CHECK: @helper([[USIZE:i[0-9]+]] %_1)
8+
#[no_mangle]
9+
pub fn helper(_: usize) {
10+
}
11+
12+
// Check that we correctly generate a GEP for a ZST that is not included in Scalar layout
13+
// CHECK-LABEL: @scalar_layout
14+
#[no_mangle]
15+
pub fn scalar_layout(s: &(u64, ())) {
16+
// CHECK: [[X0:%[0-9]+]] = bitcast i64* %s to i8*
17+
// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 8
18+
let x = &s.1;
19+
&x; // keep variable in an alloca
20+
}
21+
22+
// Check that we correctly generate a GEP for a ZST that is not included in ScalarPair layout
23+
// CHECK-LABEL: @scalarpair_layout
24+
#[no_mangle]
25+
pub fn scalarpair_layout(s: &(u64, u32, ())) {
26+
// CHECK: [[X0:%[0-9]+]] = bitcast { i64, i32 }* %s to i8*
27+
// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 12
28+
let x = &s.2;
29+
&x; // keep variable in an alloca
30+
}
31+
32+
#[repr(simd)]
33+
pub struct U64x4(u64, u64, u64, u64);
34+
35+
// Check that we correctly generate a GEP for a ZST that is not included in Vector layout
36+
// CHECK-LABEL: @vector_layout
37+
#[no_mangle]
38+
pub fn vector_layout(s: &(U64x4, ())) {
39+
// CHECK: [[X0:%[0-9]+]] = bitcast <4 x i64>* %s to i8*
40+
// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 32
41+
let x = &s.1;
42+
&x; // keep variable in an alloca
43+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
// run-pass
2+
3+
#![feature(unsized_tuple_coercion)]
4+
5+
// Ensure that unsizable fields that might be accessed don't get reordered
6+
7+
fn nonzero_size() {
8+
let sized: (u8, [u32; 2]) = (123, [456, 789]);
9+
let unsize: &(u8, [u32]) = &sized;
10+
assert_eq!(unsize.0, 123);
11+
assert_eq!(unsize.1.len(), 2);
12+
assert_eq!(unsize.1[0], 456);
13+
assert_eq!(unsize.1[1], 789);
14+
}
15+
16+
fn zst() {
17+
let sized: (u8, [u32; 0]) = (123, []);
18+
let unsize: &(u8, [u32]) = &sized;
19+
assert_eq!(unsize.0, 123);
20+
assert_eq!(unsize.1.len(), 0);
21+
}
22+
23+
pub fn main() {
24+
nonzero_size();
25+
zst();
26+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
// run-pass
2+
3+
#![feature(unsized_tuple_coercion)]
4+
5+
// Check that we do not change the offsets of ZST fields when unsizing
6+
7+
fn scalar_layout() {
8+
let sized: &(u8, [(); 13]) = &(123, [(); 13]);
9+
let unsize: &(u8, [()]) = sized;
10+
assert_eq!(sized.1.as_ptr(), unsize.1.as_ptr());
11+
}
12+
13+
fn scalarpair_layout() {
14+
let sized: &(u8, u16, [(); 13]) = &(123, 456, [(); 13]);
15+
let unsize: &(u8, u16, [()]) = sized;
16+
assert_eq!(sized.2.as_ptr(), unsize.2.as_ptr());
17+
}
18+
19+
pub fn main() {
20+
scalar_layout();
21+
scalarpair_layout();
22+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
// compile-flags: -Z mir-opt-level=2
2+
// build-pass
3+
#![crate_type="lib"]
4+
5+
// This used to ICE: const-prop did not account for field reordering of scalar pairs,
6+
// and would generate a tuple like `(0x1337, VariantBar): (FooEnum, isize)`,
7+
// causing assertion failures in codegen when trying to read 0x1337 at the wrong type.
8+
9+
pub enum FooEnum {
10+
VariantBar,
11+
VariantBaz,
12+
VariantBuz,
13+
}
14+
15+
pub fn wrong_index() -> isize {
16+
let (_, b) = id((FooEnum::VariantBar, 0x1337));
17+
b
18+
}
19+
20+
pub fn wrong_index_two() -> isize {
21+
let (_, (_, b)) = id(((), (FooEnum::VariantBar, 0x1338)));
22+
b
23+
}
24+
25+
fn id<T>(x: T) -> T {
26+
x
27+
}

0 commit comments

Comments
 (0)