diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 7432768be4a58..4bc578c7985a8 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,4 +1,3 @@
-use std::borrow::{Borrow, Cow};
 use std::fmt::{self, Write};
 use std::ops::{Bound, Deref};
 use std::{cmp, iter};
@@ -7,8 +6,8 @@ use rustc_index::Idx;
 use tracing::debug;
 
 use crate::{
-    Abi, AbiAndPrefAlign, Align, FieldsShape, IndexSlice, IndexVec, Integer, LayoutS, Niche,
-    NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout,
+    Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
+    LayoutS, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
     Variants, WrappingRange,
 };
@@ -30,19 +29,46 @@ where
     uninhabited && is_1zst
 }
 
-pub trait LayoutCalculator {
-    type TargetDataLayoutRef: Borrow<TargetDataLayout>;
+/// Determines towards which end of a struct layout optimizations will try to place the best niches.
+enum NicheBias {
+    Start,
+    End,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum LayoutCalculatorError {
+    /// An unsized type was found in a location where a sized type was expected.
+    ///
+    /// This is not always a compile error, for example if there is a `[T]: Sized`
+    /// bound in a where clause.
+    UnexpectedUnsized,
+
+    /// A type was too large for the target platform.
+    SizeOverflow,
+
+    /// A union had no fields.
+    EmptyUnion,
+}
+
+type LayoutCalculatorResult<FieldIdx, VariantIdx> =
+    Result<LayoutS<FieldIdx, VariantIdx>, LayoutCalculatorError>;
+
+#[derive(Clone, Copy, Debug)]
+pub struct LayoutCalculator<Cx> {
+    pub cx: Cx,
+}
 
-    fn delayed_bug(&self, txt: impl Into<Cow<'static, str>>);
-    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
+impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
+    pub fn new(cx: Cx) -> Self {
+        Self { cx }
+    }
 
-    fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
+    pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
         &self,
         a: Scalar,
         b: Scalar,
     ) -> LayoutS<FieldIdx, VariantIdx> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
+        let dl = self.cx.data_layout();
         let b_align = b.align(dl);
         let align = a.align(dl).max(b_align).max(dl.aggregate_align);
         let b_offset = a.size(dl).align_to(b_align.abi);
@@ -70,25 +96,25 @@
         }
     }
 
-    fn univariant<
+    pub fn univariant<
         'a,
         FieldIdx: Idx,
         VariantIdx: Idx,
         F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
     >(
         &self,
-        dl: &TargetDataLayout,
         fields: &IndexSlice<FieldIdx, F>,
         repr: &ReprOptions,
         kind: StructKind,
-    ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-        let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx> {
+        let dl = self.cx.data_layout();
+        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
         // Enums prefer niches close to the beginning or the end of the variants so that other
         // (smaller) data-carrying variants can be packed into the space after/before the niche.
         // If the default field ordering does not give us a niche at the front then we do a second
         // run and bias niches to the right and then check which one is closer to one of the
         // struct's edges.
-        if let Some(layout) = &layout {
+        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
@@ -102,7 +128,8 @@ pub trait LayoutCalculator {
            // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
            // to get the unpadded size so we try anyway.
            if fields.len() > 1 && head_space != 0 && tail_space > 0 {
-                let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
+                let alt_layout = self
+                    .univariant_biased(fields, repr, kind, NicheBias::End)
                    .expect("alt layout should always work");
                let alt_niche = alt_layout
                    .largest_niche
@@ -130,12 +157,12 @@
                    alt_tail_space,
                    layout.fields.count(),
                    prefer_alt_layout,
-                    format_field_niches(layout, fields, dl),
-                    format_field_niches(&alt_layout, fields, dl),
+                    self.format_field_niches(layout, fields),
+                    self.format_field_niches(&alt_layout, fields),
                );
 
                if prefer_alt_layout {
-                    return Some(alt_layout);
+                    return Ok(alt_layout);
                }
            }
        }
@@ -144,11 +171,10 @@
 
        layout
    }
 
-    fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
+    pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
        &self,
    ) -> LayoutS<FieldIdx, VariantIdx> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
+        let dl = self.cx.data_layout();
        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
@@ -161,7 +187,7 @@
        }
    }
 
-    fn layout_of_struct_or_enum<
+    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
@@ -177,10 +203,7 @@
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        dont_niche_optimize_enum: bool,
        always_sized: bool,
-    ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
-
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx> {
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
@@ -191,7 +214,7 @@
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
-                return Some(self.layout_of_never_type());
+                return Ok(self.layout_of_never_type());
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
@@ -203,15 +226,13 @@
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
-            layout_of_struct(
-                self,
+            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_unsafe_cell,
                scalar_valid_range,
                always_sized,
-                dl,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
-            layout_of_enum(
-                self,
+            self.layout_of_enum(
                repr,
                variants,
                discr_range_of_repr,
                discriminants,
                dont_niche_optimize_enum,
-                dl,
            )
        }
    }
 
-    fn layout_of_union<
+    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
@@ -240,9 +259,8 @@
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
-    ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx> {
+        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
 
@@ -257,10 +275,11 @@
        };
 
        let mut size = Size::ZERO;
-        let only_variant = &variants[VariantIdx::new(0)];
+        let only_variant_idx = VariantIdx::new(0);
+        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
-                self.delayed_bug("unsized field in union".to_string());
+                return Err(LayoutCalculatorError::UnexpectedUnsized);
            }
 
            align = align.max(field.align);
@@ -323,9 +342,13 @@
            }
        };
 
-        Some(LayoutS {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
+        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
+            return Err(LayoutCalculatorError::EmptyUnion);
+        };
+
+        Ok(LayoutS {
+            variants: Variants::Single { index: only_variant_idx },
+            fields: FieldsShape::Union(union_field_count),
            abi,
            largest_niche: None,
            align,
@@ -334,986 +357,984 @@
            unadjusted_abi_align,
        })
    }
-}
 
-/// single-variant enums are just structs, if you think about it
-fn layout_of_struct<'a, LC, FieldIdx: Idx, VariantIdx: Idx, F>(
-    layout_calc: &LC,
-    repr: &ReprOptions,
-    variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
-    is_enum: bool,
-    is_unsafe_cell: bool,
-    scalar_valid_range: (Bound<u128>, Bound<u128>),
-    always_sized: bool,
-    dl: &TargetDataLayout,
-    present_first: VariantIdx,
-) -> Option<LayoutS<FieldIdx, VariantIdx>>
-where
-    LC: LayoutCalculator + ?Sized,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
-{
-    // Struct, or univariant enum equivalent to a struct.
-    // (Typechecking will reject discriminant-sizing attrs.)
-
-    let v = present_first;
-    let kind = if is_enum || variants[v].is_empty() || always_sized {
-        StructKind::AlwaysSized
-    } else {
-        StructKind::MaybeUnsized
-    };
-
-    let mut st = layout_calc.univariant(dl, &variants[v], repr, kind)?;
-    st.variants = Variants::Single { index: v };
-
-    if is_unsafe_cell {
-        let hide_niches = |scalar: &mut _| match scalar {
-            Scalar::Initialized { value, valid_range } => {
-                *valid_range = WrappingRange::full(value.size(dl))
-            }
-            // Already doesn't have any niches
-            Scalar::Union { .. } => {}
+    /// single-variant enums are just structs, if you think about it
+    fn layout_of_struct<'a, FieldIdx: Idx, VariantIdx: Idx, F>(
+        &self,
+        repr: &ReprOptions,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
+        is_enum: bool,
+        is_unsafe_cell: bool,
+        scalar_valid_range: (Bound<u128>, Bound<u128>),
+        always_sized: bool,
+        present_first: VariantIdx,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx>
+    where
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+    {
+        // Struct, or univariant enum equivalent to a struct.
+        // (Typechecking will reject discriminant-sizing attrs.)
+
+        let dl = self.cx.data_layout();
+        let v = present_first;
+        let kind = if is_enum || variants[v].is_empty() || always_sized {
+            StructKind::AlwaysSized
+        } else {
+            StructKind::MaybeUnsized
+        };
-        };
-        match &mut st.abi {
-            Abi::Uninhabited => {}
-            Abi::Scalar(scalar) => hide_niches(scalar),
-            Abi::ScalarPair(a, b) => {
-                hide_niches(a);
-                hide_niches(b);
+
+        let mut st = self.univariant(&variants[v], repr, kind)?;
+        st.variants = Variants::Single { index: v };
+
+        if is_unsafe_cell {
+            let hide_niches = |scalar: &mut _| match scalar {
+                Scalar::Initialized { value, valid_range } => {
+                    *valid_range = WrappingRange::full(value.size(dl))
+                }
+                // Already doesn't have any niches
+                Scalar::Union { .. } => {}
+            };
+            match &mut st.abi {
+                Abi::Uninhabited => {}
+                Abi::Scalar(scalar) => hide_niches(scalar),
+                Abi::ScalarPair(a, b) => {
+                    hide_niches(a);
+                    hide_niches(b);
+                }
+                Abi::Vector { element, count: _ } => hide_niches(element),
+                Abi::Aggregate { sized: _ } => {}
            }
-            Abi::Vector { element, count: _ } => hide_niches(element),
-            Abi::Aggregate { sized: _ } => {}
+            st.largest_niche = None;
+            return Ok(st);
        }
-        st.largest_niche = None;
-        return Some(st);
-    }
 
-    let (start, end) = scalar_valid_range;
-    match st.abi {
-        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
-            // Enlarging validity ranges would result in missed
-            // optimizations, *not* wrongly assuming the inner
-            // value is valid. e.g. unions already enlarge validity ranges,
-            // because the values may be uninitialized.
-            //
-            // Because of that we only check that the start and end
-            // of the range is representable with this scalar type.
-
-            let max_value = scalar.size(dl).unsigned_int_max();
-            if let Bound::Included(start) = start {
-                // FIXME(eddyb) this might be incorrect - it doesn't
-                // account for wrap-around (end < start) ranges.
-                assert!(start <= max_value, "{start} > {max_value}");
-                scalar.valid_range_mut().start = start;
-            }
-            if let Bound::Included(end) = end {
-                // FIXME(eddyb) this might be incorrect - it doesn't
-                // account for wrap-around (end < start) ranges.
-                assert!(end <= max_value, "{end} > {max_value}");
-                scalar.valid_range_mut().end = end;
-            }
-
-            // Update `largest_niche` if we have introduced a larger niche.
-            let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
-            if let Some(niche) = niche {
-                match st.largest_niche {
-                    Some(largest_niche) => {
-                        // Replace the existing niche even if they're equal,
-                        // because this one is at a lower offset.
-                        if largest_niche.available(dl) <= niche.available(dl) {
-                            st.largest_niche = Some(niche);
-                        }
-                    }
-                    None => st.largest_niche = Some(niche),
-                }
-            }
-        }
-        _ => assert!(
-            start == Bound::Unbounded && end == Bound::Unbounded,
-            "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
-        ),
-    }
-    Some(st)
-}
+        let (start, end) = scalar_valid_range;
+        match st.abi {
+            Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+                // Enlarging validity ranges would result in missed
+                // optimizations, *not* wrongly assuming the inner
+                // value is valid. e.g. unions already enlarge validity ranges,
+                // because the values may be uninitialized.
+                //
+                // Because of that we only check that the start and end
+                // of the range is representable with this scalar type.
+
+                let max_value = scalar.size(dl).unsigned_int_max();
+                if let Bound::Included(start) = start {
+                    // FIXME(eddyb) this might be incorrect - it doesn't
+                    // account for wrap-around (end < start) ranges.
+                    assert!(start <= max_value, "{start} > {max_value}");
+                    scalar.valid_range_mut().start = start;
+                }
+                if let Bound::Included(end) = end {
+                    // FIXME(eddyb) this might be incorrect - it doesn't
+                    // account for wrap-around (end < start) ranges.
+                    assert!(end <= max_value, "{end} > {max_value}");
+                    scalar.valid_range_mut().end = end;
+                }
+
+                // Update `largest_niche` if we have introduced a larger niche.
+                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+                if let Some(niche) = niche {
+                    match st.largest_niche {
+                        Some(largest_niche) => {
+                            // Replace the existing niche even if they're equal,
+                            // because this one is at a lower offset.
+                            if largest_niche.available(dl) <= niche.available(dl) {
+                                st.largest_niche = Some(niche);
+                            }
+                        }
+                        None => st.largest_niche = Some(niche),
+                    }
+                }
+            }
+            _ => assert!(
+                start == Bound::Unbounded && end == Bound::Unbounded,
+                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
+            ),
+        }
+        Ok(st)
+    }
 
-fn layout_of_enum<'a, LC, FieldIdx: Idx, VariantIdx: Idx, F>(
-    layout_calc: &LC,
-    repr: &ReprOptions,
-    variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
-    discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
-    discriminants: impl Iterator<Item = (VariantIdx, i128)>,
-    dont_niche_optimize_enum: bool,
-    dl: &TargetDataLayout,
-) -> Option<LayoutS<FieldIdx, VariantIdx>>
-where
-    LC: LayoutCalculator + ?Sized,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
-{
-    // Until we've decided whether to use the tagged or
-    // niche filling LayoutS, we don't want to intern the
-    // variant layouts, so we can't store them in the
-    // overall LayoutS. Store the overall LayoutS
-    // and the variant LayoutSs here until then.
-    struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
-        layout: LayoutS<FieldIdx, VariantIdx>,
-        variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
-    }
-
+    fn layout_of_enum<'a, FieldIdx: Idx, VariantIdx: Idx, F>(
+        &self,
+        repr: &ReprOptions,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
+        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
+        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
+        dont_niche_optimize_enum: bool,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx>
+    where
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+    {
+        // Until we've decided whether to use the tagged or
+        // niche filling LayoutS, we don't want to intern the
+        // variant layouts, so we can't store them in the
+        // overall LayoutS. Store the overall LayoutS
+        // and the variant LayoutSs here until then.
+        struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
+            layout: LayoutS<FieldIdx, VariantIdx>,
+            variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
+        }
+
+        let dl = self.cx.data_layout();
 
-    let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
-        if dont_niche_optimize_enum {
-            return None;
-        }
+        let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
+            if dont_niche_optimize_enum {
+                return None;
+            }
 
-        if variants.len() < 2 {
-            return None;
-        }
+            if variants.len() < 2 {
+                return None;
+            }
 
-        let mut align = dl.aggregate_align;
-        let mut max_repr_align = repr.align;
-        let mut unadjusted_abi_align = align.abi;
+            let mut align = dl.aggregate_align;
+            let mut max_repr_align = repr.align;
+            let mut unadjusted_abi_align = align.abi;
 
-        let mut variant_layouts = variants
-            .iter_enumerated()
-            .map(|(j, v)| {
-                let mut st = layout_calc.univariant(dl, v, repr, StructKind::AlwaysSized)?;
-                st.variants = Variants::Single { index: j };
+            let mut variant_layouts = variants
+                .iter_enumerated()
+                .map(|(j, v)| {
+                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
+                    st.variants = Variants::Single { index: j };
 
-                align = align.max(st.align);
-                max_repr_align = max_repr_align.max(st.max_repr_align);
-                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
+                    align = align.max(st.align);
+                    max_repr_align = max_repr_align.max(st.max_repr_align);
+                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
 
-                Some(st)
-            })
-            .collect::<Option<IndexVec<VariantIdx, _>>>()?;
+                    Some(st)
+                })
+                .collect::<Option<IndexVec<VariantIdx, _>>>()?;
 
-        let largest_variant_index = variant_layouts
-            .iter_enumerated()
-            .max_by_key(|(_i, layout)| layout.size.bytes())
-            .map(|(i, _layout)| i)?;
+            let largest_variant_index = variant_layouts
+                .iter_enumerated()
+                .max_by_key(|(_i, layout)| layout.size.bytes())
+                .map(|(i, _layout)| i)?;
 
-        let all_indices = variants.indices();
-        let needs_disc =
-            |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
-        let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
-            ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
+            let all_indices = variants.indices();
+            let needs_disc =
+                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
+            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
+                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
 
-        let count =
-            (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
+            let count =
+                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
 
-        // Find the field with the largest niche
-        let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
-            .iter()
-            .enumerate()
-            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
-            .max_by_key(|(_, niche)| niche.available(dl))
-            .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
-        let niche_offset =
-            niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
-        let niche_size = niche.value.size(dl);
-        let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
+            // Find the field with the largest niche
+            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
+                .iter()
+                .enumerate()
+                .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+                .max_by_key(|(_, niche)| niche.available(dl))
+                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
+            let niche_offset =
+                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
+            let niche_size = niche.value.size(dl);
+            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
 
-        let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
-            if i == largest_variant_index {
-                return true;
-            }
+            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
+                if i == largest_variant_index {
+                    return true;
+                }
 
-            layout.largest_niche = None;
+                layout.largest_niche = None;
 
-            if layout.size <= niche_offset {
-                // This variant will fit before the niche.
-                return true;
-            }
+                if layout.size <= niche_offset {
+                    // This variant will fit before the niche.
+                    return true;
+                }
 
-            // Determine if it'll fit after the niche.
-            let this_align = layout.align.abi;
-            let this_offset = (niche_offset + niche_size).align_to(this_align);
+                // Determine if it'll fit after the niche.
+                let this_align = layout.align.abi;
+                let this_offset = (niche_offset + niche_size).align_to(this_align);
 
-            if this_offset + layout.size > size {
-                return false;
-            }
+                if this_offset + layout.size > size {
+                    return false;
+                }
 
-            // It'll fit, but we need to make some adjustments.
-            match layout.fields {
-                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                    for offset in offsets.iter_mut() {
-                        *offset += this_offset;
-                    }
-                }
-                FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
-                    panic!("Layout of fields should be Arbitrary for variants")
-                }
-            }
-
-            // It can't be a Scalar or ScalarPair because the offset isn't 0.
-            if !layout.abi.is_uninhabited() {
-                layout.abi = Abi::Aggregate { sized: true };
-            }
-            layout.size += this_offset;
-
-            true
-        });
+                // It'll fit, but we need to make some adjustments.
+                match layout.fields {
+                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                        for offset in offsets.iter_mut() {
+                            *offset += this_offset;
+                        }
+                    }
+                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
+                        panic!("Layout of fields should be Arbitrary for variants")
+                    }
+                }
+
+                // It can't be a Scalar or ScalarPair because the offset isn't 0.
+                if !layout.abi.is_uninhabited() {
+                    layout.abi = Abi::Aggregate { sized: true };
+                }
+                layout.size += this_offset;
+
+                true
+            });
 
-        if !all_variants_fit {
-            return None;
-        }
-
-        let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
-
-        let others_zst = variant_layouts
-            .iter_enumerated()
-            .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
-        let same_size = size == variant_layouts[largest_variant_index].size;
-        let same_align = align == variant_layouts[largest_variant_index].align;
-
-        let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-            Abi::Uninhabited
-        } else if same_size && same_align && others_zst {
-            match variant_layouts[largest_variant_index].abi {
-                // When the total alignment and size match, we can use the
-                // same ABI as the scalar variant with the reserved niche.
-                Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                Abi::ScalarPair(first, second) => {
-                    // Only the niche is guaranteed to be initialised,
-                    // so use union layouts for the other primitive.
-                    if niche_offset == Size::ZERO {
-                        Abi::ScalarPair(niche_scalar, second.to_union())
-                    } else {
-                        Abi::ScalarPair(first.to_union(), niche_scalar)
-                    }
-                }
-                _ => Abi::Aggregate { sized: true },
-            }
-        } else {
-            Abi::Aggregate { sized: true }
-        };
+            if !all_variants_fit {
+                return None;
+            }
+
+            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
+
+            let others_zst = variant_layouts
+                .iter_enumerated()
+                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
+            let same_size = size == variant_layouts[largest_variant_index].size;
+            let same_align = align == variant_layouts[largest_variant_index].align;
+
+            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
+                Abi::Uninhabited
+            } else if same_size && same_align && others_zst {
+                match variant_layouts[largest_variant_index].abi {
+                    // When the total alignment and size match, we can use the
+                    // same ABI as the scalar variant with the reserved niche.
+                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+                    Abi::ScalarPair(first, second) => {
+                        // Only the niche is guaranteed to be initialised,
+                        // so use union layouts for the other primitive.
+                        if niche_offset == Size::ZERO {
+                            Abi::ScalarPair(niche_scalar, second.to_union())
+                        } else {
+                            Abi::ScalarPair(first.to_union(), niche_scalar)
+                        }
+                    }
+                    _ => Abi::Aggregate { sized: true },
+                }
+            } else {
+                Abi::Aggregate { sized: true }
+            };
 
-        let layout = LayoutS {
-            variants: Variants::Multiple {
-                tag: niche_scalar,
-                tag_encoding: TagEncoding::Niche {
-                    untagged_variant: largest_variant_index,
-                    niche_variants,
-                    niche_start,
-                },
-                tag_field: 0,
-                variants: IndexVec::new(),
-            },
-            fields: FieldsShape::Arbitrary {
-                offsets: [niche_offset].into(),
-                memory_index: [0].into(),
-            },
-            abi,
-            largest_niche,
-            size,
-            align,
-            max_repr_align,
-            unadjusted_abi_align,
-        };
+            let layout = LayoutS {
+                variants: Variants::Multiple {
+                    tag: niche_scalar,
+                    tag_encoding: TagEncoding::Niche {
+                        untagged_variant: largest_variant_index,
+                        niche_variants,
+                        niche_start,
+                    },
+                    tag_field: 0,
+                    variants: IndexVec::new(),
+                },
+                fields: FieldsShape::Arbitrary {
+                    offsets: [niche_offset].into(),
+                    memory_index: [0].into(),
+                },
+                abi,
+                largest_niche,
+                size,
+                align,
+                max_repr_align,
+                unadjusted_abi_align,
+            };
 
-        Some(TmpLayout { layout, variants: variant_layouts })
-    };
+            Some(TmpLayout { layout, variants: variant_layouts })
+        };
 
-    let niche_filling_layout = calculate_niche_filling_layout();
+        let niche_filling_layout = calculate_niche_filling_layout();
 
-    let (mut min, mut max) = (i128::MAX, i128::MIN);
-    let discr_type = repr.discr_type();
-    let bits = Integer::from_attr(dl, discr_type).size().bits();
-    for (i, mut val) in discriminants {
-        if !repr.c() && variants[i].iter().any(|f| f.abi.is_uninhabited()) {
-            continue;
-        }
-        if discr_type.is_signed() {
-            // sign extend the raw representation to be an i128
-            val = (val << (128 - bits)) >> (128 - bits);
-        }
-        if val < min {
-            min = val;
-        }
-        if val > max {
-            max = val;
-        }
-    }
-    // We might have no inhabited variants, so pretend there's at least one.
-    if (min, max) == (i128::MAX, i128::MIN) {
-        min = 0;
-        max = 0;
-    }
+        let (mut min, mut max) = (i128::MAX, i128::MIN);
+        let discr_type = repr.discr_type();
+        let bits = Integer::from_attr(dl, discr_type).size().bits();
+        for (i, mut val) in discriminants {
+            if !repr.c() && variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+                continue;
+            }
+            if discr_type.is_signed() {
+                // sign extend the raw representation to be an i128
+                val = (val << (128 - bits)) >> (128 - bits);
+            }
+            if val < min {
+                min = val;
+            }
+            if val > max {
+                max = val;
+            }
+        }
+        // We might have no inhabited variants, so pretend there's at least one.
+        if (min, max) == (i128::MAX, i128::MIN) {
+            min = 0;
+            max = 0;
+        }
 
-    assert!(min <= max, "discriminant range is {min}...{max}");
-    let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
-
-    let mut align = dl.aggregate_align;
-    let mut max_repr_align = repr.align;
-    let mut unadjusted_abi_align = align.abi;
-
-    let mut size = Size::ZERO;
-
-    // We're interested in the smallest alignment, so start large.
-    let mut start_align = Align::from_bytes(256).unwrap();
-    assert_eq!(Integer::for_align(dl, start_align), None);
-
-    // repr(C) on an enum tells us to make a (tag, union) layout,
-    // so we need to grow the prefix alignment to be at least
-    // the alignment of the union. (This value is used both for
-    // determining the alignment of the overall enum, and the
-    // determining the alignment of the payload after the tag.)
-    let mut prefix_align = min_ity.align(dl).abi;
-    if repr.c() {
-        for fields in variants {
-            for field in fields {
-                prefix_align = prefix_align.max(field.align.abi);
-            }
-        }
-    }
-
-    // Create the set of structs that represent each variant.
-    let mut layout_variants = variants
-        .iter_enumerated()
-        .map(|(i, field_layouts)| {
-            let mut st = layout_calc.univariant(
-                dl,
-                field_layouts,
-                repr,
-                StructKind::Prefixed(min_ity.size(), prefix_align),
-            )?;
-            st.variants = Variants::Single { index: i };
-            // Find the first field we can't move later
-            // to make room for a larger discriminant.
-            for field_idx in st.fields.index_by_increasing_offset() {
-                let field = &field_layouts[FieldIdx::new(field_idx)];
-                if !field.is_1zst() {
-                    start_align = start_align.min(field.align.abi);
-                    break;
-                }
-            }
-            size = cmp::max(size, st.size);
-            align = align.max(st.align);
-            max_repr_align = max_repr_align.max(st.max_repr_align);
-            unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
-            Some(st)
-        })
-        .collect::<Option<IndexVec<VariantIdx, _>>>()?;
+        assert!(min <= max, "discriminant range is {min}...{max}");
+        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
+
+        let mut align = dl.aggregate_align;
+        let mut max_repr_align = repr.align;
+        let mut unadjusted_abi_align = align.abi;
+
+        let mut size = Size::ZERO;
+
+        // We're interested in the smallest alignment, so start large.
+        let mut start_align = Align::from_bytes(256).unwrap();
+        assert_eq!(Integer::for_align(dl, start_align), None);
+
+        // repr(C) on an enum tells us to make a (tag, union) layout,
+        // so we need to grow the prefix alignment to be at least
+        // the alignment of the union. (This value is used both for
+        // determining the alignment of the overall enum, and the
+        // determining the alignment of the payload after the tag.)
+        let mut prefix_align = min_ity.align(dl).abi;
+        if repr.c() {
+            for fields in variants {
+                for field in fields {
+                    prefix_align = prefix_align.max(field.align.abi);
+                }
+            }
+        }
+
+        // Create the set of structs that represent each variant.
+        let mut layout_variants = variants
+            .iter_enumerated()
+            .map(|(i, field_layouts)| {
+                let mut st = self.univariant(
+                    field_layouts,
+                    repr,
+                    StructKind::Prefixed(min_ity.size(), prefix_align),
+                )?;
+                st.variants = Variants::Single { index: i };
+                // Find the first field we can't move later
+                // to make room for a larger discriminant.
+                for field_idx in st.fields.index_by_increasing_offset() {
+                    let field = &field_layouts[FieldIdx::new(field_idx)];
+                    if !field.is_1zst() {
+                        start_align = start_align.min(field.align.abi);
+                        break;
+                    }
+                }
+                size = cmp::max(size, st.size);
+                align = align.max(st.align);
+                max_repr_align = max_repr_align.max(st.max_repr_align);
+                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
+                Ok(st)
+            })
+            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
-    // Align the maximum variant size to the largest alignment.
-    size = size.align_to(align.abi);
+        // Align the maximum variant size to the largest alignment.
+        size = size.align_to(align.abi);
 
-    // FIXME(oli-obk): deduplicate and harden these checks
-    if size.bytes() >= dl.obj_size_bound() {
-        return None;
-    }
+        // FIXME(oli-obk): deduplicate and harden these checks
+        if size.bytes() >= dl.obj_size_bound() {
+            return Err(LayoutCalculatorError::SizeOverflow);
+        }
 
-    let typeck_ity = Integer::from_attr(dl, repr.discr_type());
-    if typeck_ity < min_ity {
-        // It is a bug if Layout decided on a greater discriminant size than typeck for
-        // some reason at this point (based on values discriminant can take on). Mostly
-        // because this discriminant will be loaded, and then stored into variable of
-        // type calculated by typeck. Consider such case (a bug): typeck decided on
-        // byte-sized discriminant, but layout thinks we need a 16-bit to store all
-        // discriminant values. That would be a bug, because then, in codegen, in order
-        // to store this 16-bit discriminant into 8-bit sized temporary some of the
-        // space necessary to represent would have to be discarded (or layout is wrong
-        // on thinking it needs 16 bits)
-        panic!(
-            "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
-        );
-        // However, it is fine to make discr type however large (as an optimisation)
-        // after this point – we’ll just truncate the value we load in codegen.
-    }
+        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
+        if typeck_ity < min_ity {
+            // It is a bug if Layout decided on a greater discriminant size than typeck for
+            // some reason at this point (based on values discriminant can take on). Mostly
+            // because this discriminant will be loaded, and then stored into variable of
+            // type calculated by typeck. Consider such case (a bug): typeck decided on
+            // byte-sized discriminant, but layout thinks we need a 16-bit to store all
+            // discriminant values. That would be a bug, because then, in codegen, in order
+            // to store this 16-bit discriminant into 8-bit sized temporary some of the
+            // space necessary to represent would have to be discarded (or layout is wrong
+            // on thinking it needs 16 bits)
+            panic!(
+                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
+            );
+            // However, it is fine to make discr type however large (as an optimisation)
+            // after this point – we’ll just truncate the value we load in codegen.
+        }
 
-    // Check to see if we should use a different type for the
-    // discriminant. We can safely use a type with the same size
-    // as the alignment of the first field of each variant.
-    // We increase the size of the discriminant to avoid LLVM copying
-    // padding when it doesn't need to. This normally causes unaligned
-    // load/stores and excessive memcpy/memset operations. By using a
-    // bigger integer size, LLVM can be sure about its contents and
-    // won't be so conservative.
-
-    // Use the initial field alignment
-    let mut ity = if repr.c() || repr.int.is_some() {
-        min_ity
-    } else {
-        Integer::for_align(dl, start_align).unwrap_or(min_ity)
-    };
+        // Check to see if we should use a different type for the
+        // discriminant. We can safely use a type with the same size
+        // as the alignment of the first field of each variant.
+        // We increase the size of the discriminant to avoid LLVM copying
+        // padding when it doesn't need to. This normally causes unaligned
+        // load/stores and excessive memcpy/memset operations. By using a
+        // bigger integer size, LLVM can be sure about its contents and
+        // won't be so conservative.
+
+        // Use the initial field alignment
+        let mut ity = if repr.c() || repr.int.is_some() {
+            min_ity
+        } else {
+            Integer::for_align(dl, start_align).unwrap_or(min_ity)
+        };
 
-    // If the alignment is not larger than the chosen discriminant size,
-    // don't use the alignment as the final size.
-    if ity <= min_ity {
-        ity = min_ity;
-    } else {
-        // Patch up the variants' first few fields.
-        let old_ity_size = min_ity.size();
-        let new_ity_size = ity.size();
-        for variant in &mut layout_variants {
-            match variant.fields {
-                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                    for i in offsets {
-                        if *i <= old_ity_size {
-                            assert_eq!(*i, old_ity_size);
-                            *i = new_ity_size;
-                        }
-                    }
-                    // We might be making the struct larger.
-                    if variant.size <= old_ity_size {
-                        variant.size = new_ity_size;
-                    }
-                }
-                FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
-                    panic!("encountered a non-arbitrary layout during enum layout")
-                }
-            }
-        }
-    }
+        // If the alignment is not larger than the chosen discriminant size,
+        // don't use the alignment as the final size.
+        if ity <= min_ity {
+            ity = min_ity;
+        } else {
+            // Patch up the variants' first few fields.
+            let old_ity_size = min_ity.size();
+            let new_ity_size = ity.size();
+            for variant in &mut layout_variants {
+                match variant.fields {
+                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                        for i in offsets {
+                            if *i <= old_ity_size {
+                                assert_eq!(*i, old_ity_size);
+                                *i = new_ity_size;
+                            }
+                        }
+                        // We might be making the struct larger.
+                        if variant.size <= old_ity_size {
+                            variant.size = new_ity_size;
+                        }
+                    }
+                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
+                        panic!("encountered a non-arbitrary layout during enum layout")
+                    }
+                }
+            }
+        }
 
-    let tag_mask = ity.size().unsigned_int_max();
-    let tag = Scalar::Initialized {
-        value: Primitive::Int(ity, signed),
-        valid_range: WrappingRange {
-            start: (min as u128 & tag_mask),
-            end: (max as u128 & tag_mask),
-        },
-    };
-    let mut abi = Abi::Aggregate { sized: true };
-
-    if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-        abi = Abi::Uninhabited;
-    } else if tag.size(dl) == size {
-        // Make sure we only use scalar layout when the enum is entirely its
-        // own tag (i.e. it has no padding nor any non-ZST variant fields).
-        abi = Abi::Scalar(tag);
-    } else {
-        // Try to use a ScalarPair for all tagged enums.
-        // That's possible only if we can find a common primitive type for all variants.
-        let mut common_prim = None;
-        let mut common_prim_initialized_in_all_variants = true;
-        for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
-            let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
-                panic!("encountered a non-arbitrary layout during enum layout");
-            };
-            // We skip *all* ZST here and later check if we are good in terms of alignment.
-            // This lets us handle some cases involving aligned ZST.
-            let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
-            let (field, offset) = match (fields.next(), fields.next()) {
-                (None, None) => {
-                    common_prim_initialized_in_all_variants = false;
-                    continue;
-                }
-                (Some(pair), None) => pair,
-                _ => {
-                    common_prim = None;
-                    break;
-                }
-            };
-            let prim = match field.abi {
-                Abi::Scalar(scalar) => {
-                    common_prim_initialized_in_all_variants &=
-                        matches!(scalar, Scalar::Initialized { .. });
-                    scalar.primitive()
-                }
-                _ => {
-                    common_prim = None;
-                    break;
-                }
-            };
-            if let Some((old_prim, common_offset)) = common_prim {
-                // All variants must be at the same offset
-                if offset != common_offset {
-                    common_prim = None;
-                    break;
-                }
-                // This is pretty conservative. We could go fancier
-                // by realising that (u8, u8) could just cohabit with
-                // u16 or even u32.
-                let new_prim = match (old_prim, prim) {
-                    // Allow all identical primitives.
-                    (x, y) if x == y => x,
-                    // Allow integers of the same size with differing signedness.
-                    // We arbitrarily choose the signedness of the first variant.
-                    (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
-                    // Allow integers mixed with pointers of the same layout.
-                    // We must represent this using a pointer, to avoid
-                    // roundtripping pointers through ptrtoint/inttoptr.
-                    (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
-                    | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
-                        if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
-                    {
-                        p
-                    }
-                    _ => {
-                        common_prim = None;
-                        break;
-                    }
-                };
-                // We may be updating the primitive here, for example from int->ptr.
-                common_prim = Some((new_prim, common_offset));
-            } else {
-                common_prim = Some((prim, offset));
-            }
-        }
+        let tag_mask = ity.size().unsigned_int_max();
+        let tag = Scalar::Initialized {
+            value: Primitive::Int(ity, signed),
+            valid_range: WrappingRange {
+                start: (min as u128 & tag_mask),
+                end: (max as u128 & tag_mask),
+            },
+        };
+        let mut abi = Abi::Aggregate { sized: true };
+
+        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+            abi = Abi::Uninhabited;
+        } else if tag.size(dl) == size {
+            // Make sure we only use scalar layout when the enum is entirely its
+            // own tag (i.e. it has no padding nor any non-ZST variant fields).
+            abi = Abi::Scalar(tag);
+        } else {
+            // Try to use a ScalarPair for all tagged enums.
+            // That's possible only if we can find a common primitive type for all variants.
+            let mut common_prim = None;
+            let mut common_prim_initialized_in_all_variants = true;
+            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
+                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
+                    panic!("encountered a non-arbitrary layout during enum layout");
+                };
+                // We skip *all* ZST here and later check if we are good in terms of alignment.
+                // This lets us handle some cases involving aligned ZST.
+                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+                let (field, offset) = match (fields.next(), fields.next()) {
+                    (None, None) => {
+                        common_prim_initialized_in_all_variants = false;
+                        continue;
+                    }
+                    (Some(pair), None) => pair,
+                    _ => {
+                        common_prim = None;
+                        break;
+                    }
+                };
+                let prim = match field.abi {
+                    Abi::Scalar(scalar) => {
+                        common_prim_initialized_in_all_variants &=
+                            matches!(scalar, Scalar::Initialized { .. });
+                        scalar.primitive()
+                    }
+                    _ => {
+                        common_prim = None;
+                        break;
+                    }
+                };
+                if let Some((old_prim, common_offset)) = common_prim {
+                    // All variants must be at the same offset
+                    if offset != common_offset {
+                        common_prim = None;
+                        break;
+                    }
+                    // This is pretty conservative. We could go fancier
+                    // by realising that (u8, u8) could just cohabit with
+                    // u16 or even u32.
+                    let new_prim = match (old_prim, prim) {
+                        // Allow all identical primitives.
+                        (x, y) if x == y => x,
+                        // Allow integers of the same size with differing signedness.
+                        // We arbitrarily choose the signedness of the first variant.
+                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
+                        // Allow integers mixed with pointers of the same layout.
+                        // We must represent this using a pointer, to avoid
+                        // roundtripping pointers through ptrtoint/inttoptr.
+                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
+                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
+                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
+                        {
+                            p
+                        }
+                        _ => {
+                            common_prim = None;
+                            break;
+                        }
+                    };
+                    // We may be updating the primitive here, for example from int->ptr.
+                    common_prim = Some((new_prim, common_offset));
+                } else {
+                    common_prim = Some((prim, offset));
+                }
+            }
 
-        if let Some((prim, offset)) = common_prim {
-            let prim_scalar = if common_prim_initialized_in_all_variants {
-                let size = prim.size(dl);
-                assert!(size.bits() <= 128);
-                Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
-            } else {
-                // Common prim might be uninit.
-                Scalar::Union { value: prim }
-            };
-            let pair = layout_calc.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
-            let pair_offsets = match pair.fields {
-                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                    assert_eq!(memory_index.raw, [0, 1]);
-                    offsets
-                }
-                _ => panic!("encountered a non-arbitrary layout during enum layout"),
-            };
-            if pair_offsets[FieldIdx::new(0)] == Size::ZERO
-                && pair_offsets[FieldIdx::new(1)] == *offset
-                && align == pair.align
-                && size == pair.size
-            {
-                // We can use `ScalarPair` only when it matches our
-                // already computed layout (including `#[repr(C)]`).
-                abi = pair.abi;
-            }
-        }
-    }
+            if let Some((prim, offset)) = common_prim {
+                let prim_scalar = if common_prim_initialized_in_all_variants {
+                    let size = prim.size(dl);
+                    assert!(size.bits() <= 128);
+                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
+                } else {
+                    // Common prim might be uninit.
+                    Scalar::Union { value: prim }
+                };
+                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
+                let pair_offsets = match pair.fields {
+                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                        assert_eq!(memory_index.raw, [0, 1]);
+                        offsets
+                    }
+                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
+                };
+                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
+                    && pair_offsets[FieldIdx::new(1)] == *offset
+                    && align == pair.align
+                    && size == pair.size
+                {
+                    // We can use `ScalarPair` only when it matches our
+                    // already computed layout (including `#[repr(C)]`).
+                    abi = pair.abi;
+                }
+            }
+        }
 
-    // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
-    // variants to ensure they are consistent. This is because a downcast is
-    // semantically a NOP, and thus should not affect layout.
-    if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
-        for variant in &mut layout_variants {
-            // We only do this for variants with fields; the others are not accessed anyway.
-            // Also do not overwrite any already existing "clever" ABIs.
-            if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                variant.abi = abi;
-                // Also need to bump up the size and alignment, so that the entire value fits
-                // in here.
-                variant.size = cmp::max(variant.size, size);
-                variant.align.abi = cmp::max(variant.align.abi, align.abi);
-            }
-        }
-    }
+        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+        // variants to ensure they are consistent. This is because a downcast is
+        // semantically a NOP, and thus should not affect layout.
+        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+            for variant in &mut layout_variants {
+                // We only do this for variants with fields; the others are not accessed anyway.
+                // Also do not overwrite any already existing "clever" ABIs.
+                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
+                    variant.abi = abi;
+                    // Also need to bump up the size and alignment, so that the entire value fits
+                    // in here.
+                    variant.size = cmp::max(variant.size, size);
+                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
+                }
+            }
+        }
 
-    let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
-    let tagged_layout = LayoutS {
-        variants: Variants::Multiple {
-            tag,
-            tag_encoding: TagEncoding::Direct,
-            tag_field: 0,
-            variants: IndexVec::new(),
-        },
-        fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
-        largest_niche,
-        abi,
-        align,
-        size,
-        max_repr_align,
-        unadjusted_abi_align,
-    };
-
-    let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
-    let mut best_layout = match (tagged_layout, niche_filling_layout) {
-        (tl, Some(nl)) => {
-            // Pick the smaller layout; otherwise,
-            // pick the layout with the larger niche; otherwise,
-            // pick tagged as it has simpler codegen.
-            use cmp::Ordering::*;
-            let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
-                tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
-            };
-            match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
-                (Greater, _) => nl,
-                (Equal, Less) => nl,
-                _ => tl,
-            }
-        }
-        (tl, None) => tl,
-    };
-
-    // Now we can intern the variant layouts and store them in the enum layout.
-    best_layout.layout.variants = match best_layout.layout.variants {
-        Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
-            Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
-        }
-        Variants::Single { .. } => {
-            panic!("encountered a single-variant enum during multi-variant layout")
-        }
-    };
-    Some(best_layout.layout)
-}
+        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
 
+        let tagged_layout = LayoutS {
+            variants: Variants::Multiple {
+                tag,
+                tag_encoding: TagEncoding::Direct,
+                tag_field: 0,
+                variants: IndexVec::new(),
+            },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
+            },
+            largest_niche,
+            abi,
+            align,
+            size,
+            max_repr_align,
+            unadjusted_abi_align,
+        };
 
-/// Determines towards which end of a struct layout optimizations will try to place the best niches.
-enum NicheBias {
-    Start,
-    End,
-}
-
-fn univariant<
-    'a,
-    FieldIdx: Idx,
-    VariantIdx: Idx,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
->(
-    this: &(impl LayoutCalculator + ?Sized),
-    dl: &TargetDataLayout,
-    fields: &IndexSlice<FieldIdx, F>,
-    repr: &ReprOptions,
-    kind: StructKind,
-    niche_bias: NicheBias,
-) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-    let pack = repr.pack;
-    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-    let mut max_repr_align = repr.align;
-    let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
-    let optimize_field_order = !repr.inhibit_struct_field_reordering();
-    if optimize_field_order && fields.len() > 1 {
-        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-        let optimizing = &mut inverse_memory_index.raw[..end];
-        let fields_excluding_tail = &fields.raw[..end];
-
-        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
-        // the field ordering to try and catch some code making assumptions about layouts
-        // we don't guarantee.
-        if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
-            #[cfg(feature = "randomize")]
-            {
-                use rand::seq::SliceRandom;
-                use rand::SeedableRng;
-                // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
-                // ordering.
-                let mut rng =
-                    rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
-                // Shuffle the ordering of the fields.
-                optimizing.shuffle(&mut rng);
-            }
-            // Otherwise we just leave things alone and actually optimize the type's fields
-        } else {
-            // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
-            // not depend on the layout of the tail.
-            let max_field_align =
-                fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
-            let largest_niche_size = fields_excluding_tail
-                .iter()
-                .filter_map(|f| f.largest_niche)
-                .map(|n| n.available(dl))
-                .max()
-                .unwrap_or(0);
-
-            // Calculates a sort key to group fields by their alignment or possibly some
-            // size-derived pseudo-alignment.
-            let alignment_group_key = |layout: &F| {
-                // The two branches here return values that cannot be meaningfully compared with
-                // each other. However, we know that consistently for all executions of
-                // `alignment_group_key`, one or the other branch will be taken, so this is okay.
-                if let Some(pack) = pack {
-                    // Return the packed alignment in bytes.
-                    layout.align.abi.min(pack).bytes()
-                } else {
-                    // Returns `log2(effective-align)`. The calculation assumes that size is an
-                    // integer multiple of align, except for ZSTs.
-                    let align = layout.align.abi.bytes();
-                    let size = layout.size.bytes();
-                    let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
-                    // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
-                    let size_as_align = align.max(size).trailing_zeros();
-                    let size_as_align = if largest_niche_size > 0 {
-                        match niche_bias {
-                            // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
-                            // array to the front in the first case (for aligned loads) but keep
-                            // the bool in front in the second case for its niches.
-                            NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
-                            // When moving niches towards the end of the struct then for
-                            // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
-                            // in the align-1 group because its bool can be moved closer to the end.
-                            NicheBias::End if niche_size == largest_niche_size => {
-                                align.trailing_zeros()
-                            }
-                            NicheBias::End => size_as_align,
-                        }
-                    } else {
-                        size_as_align
-                    };
-                    size_as_align as u64
-                }
-            };
+        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
 
+        let mut best_layout = match (tagged_layout, niche_filling_layout) {
+            (tl, Some(nl)) => {
+                // Pick the smaller layout; otherwise,
+                // pick the layout with the larger niche; otherwise,
+                // pick tagged as it has simpler codegen.
+                use cmp::Ordering::*;
+                let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
+                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+                };
+                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
+                    (Greater, _) => nl,
+                    (Equal, Less) => nl,
+                    _ => tl,
+                }
+            }
+            (tl, None) => tl,
+        };
+
+        // Now we can intern the variant layouts and store them in the enum layout.
+        best_layout.layout.variants = match best_layout.layout.variants {
+            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
+                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
+            }
+            Variants::Single { .. } => {
+                panic!("encountered a single-variant enum during multi-variant layout")
+            }
+        };
+        Ok(best_layout.layout)
+    }
+
+    fn univariant_biased<
+        'a,
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+    >(
+        &self,
+        fields: &IndexSlice<FieldIdx, F>,
+        repr: &ReprOptions,
+        kind: StructKind,
+        niche_bias: NicheBias,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx> {
+        let dl = self.cx.data_layout();
+        let pack = repr.pack;
+        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+        let mut max_repr_align = repr.align;
+        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
+        let optimize_field_order = !repr.inhibit_struct_field_reordering();
+        if optimize_field_order && fields.len() > 1 {
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index.raw[..end];
+            let fields_excluding_tail = &fields.raw[..end];
+
+            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+            // the field ordering to try and catch some code making assumptions about layouts
+            // we don't guarantee.
+            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+                #[cfg(feature = "randomize")]
+                {
+                    use rand::seq::SliceRandom;
+                    use rand::SeedableRng;
+                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
+                    // ordering.
+                    let mut rng =
+                        rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+                    // Shuffle the ordering of the fields.
+                    optimizing.shuffle(&mut rng);
+                }
+                // Otherwise we just leave things alone and actually optimize the type's fields
+            } else {
+                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
+                // not depend on the layout of the tail.
+                let max_field_align =
+                    fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
+                let largest_niche_size = fields_excluding_tail
+                    .iter()
+                    .filter_map(|f| f.largest_niche)
+                    .map(|n| n.available(dl))
+                    .max()
+                    .unwrap_or(0);
+
+                // Calculates a sort key to group fields by their alignment or possibly some
+                // size-derived pseudo-alignment.
+                let alignment_group_key = |layout: &F| {
+                    // The two branches here return values that cannot be meaningfully compared with
+                    // each other. However, we know that consistently for all executions of
+                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
+                    if let Some(pack) = pack {
+                        // Return the packed alignment in bytes.
+                        layout.align.abi.min(pack).bytes()
+                    } else {
+                        // Returns `log2(effective-align)`. The calculation assumes that size is an
+                        // integer multiple of align, except for ZSTs.
+                        let align = layout.align.abi.bytes();
+                        let size = layout.size.bytes();
+                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
+                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
+                        let size_as_align = align.max(size).trailing_zeros();
+                        let size_as_align = if largest_niche_size > 0 {
+                            match niche_bias {
+                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
+                                // array to the front in the first case (for aligned loads) but keep
+                                // the bool in front in the second case for its niches.
+                                NicheBias::Start => {
+                                    max_field_align.trailing_zeros().min(size_as_align)
+                                }
+                                // When moving niches towards the end of the struct then for
+                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
+                                // in the align-1 group because its bool can be moved closer to the end.
+                                NicheBias::End if niche_size == largest_niche_size => {
+                                    align.trailing_zeros()
+                                }
+                                NicheBias::End => size_as_align,
+                            }
+                        } else {
+                            size_as_align
+                        };
+                        size_as_align as u64
+                    }
+                };
 
-            match kind {
-                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                    // Currently `LayoutS` only exposes a single niche so sorting is usually
-                    // sufficient to get one niche into the preferred position. If it ever
-                    // supported multiple niches then a more advanced pick-and-pack approach could
-                    // provide better results. But even for the single-niche cache it's not
-                    // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
-                    // bool to the front but it would require packing the tuple together with the
-                    // u16 to build a 4-byte group so that the u32 can be placed after it without
-                    // padding. This kind of packing can't be achieved by sorting.
-                    optimizing.sort_by_key(|&x| {
-                        let f = &fields[x];
-                        let field_size = f.size.bytes();
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
-                        let niche_size_key = match niche_bias {
-                            // large niche first
-                            NicheBias::Start => !niche_size,
-                            // large niche last
-                            NicheBias::End => niche_size,
-                        };
-                        let inner_niche_offset_key = match niche_bias {
-                            NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
-                            NicheBias::End => f.largest_niche.map_or(0, |n| {
-                                !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
-                            }),
-                        };
-
-                        (
-                            // Then place largest alignments first.
-                            cmp::Reverse(alignment_group_key(f)),
-                            // Then prioritize niche placement within alignment group according to
-                            // `niche_bias_start`.
-                            niche_size_key,
-                            // Then among fields with equally-sized niches prefer the ones
-                            // closer to the start/end of the field.
-                            inner_niche_offset_key,
-                        )
-                    });
-                }
-
-                StructKind::Prefixed(..) => {
-                    // Sort in ascending alignment so that the layout stays optimal
-                    // regardless of the prefix.
-                    // And put the largest niche in an alignment group at the end
-                    // so it can be used as discriminant in jagged enums
-                    optimizing.sort_by_key(|&x| {
-                        let f = &fields[x];
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
-                        (alignment_group_key(f), niche_size)
-                    });
-                }
-            }
-
-            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
-            // regardless of the status of `-Z randomize-layout`
-        }
-    }
+                match kind {
+                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                        // Currently `LayoutS` only exposes a single niche so sorting is usually
+                        // sufficient to get one niche into the preferred position. If it ever
+                        // supported multiple niches then a more advanced pick-and-pack approach could
+                        // provide better results. But even for the single-niche cache it's not
+                        // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
+                        // bool to the front but it would require packing the tuple together with the
+                        // u16 to build a 4-byte group so that the u32 can be placed after it without
+                        // padding. This kind of packing can't be achieved by sorting.
+                        optimizing.sort_by_key(|&x| {
+                            let f = &fields[x];
+                            let field_size = f.size.bytes();
+                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+                            let niche_size_key = match niche_bias {
+                                // large niche first
+                                NicheBias::Start => !niche_size,
+                                // large niche last
+                                NicheBias::End => niche_size,
+                            };
+                            let inner_niche_offset_key = match niche_bias {
+                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
+                                NicheBias::End => f.largest_niche.map_or(0, |n| {
+                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
+                                }),
+                            };
+
+                            (
+                                // Then place largest alignments first.
+                                cmp::Reverse(alignment_group_key(f)),
+                                // Then prioritize niche placement within alignment group according to
+                                // `niche_bias_start`.
+                                niche_size_key,
+                                // Then among fields with equally-sized niches prefer the ones
+                                // closer to the start/end of the field.
+                                inner_niche_offset_key,
+                            )
+                        });
+                    }
+
+                    StructKind::Prefixed(..) => {
+                        // Sort in ascending alignment so that the layout stays optimal
+                        // regardless of the prefix.
+                        // And put the largest niche in an alignment group at the end
+                        // so it can be used as discriminant in jagged enums
+                        optimizing.sort_by_key(|&x| {
+                            let f = &fields[x];
+                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+                            (alignment_group_key(f), niche_size)
+                        });
+                    }
+                }
+
+                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+                // regardless of the status of `-Z randomize-layout`
+            }
+        }
 
-    // inverse_memory_index holds field indices by increasing memory offset.
-    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-    // We now write field offsets to the corresponding offset slot;
-    // field 5 with offset 0 puts 0 in offsets[5].
-    // At the bottom of this function, we invert `inverse_memory_index` to
-    // produce `memory_index` (see `invert_mapping`).
-    let mut sized = true;
-    let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
-    let mut offset = Size::ZERO;
-    let mut largest_niche = None;
-    let mut largest_niche_available = 0;
-    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-        let prefix_align =
-            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-        align = align.max(AbiAndPrefAlign::new(prefix_align));
-        offset = prefix_size.align_to(prefix_align);
-    }
-    for &i in &inverse_memory_index {
-        let field = &fields[i];
-        if !sized {
-            this.delayed_bug(format!(
-                "univariant: field #{} comes after unsized field",
-                offsets.len(),
-            ));
-        }
-
-        if field.is_unsized() {
-            sized = false;
-        }
-
-        // Invariant: offset < dl.obj_size_bound() <= 1<<61
-        let field_align = if let Some(pack) = pack {
-            field.align.min(AbiAndPrefAlign::new(pack))
-        } else {
-            field.align
-        };
-        offset = offset.align_to(field_align.abi);
-        align = align.max(field_align);
-        max_repr_align = max_repr_align.max(field.max_repr_align);
-
-        debug!("univariant offset: {:?} field: {:#?}", offset, field);
-        offsets[i] = offset;
-
-        if let Some(mut niche) = field.largest_niche {
-            let available = niche.available(dl);
-            // Pick up larger niches.
-            let prefer_new_niche = match niche_bias {
-                NicheBias::Start => available > largest_niche_available,
-                // if there are several niches of the same size then pick the last one
-                NicheBias::End => available >= largest_niche_available,
-            };
-            if prefer_new_niche {
-                largest_niche_available = available;
-                niche.offset += offset;
-                largest_niche = Some(niche);
-            }
-        }
-
-        offset = offset.checked_add(field.size, dl)?;
-    }
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we invert `inverse_memory_index` to
+        // produce `memory_index` (see `invert_mapping`).
+        let mut sized = true;
+        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
+        let mut offset = Size::ZERO;
+        let mut largest_niche = None;
+        let mut largest_niche_available = 0;
+        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+            let prefix_align =
+                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+            align = align.max(AbiAndPrefAlign::new(prefix_align));
+            offset = prefix_size.align_to(prefix_align);
+        }
+        for &i in &inverse_memory_index {
+            let field = &fields[i];
+            if !sized {
+                return Err(LayoutCalculatorError::UnexpectedUnsized);
+            }
+
+            if field.is_unsized() {
+                sized = false;
+            }
+
+            // Invariant: offset < dl.obj_size_bound() <= 1<<61
+            let field_align = if let Some(pack) = pack {
+                field.align.min(AbiAndPrefAlign::new(pack))
+            } else {
+                field.align
+            };
+            offset = offset.align_to(field_align.abi);
+            align = align.max(field_align);
+            max_repr_align = max_repr_align.max(field.max_repr_align);
+
+            debug!("univariant offset: {:?} field: {:#?}", offset, field);
+            offsets[i] = offset;
+
+            if let Some(mut niche) = field.largest_niche {
+                let available = niche.available(dl);
+                // Pick up larger niches.
+ let prefer_new_niche = match niche_bias { + NicheBias::Start => available > largest_niche_available, + // if there are several niches of the same size then pick the last one + NicheBias::End => available >= largest_niche_available, + }; + if prefer_new_niche { + largest_niche_available = available; + niche.offset += offset; + largest_niche = Some(niche); } } - // FIXME(Kixiron): We can always shuffle fields within a given alignment class - // regardless of the status of `-Z randomize-layout` - } - } - // inverse_memory_index holds field indices by increasing memory offset. - // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. - // We now write field offsets to the corresponding offset slot; - // field 5 with offset 0 puts 0 in offsets[5]. - // At the bottom of this function, we invert `inverse_memory_index` to - // produce `memory_index` (see `invert_mapping`). - let mut sized = true; - let mut offsets = IndexVec::from_elem(Size::ZERO, fields); - let mut offset = Size::ZERO; - let mut largest_niche = None; - let mut largest_niche_available = 0; - if let StructKind::Prefixed(prefix_size, prefix_align) = kind { - let prefix_align = - if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align }; - align = align.max(AbiAndPrefAlign::new(prefix_align)); - offset = prefix_size.align_to(prefix_align); - } - for &i in &inverse_memory_index { - let field = &fields[i]; - if !sized { - this.delayed_bug(format!( - "univariant: field #{} comes after unsized field", - offsets.len(), - )); + offset = + offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?; } - if field.is_unsized() { - sized = false; + // The unadjusted ABI alignment does not include repr(align), but does include repr(pack). + // See documentation on `LayoutS::unadjusted_abi_align`. + let unadjusted_abi_align = align.abi; + if let Some(repr_align) = repr.align { + align = align.max(AbiAndPrefAlign::new(repr_align)); } + // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate. + let align = align; - // Invariant: offset < dl.obj_size_bound() <= 1<<61 - let field_align = if let Some(pack) = pack { - field.align.min(AbiAndPrefAlign::new(pack)) + debug!("univariant min_size: {:?}", offset); + let min_size = offset; + // As stated above, inverse_memory_index holds field indices by increasing offset. + // This makes it an already-sorted view of the offsets vec. + // To invert it, consider: + // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. + // Field 5 would be the first element, so memory_index is i: + // Note: if we didn't optimize, it's already right. + let memory_index = if optimize_field_order { + inverse_memory_index.invert_bijective_mapping() } else { - field.align + debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices())); + inverse_memory_index.into_iter().map(|it| it.index() as u32).collect() }; - offset = offset.align_to(field_align.abi); - align = align.max(field_align); - max_repr_align = max_repr_align.max(field.max_repr_align); - - debug!("univariant offset: {:?} field: {:#?}", offset, field); - offsets[i] = offset; - - if let Some(mut niche) = field.largest_niche { - let available = niche.available(dl); - // Pick up larger niches. 
- let prefer_new_niche = match niche_bias { - NicheBias::Start => available > largest_niche_available, - // if there are several niches of the same size then pick the last one - NicheBias::End => available >= largest_niche_available, - }; - if prefer_new_niche { - largest_niche_available = available; - niche.offset += offset; - largest_niche = Some(niche); - } + let size = min_size.align_to(align.abi); + // FIXME(oli-obk): deduplicate and harden these checks + if size.bytes() >= dl.obj_size_bound() { + return Err(LayoutCalculatorError::SizeOverflow); } + let mut layout_of_single_non_zst_field = None; + let mut abi = Abi::Aggregate { sized }; - offset = offset.checked_add(field.size, dl)?; - } + let optimize_abi = !repr.inhibit_newtype_abi_optimization(); - // The unadjusted ABI alignment does not include repr(align), but does include repr(pack). - // See documentation on `LayoutS::unadjusted_abi_align`. - let unadjusted_abi_align = align.abi; - if let Some(repr_align) = repr.align { - align = align.max(AbiAndPrefAlign::new(repr_align)); - } - // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate. - let align = align; - - debug!("univariant min_size: {:?}", offset); - let min_size = offset; - // As stated above, inverse_memory_index holds field indices by increasing offset. - // This makes it an already-sorted view of the offsets vec. - // To invert it, consider: - // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. - // Field 5 would be the first element, so memory_index is i: - // Note: if we didn't optimize, it's already right. - let memory_index = if optimize_field_order { - inverse_memory_index.invert_bijective_mapping() - } else { - debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices())); - inverse_memory_index.into_iter().map(|it| it.index() as u32).collect() - }; - let size = min_size.align_to(align.abi); - // FIXME(oli-obk): deduplicate and harden these checks - if size.bytes() >= dl.obj_size_bound() { - return None; - } - let mut layout_of_single_non_zst_field = None; - let mut abi = Abi::Aggregate { sized }; - - let optimize_abi = !repr.inhibit_newtype_abi_optimization(); - - // Try to make this a Scalar/ScalarPair. - if sized && size.bytes() > 0 { - // We skip *all* ZST here and later check if we are good in terms of alignment. - // This lets us handle some cases involving aligned ZST. - let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst()); - - match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { - // We have exactly one non-ZST field. - (Some((i, field)), None, None) => { - layout_of_single_non_zst_field = Some(field); - - // Field fills the struct and it has a scalar or scalar pair ABI. - if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size { - match field.abi { - // For plain scalars, or vectors of them, we can't unpack - // newtypes for `#[repr(C)]`, as that affects C ABIs. - Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => { - abi = field.abi; - } - // But scalar pairs are Rust-specific and get - // treated as aggregates by C ABIs anyway. - Abi::ScalarPair(..) => { - abi = field.abi; + // Try to make this a Scalar/ScalarPair. + if sized && size.bytes() > 0 { + // We skip *all* ZST here and later check if we are good in terms of alignment. + // This lets us handle some cases involving aligned ZST. 
+ let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ layout_of_single_non_zst_field = Some(field);
+
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+ {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
+ abi = field.abi;
+ }
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi;
+ }
+ _ => {}
}
}
}
- // Two non-ZST fields, and they're both scalars.
- (Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
- (Abi::Scalar(a), Abi::Scalar(b)) => {
- // Order by the memory placement, not source order.
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
- ((i, a), (j, b))
- } else {
- ((j, b), (i, a))
- };
- let pair = this.scalar_pair::<FieldIdx, VariantIdx>(a, b);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index.raw, [0, 1]);
- offsets
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, a)), Some((j, b)), None) => {
+ match (a.abi, b.abi) {
+ (Abi::Scalar(a), Abi::Scalar(b)) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index.raw, [0, 1]);
+ offsets
+ }
+ FieldsShape::Primitive
+ | FieldsShape::Array { .. }
+ | FieldsShape::Union(..) => {
+ panic!("encountered a non-arbitrary layout during enum layout")
+ }
+ };
+ if offsets[i] == pair_offsets[FieldIdx::new(0)]
+ && offsets[j] == pair_offsets[FieldIdx::new(1)]
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
}
- FieldsShape::Primitive
- | FieldsShape::Array { .. }
- | FieldsShape::Union(..) => {
- panic!("encountered a non-arbitrary layout during enum layout")
- }
- };
- if offsets[i] == pair_offsets[FieldIdx::new(0)]
- && offsets[j] == pair_offsets[FieldIdx::new(1)]
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
+ }
+ _ => {}
}
- _ => {}
}
- }
- _ => {}
+ _ => {}
+ }
+ }
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
}
- }
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- }
- let unadjusted_abi_align = if repr.transparent() {
- match layout_of_single_non_zst_field {
- Some(l) => l.unadjusted_abi_align,
- None => {
- // `repr(transparent)` with all ZST fields.
- align.abi
+ let unadjusted_abi_align = if repr.transparent() {
+ match layout_of_single_non_zst_field {
+ Some(l) => l.unadjusted_abi_align,
+ None => {
+ // `repr(transparent)` with all ZST fields.
+ align.abi
+ }
}
- } else {
- unadjusted_abi_align
- };
-
- Some(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary { offsets, memory_index },
- abi,
- largest_niche,
- align,
- size,
- max_repr_align,
- unadjusted_abi_align,
- })
-}
+ } else {
+ unadjusted_abi_align
+ };
-fn format_field_niches<
- 'a,
- FieldIdx: Idx,
- VariantIdx: Idx,
- F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
->(
- layout: &LayoutS<FieldIdx, VariantIdx>,
- fields: &IndexSlice<FieldIdx, F>,
- dl: &TargetDataLayout,
-) -> String {
- let mut s = String::new();
- for i in layout.fields.index_by_increasing_offset() {
- let offset = layout.fields.offset(i);
- let f = &fields[FieldIdx::new(i)];
- write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
- if let Some(n) = f.largest_niche {
- write!(
- s,
- " n{}b{}s{}",
- n.offset.bytes(),
- n.available(dl).ilog2(),
- n.value.size(dl).bytes()
- )
- .unwrap();
+ Ok(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary { offsets, memory_index },
+ abi,
+ largest_niche,
+ align,
+ size,
+ max_repr_align,
+ unadjusted_abi_align,
+ })
+ }
+
+ fn format_field_niches<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
+ &self,
+ layout: &LayoutS<FieldIdx, VariantIdx>,
+ fields: &IndexSlice<FieldIdx, F>,
+ ) -> String {
+ let dl = self.cx.data_layout();
+ let mut s = String::new();
+ for i in layout.fields.index_by_increasing_offset() {
+ let offset = layout.fields.offset(i);
+ let f = &fields[FieldIdx::new(i)];
+ write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
+ if let Some(n) = f.largest_niche {
+ write!(
+ s,
+ " n{}b{}s{}",
+ n.offset.bytes(),
+ n.available(dl).ilog2(),
+ n.value.size(dl).bytes()
+ )
+ .unwrap();
+ }
+ write!(s, "] ").unwrap();
}
- write!(s, "] ").unwrap();
+ s
}
- s
}
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index be42bc8493243..452c7cb3225c7 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -26,7 +26,7 @@ mod layout;
#[cfg(test)]
mod tests;
-pub use layout::LayoutCalculator;
+pub use layout::{LayoutCalculator, LayoutCalculatorError};
/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
@@ -393,6 +393,14 @@ impl HasDataLayout for TargetDataLayout {
}
}
+// used by rust-analyzer
+impl HasDataLayout for &TargetDataLayout {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ (**self).data_layout()
+ }
+}
+
/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index c96296eddb844..8351a6af25b81 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -319,7 +319,7 @@ pub fn valtree_to_const_value<'tcx>(
let branches = valtree.unwrap_branch();
// Find the non-ZST field. (There can be aligned ZST!)
for (i, &inner_valtree) in branches.iter().enumerate() {
- let field = layout.field(&LayoutCx { tcx, param_env }, i);
+ let field = layout.field(&LayoutCx::new(tcx, param_env), i);
if !field.is_zst() {
return valtree_to_const_value(tcx, param_env.and(field.ty), inner_valtree);
}
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 58926fd61d1b0..c2f049a3874a6 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -946,7 +946,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
) -> Cow<'e, RangeSet> {
assert!(layout.ty.is_union());
assert!(layout.abi.is_sized(), "there are no unsized unions");
- let layout_cx = LayoutCx { tcx: *ecx.tcx, param_env: ecx.param_env };
+ let layout_cx = LayoutCx::new(*ecx.tcx, ecx.param_env);
return M::cached_union_data_range(ecx, layout.ty, || {
let mut out = RangeSet(Vec::new());
union_data_range_uncached(&layout_cx, layout, Size::ZERO, &mut out);
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index f5277c328ea7a..19393188c9adf 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,5 +1,7 @@
use rustc_middle::bug;
-use rustc_middle::ty::layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement};
+use rustc_middle::ty::layout::{
+ HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
+};
use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
@@ -30,7 +32,7 @@ pub fn check_validity_requirement<'tcx>(
return Ok(!layout.abi.is_uninhabited());
}
- let layout_cx = LayoutCx { tcx, param_env: param_env_and_ty.param_env };
+ let layout_cx = LayoutCx::new(tcx, param_env_and_ty.param_env);
if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
check_validity_requirement_strict(layout, &layout_cx, kind)
} else {
@@ -47,7 +49,7 @@ fn check_validity_requirement_strict<'tcx>(
) -> Result<bool, &'tcx LayoutError<'tcx>> {
let machine = CompileTimeMachine::new(CanAccessMutGlobal::No, CheckAlignment::Error);
- let mut cx = InterpCx::new(cx.tcx, rustc_span::DUMMY_SP, cx.param_env, machine);
+ let mut cx = InterpCx::new(cx.tcx(), rustc_span::DUMMY_SP, cx.param_env, machine);
let allocated = cx
.allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 48eb82270227b..6d878ab765430 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,4 +1,3 @@
-use std::borrow::Cow;
use std::num::NonZero;
use std::ops::Bound;
use std::{cmp, fmt};
@@ -287,19 +286,13 @@ impl<'tcx> IntoDiagArg for LayoutError<'tcx> {
#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx> {
- pub tcx: TyCtxt<'tcx>,
+ pub calc: LayoutCalculator<TyCtxt<'tcx>>,
pub param_env: ty::ParamEnv<'tcx>,
}
-impl<'tcx> LayoutCalculator for LayoutCx<'tcx> {
- type TargetDataLayoutRef = &'tcx TargetDataLayout;
-
- fn delayed_bug(&self, txt: impl Into<Cow<'static, str>>) {
- self.tcx.dcx().delayed_bug(txt);
- }
-
- fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
- &self.tcx.data_layout
+impl<'tcx> LayoutCx<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ Self { calc: LayoutCalculator::new(tcx), param_env }
}
}
@@ -576,25 +569,25 @@ impl<'tcx> HasParamEnv<'tcx> for LayoutCx<'tcx> {
impl<'tcx> HasDataLayout for LayoutCx<'tcx> {
fn data_layout(&self) -> &TargetDataLayout {
- self.tcx.data_layout()
+ self.calc.cx.data_layout()
}
}
impl<'tcx> HasTargetSpec for LayoutCx<'tcx> {
fn target_spec(&self) -> &Target {
- self.tcx.target_spec()
+ self.calc.cx.target_spec()
}
}
impl<'tcx> HasWasmCAbiOpt for LayoutCx<'tcx> {
fn wasm_c_abi_opt(&self) -> WasmCAbi {
- self.tcx.wasm_c_abi_opt()
+ self.calc.cx.wasm_c_abi_opt()
}
}
impl<'tcx> HasTyCtxt<'tcx> for LayoutCx<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx.tcx()
+ self.calc.cx
}
}
@@ -695,7 +688,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx> {
_: Span,
_: Ty<'tcx>,
) -> &'tcx LayoutError<'tcx> {
- self.tcx.arena.alloc(err)
+ self.tcx().arena.alloc(err)
}
}
@@ -1323,7 +1316,7 @@ impl<'tcx> TyCtxt<'tcx> {
where
I: Iterator<Item = (VariantIdx, FieldIdx)>,
{
- let cx = LayoutCx { tcx: self, param_env };
+ let cx = LayoutCx::new(self, param_env);
let mut offset = Size::ZERO;
for (variant, field) in indices {
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index e1bc770d81731..312cc3a26ef47 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -128,7 +128,7 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
}
Err(layout_error) => {
- tcx.dcx().emit_fatal(Spanned { node: layout_error.into_diagnostic(), span });
+ tcx.dcx().emit_err(Spanned { node: layout_error.into_diagnostic(), span });
}
}
}
diff --git a/compiler/rustc_transmute/src/layout/mod.rs b/compiler/rustc_transmute/src/layout/mod.rs
index 596d80869eae1..a5c47c480e181 100644
--- a/compiler/rustc_transmute/src/layout/mod.rs
+++ b/compiler/rustc_transmute/src/layout/mod.rs
@@ -63,7 +63,7 @@ pub mod rustc {
use std::fmt::{self, Write};
use rustc_middle::mir::Mutability;
- use rustc_middle::ty::layout::{LayoutCx, LayoutError};
+ use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, LayoutError};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::Layout;
@@ -128,7 +128,7 @@ pub mod rustc {
ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
use rustc_middle::ty::layout::LayoutOf;
- let ty = cx.tcx.erase_regions(ty);
+ let ty = cx.tcx().erase_regions(ty);
cx.layout_of(ty).map(|tl| tl.layout)
}
}
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index 3b7284c1ad6fa..ddf9b6c28f3a9 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -212,7 +212,7 @@ pub(crate) mod rustc {
return Err(Err::TypeError(e));
}
- let target = cx.tcx.data_layout();
+ let target = cx.data_layout();
let pointer_size = target.pointer_size;
match ty.kind() {
@@ -320,7 +320,7 @@ pub(crate) mod rustc {
// Computes the variant of a given index.
let layout_of_variant = |index, encoding: Option<TagEncoding<VariantIdx>>| {
- let tag = cx.tcx.tag_for_variant((cx.tcx.erase_regions(ty), index));
+ let tag = cx.tcx().tag_for_variant((cx.tcx().erase_regions(ty), index));
let variant_def = Def::Variant(def.variant(index));
let variant_layout = ty_variant(cx, (ty, layout), index);
Self::from_variant(
@@ -417,7 +417,7 @@ pub(crate) mod rustc {
}
}
}
- struct_tree = struct_tree.then(Self::from_tag(*tag, cx.tcx));
+ struct_tree = struct_tree.then(Self::from_tag(*tag, cx.tcx()));
}
// Append the fields, in memory order, to the layout.
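Note (illustration, not part of the patch): with `LayoutCalculator` now a struct generic over any `cx` implementing `HasDataLayout`, the same calculator serves rustc (which passes a `TyCtxt<'tcx>`) and rust-analyzer (which passes a bare `&TargetDataLayout`, enabled by the new impl in rustc_abi above). A minimal standalone sketch; the `F`/`V` index newtypes are hypothetical stand-ins for rustc's `FieldIdx`/`VariantIdx`:

    use rustc_abi::{LayoutCalculator, TargetDataLayout};
    use rustc_index::Idx;

    // Hypothetical index types; any `Idx` implementor works here.
    #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
    struct F(usize);
    impl Idx for F {
        fn new(i: usize) -> Self { F(i) }
        fn index(self) -> usize { self.0 }
    }

    #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
    struct V(usize);
    impl Idx for V {
        fn new(i: usize) -> Self { V(i) }
        fn index(self) -> usize { self.0 }
    }

    fn never_layout(dl: &TargetDataLayout) {
        // `&TargetDataLayout: HasDataLayout`, so a plain reference is a valid context.
        let calc = LayoutCalculator::new(dl);
        let never = calc.layout_of_never_type::<F, V>();
        assert!(never.abi.is_uninhabited());
        assert_eq!(never.size.bytes(), 0);
    }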
@@ -509,12 +509,12 @@ pub(crate) mod rustc {
match layout.variants {
Variants::Single { index } => {
let field = &def.variant(index).fields[i];
- field.ty(cx.tcx, args)
+ field.ty(cx.tcx(), args)
}
// Discriminant field for enums (where applicable).
Variants::Multiple { tag, .. } => {
assert_eq!(i.as_usize(), 0);
- ty::layout::PrimitiveExt::to_ty(&tag.primitive(), cx.tcx)
+ ty::layout::PrimitiveExt::to_ty(&tag.primitive(), cx.tcx())
}
}
}
@@ -531,7 +531,7 @@ pub(crate) mod rustc {
(ty, layout): (Ty<'tcx>, Layout<'tcx>),
i: VariantIdx,
) -> Layout<'tcx> {
- let ty = cx.tcx.erase_regions(ty);
+ let ty = cx.tcx().erase_regions(ty);
TyAndLayout { ty, layout }.for_variant(&cx, i).layout
}
}
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
index 95eaedbf04a5f..9a31d9e3ac446 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
@@ -43,7 +43,7 @@ mod rustc {
pub fn answer(self) -> Answer<<TyCtxt<'tcx> as QueryContext>::Ref> {
let Self { src, dst, assume, context } = self;
- let layout_cx = LayoutCx { tcx: context, param_env: ParamEnv::reveal_all() };
+ let layout_cx = LayoutCx::new(context, ParamEnv::reveal_all());
// Convert `src` and `dst` from their rustc representations, to `Tree`-based
// representations.
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 0d433da3aea8b..2d0c2e83690a6 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -331,7 +331,7 @@ fn fn_abi_of_fn_ptr<'tcx>(
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
let (param_env, (sig, extra_args)) = query.into_parts();
- let cx = LayoutCx { tcx, param_env };
+ let cx = LayoutCx::new(tcx, param_env);
fn_abi_new_uncached(&cx, sig, extra_args, None, None, false)
}
@@ -347,7 +347,7 @@ fn fn_abi_of_instance<'tcx>(
instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty());
fn_abi_new_uncached(
- &LayoutCx { tcx, param_env },
+ &LayoutCx::new(tcx, param_env),
sig,
extra_args,
caller_location,
@@ -386,12 +386,14 @@ fn adjust_for_rust_scalar<'tcx>(
attrs.set(ArgAttribute::NonNull);
}
+ let tcx = cx.tcx();
+
if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
let kind = if let Some(kind) = pointee.safe {
Some(kind)
} else if let Some(pointee) = drop_target_pointee {
// The argument to `drop_in_place` is semantically equivalent to a mutable reference.
- Some(PointerKind::MutableRef { unpin: pointee.is_unpin(cx.tcx, cx.param_env()) })
+ Some(PointerKind::MutableRef { unpin: pointee.is_unpin(tcx, cx.param_env()) })
} else {
None
};
@@ -415,12 +417,12 @@ fn adjust_for_rust_scalar<'tcx>(
// The aliasing rules for `Box` are still not decided, but currently we emit
// `noalias` for it. This can be turned off using an unstable flag.
// See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
- let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias;
+ let noalias_for_box = tcx.sess.opts.unstable_opts.box_noalias;
// LLVM prior to version 12 had known miscompiles in the presence of noalias attributes
// (see #54878), so it was conditionally disabled, but we don't support earlier
// versions at all anymore. We still support turning it off using -Zmutable-noalias.
- let noalias_mut_ref = cx.tcx.sess.opts.unstable_opts.mutable_noalias;
+ let noalias_mut_ref = tcx.sess.opts.unstable_opts.mutable_noalias;
// `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as both
// `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on memory
@@ -458,6 +460,7 @@ fn fn_abi_sanity_check<'tcx>(
spec_abi: SpecAbi,
arg: &ArgAbi<'tcx, Ty<'tcx>>,
) {
+ let tcx = cx.tcx();
match &arg.mode {
PassMode::Ignore => {}
PassMode::Direct(_) => {
@@ -484,7 +487,7 @@ fn fn_abi_sanity_check<'tcx>(
// It needs to switch to something else before stabilization can happen.
// (See issue: https://github.com/rust-lang/rust/issues/117271)
assert!(
- matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64")
+ matches!(&*tcx.sess.target.arch, "wasm32" | "wasm64")
|| matches!(spec_abi, SpecAbi::PtxKernel | SpecAbi::Unadjusted),
"`PassMode::Direct` for aggregates only allowed for \"unadjusted\" and \"ptx-kernel\" functions and on wasm\n\
Problematic type: {:#?}",
@@ -516,7 +519,7 @@ fn fn_abi_sanity_check<'tcx>(
// With metadata. Must be unsized and not on the stack.
assert!(arg.layout.is_unsized() && !on_stack);
// Also, must not be `extern` type.
- let tail = cx.tcx.struct_tail_for_codegen(arg.layout.ty, cx.param_env());
+ let tail = tcx.struct_tail_for_codegen(arg.layout.ty, cx.param_env());
if matches!(tail.kind(), ty::Foreign(..)) {
// These types do not have metadata, so having `meta_attrs` is bogus.
// Conceptually, unsized arguments must be copied around, which requires dynamically
@@ -546,7 +549,8 @@ fn fn_abi_new_uncached<'tcx>(
// FIXME(eddyb) replace this with something typed, like an `enum`.
force_thin_self_ptr: bool,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
- let sig = cx.tcx.normalize_erasing_late_bound_regions(cx.param_env, sig);
+ let tcx = cx.tcx();
+ let sig = tcx.normalize_erasing_late_bound_regions(cx.param_env, sig);
let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic);
@@ -576,7 +580,7 @@ fn fn_abi_new_uncached<'tcx>(
};
let is_drop_in_place =
- fn_def_id.is_some_and(|def_id| cx.tcx.is_lang_item(def_id, LangItem::DropInPlace));
+ fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace));
let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
let span = tracing::debug_span!("arg_of");
@@ -588,8 +592,7 @@ fn fn_abi_new_uncached<'tcx>(
_ => bug!("argument to drop_in_place is not a raw ptr: {:?}", ty),
});
- let layout =
- cx.layout_of(ty).map_err(|err| &*cx.tcx.arena.alloc(FnAbiError::Layout(*err)))?;
+ let layout = cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?;
let layout = if force_thin_self_ptr && arg_idx == Some(0) {
// Don't pass the vtable, it's not an argument of the virtual fn.
// Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
@@ -638,7 +641,7 @@ fn fn_abi_new_uncached<'tcx>(
fn_abi_adjust_for_abi(cx, &mut fn_abi, sig.abi, fn_def_id)?;
debug!("fn_abi_new_uncached = {:?}", fn_abi);
fn_abi_sanity_check(cx, &fn_abi, sig.abi);
- Ok(cx.tcx.arena.alloc(fn_abi))
+ Ok(tcx.arena.alloc(fn_abi))
}
#[tracing::instrument(level = "trace", skip(cx))]
@@ -670,17 +673,18 @@ fn fn_abi_adjust_for_abi<'tcx>(
return Ok(());
}
+ let tcx = cx.tcx();
+
if abi == SpecAbi::Rust || abi == SpecAbi::RustCall || abi == SpecAbi::RustIntrinsic {
// Look up the deduced parameter attributes for this function, if we have its def ID and
// we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
// as appropriate.
- let deduced_param_attrs = if cx.tcx.sess.opts.optimize != OptLevel::No
- && cx.tcx.sess.opts.incremental.is_none()
- {
- fn_def_id.map(|fn_def_id| cx.tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
- } else {
- &[]
- };
+ let deduced_param_attrs =
+ if tcx.sess.opts.optimize != OptLevel::No && tcx.sess.opts.incremental.is_none() {
+ fn_def_id.map(|fn_def_id| tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
+ } else {
+ &[]
+ };
let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, arg_idx: Option<usize>| {
if arg.is_ignore() {
@@ -689,7 +693,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
// Avoid returning floats in x87 registers on x86 as loading and storing from x87
// registers will quiet signalling NaNs.
- if cx.tcx.sess.target.arch == "x86"
+ if tcx.sess.target.arch == "x86"
&& arg_idx.is_none()
// Intrinsics themselves are not actual "real" functions, so there's no need to
// change their ABIs.
@@ -744,7 +748,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
// that's how we connect up to LLVM and it's unstable
// anyway, we control all calls to it in libstd.
Abi::Vector { .. }
- if abi != SpecAbi::RustIntrinsic && cx.tcx.sess.target.simd_types_indirect =>
+ if abi != SpecAbi::RustIntrinsic && tcx.sess.target.simd_types_indirect =>
{
arg.make_indirect();
return;
@@ -793,7 +797,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
} else {
fn_abi
.adjust_for_foreign_abi(cx, abi)
- .map_err(|err| &*cx.tcx.arena.alloc(FnAbiError::AdjustForForeignAbi(err)))?;
+ .map_err(|err| &*tcx.arena.alloc(FnAbiError::AdjustForForeignAbi(err)))?;
}
Ok(())
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 2c2276ad40def..50b6d8a0c3fb3 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -9,7 +9,7 @@ use rustc_middle::bug;
use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal};
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{
- FloatExt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
+ FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{
@@ -63,14 +63,14 @@ fn layout_of<'tcx>(
return tcx.layout_of(param_env.and(ty));
}
- let cx = LayoutCx { tcx, param_env };
+ let cx = LayoutCx::new(tcx, param_env);
let layout = layout_of_uncached(&cx, ty)?;
let layout = TyAndLayout { ty, layout };
// If we are running with `-Zprint-type-sizes`, maybe record layouts
// for dumping later.
- if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
+ if cx.tcx().sess.opts.unstable_opts.print_type_sizes {
record_layout_for_printing(&cx, layout);
}
@@ -80,7 +80,36 @@ fn layout_of<'tcx>(
}
fn error<'tcx>(cx: &LayoutCx<'tcx>, err: LayoutError<'tcx>) -> &'tcx LayoutError<'tcx> {
- cx.tcx.arena.alloc(err)
+ cx.tcx().arena.alloc(err)
+}
+
+fn map_error<'tcx>(
+ cx: &LayoutCx<'tcx>,
+ ty: Ty<'tcx>,
+ err: LayoutCalculatorError,
+) -> &'tcx LayoutError<'tcx> {
+ let err = match err {
+ LayoutCalculatorError::SizeOverflow => {
+ // This is sometimes not a compile error in `check` builds.
+ LayoutError::SizeOverflow(ty)
+ }
+ LayoutCalculatorError::UnexpectedUnsized => {
+ // This is sometimes not a compile error if there are trivially false where
+ // clauses, but it is always a compiler error in the empty environment.
+ if cx.param_env.caller_bounds().is_empty() { + cx.tcx().dcx().delayed_bug(format!( + "encountered unexpected unsized field in layout of {ty:?}" + )); + } + LayoutError::Unknown(ty) + } + LayoutCalculatorError::EmptyUnion => { + // This is always a compile error. + cx.tcx().dcx().delayed_bug(format!("computed layout of empty union: {ty:?}")); + LayoutError::Unknown(ty) + } + }; + error(cx, err) } fn univariant_uninterned<'tcx>( @@ -90,13 +119,12 @@ fn univariant_uninterned<'tcx>( repr: &ReprOptions, kind: StructKind, ) -> Result, &'tcx LayoutError<'tcx>> { - let dl = cx.data_layout(); let pack = repr.pack; if pack.is_some() && repr.align.is_some() { - cx.tcx.dcx().bug("struct cannot be packed and aligned"); + cx.tcx().dcx().bug("struct cannot be packed and aligned"); } - cx.univariant(dl, fields, repr, kind).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty))) + cx.calc.univariant(fields, repr, kind).map_err(|err| map_error(cx, ty, err)) } fn layout_of_uncached<'tcx>( @@ -110,7 +138,7 @@ fn layout_of_uncached<'tcx>( return Err(error(cx, LayoutError::ReferencesError(guar))); } - let tcx = cx.tcx; + let tcx = cx.tcx(); let param_env = cx.param_env; let dl = cx.data_layout(); let scalar_unit = |value: Primitive| { @@ -188,7 +216,7 @@ fn layout_of_uncached<'tcx>( } // The never type. - ty::Never => tcx.mk_layout(cx.layout_of_never_type()), + ty::Never => tcx.mk_layout(cx.calc.layout_of_never_type()), // Potentially-wide pointers. ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => { @@ -264,7 +292,7 @@ fn layout_of_uncached<'tcx>( }; // Effectively a (ptr, meta) tuple. - tcx.mk_layout(cx.scalar_pair(data_ptr, metadata)) + tcx.mk_layout(cx.calc.scalar_pair(data_ptr, metadata)) } ty::Dynamic(_, _, ty::DynStar) => { @@ -272,7 +300,7 @@ fn layout_of_uncached<'tcx>( data.valid_range_mut().start = 0; let mut vtable = scalar_unit(Pointer(AddressSpace::DATA)); vtable.valid_range_mut().start = 1; - tcx.mk_layout(cx.scalar_pair(data, vtable)) + tcx.mk_layout(cx.calc.scalar_pair(data, vtable)) } // Arrays and slices. 
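Aside (illustration, not from the patch): the "(ptr, meta) tuple" shape that `scalar_pair(data_ptr, metadata)` builds for wide pointers above is observable from ordinary Rust, which makes for a quick sanity check of what this code computes:

    use std::mem::size_of;

    fn main() {
        // Wide pointers are a data pointer plus metadata (a slice length or a
        // vtable pointer), i.e. exactly two pointer-sized scalars.
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
        assert_eq!(size_of::<&dyn Send>(), 2 * size_of::<usize>());
        // Thin pointers carry no metadata and stay a single scalar.
        assert_eq!(size_of::<&u8>(), size_of::<usize>());
    }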
@@ -531,7 +559,7 @@ fn layout_of_uncached<'tcx>(
if def.is_union() {
if def.repr().pack.is_some() && def.repr().align.is_some() {
- cx.tcx.dcx().span_delayed_bug(
+ tcx.dcx().span_delayed_bug(
tcx.def_span(def.did()),
"union cannot be packed and aligned",
);
}
return Ok(tcx.mk_layout(
- cx.layout_of_union(&def.repr(), &variants)
- .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?,
+ cx.calc
+ .layout_of_union(&def.repr(), &variants)
+ .map_err(|err| map_error(cx, ty, err))?,
));
}
@@ -557,7 +586,7 @@ fn layout_of_uncached<'tcx>(
})?;
if is_unsized {
- cx.tcx.dcx().span_delayed_bug(tcx.def_span(def.did()), err_msg.to_owned());
+ tcx.dcx().span_delayed_bug(tcx.def_span(def.did()), err_msg.to_owned());
Err(error(cx, LayoutError::Unknown(ty)))
} else {
Ok(())
@@ -600,19 +629,20 @@ fn layout_of_uncached<'tcx>(
!tcx.type_of(last_field.did).instantiate_identity().is_sized(tcx, param_env)
});
- let Some(layout) = cx.layout_of_struct_or_enum(
- &def.repr(),
- &variants,
- def.is_enum(),
- def.is_unsafe_cell(),
- tcx.layout_scalar_valid_range(def.did()),
- get_discriminant_type,
- discriminants_iter(),
- dont_niche_optimize_enum,
- !maybe_unsized,
- ) else {
- return Err(error(cx, LayoutError::SizeOverflow(ty)));
- };
+ let layout = cx
+ .calc
+ .layout_of_struct_or_enum(
+ &def.repr(),
+ &variants,
+ def.is_enum(),
+ def.is_unsafe_cell(),
+ tcx.layout_scalar_valid_range(def.did()),
+ get_discriminant_type,
+ discriminants_iter(),
+ dont_niche_optimize_enum,
+ !maybe_unsized,
+ )
+ .map_err(|err| map_error(cx, ty, err))?;
// If the struct tail is sized and can be unsized, check that unsizing doesn't move the fields around.
if cfg!(debug_assertions)
@@ -623,7 +653,7 @@ fn layout_of_uncached<'tcx>(
let tail_replacement = cx.layout_of(Ty::new_slice(tcx, tcx.types.u8)).unwrap();
*variants[FIRST_VARIANT].raw.last_mut().unwrap() = tail_replacement.layout;
- let Some(unsized_layout) = cx.layout_of_struct_or_enum(
+ let Ok(unsized_layout) = cx.calc.layout_of_struct_or_enum(
&def.repr(),
&variants,
def.is_enum(),
@@ -812,7 +842,7 @@ fn coroutine_layout<'tcx>(
args: GenericArgsRef<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
use SavedLocalEligibility::*;
- let tcx = cx.tcx;
+ let tcx = cx.tcx();
let instantiate_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args);
let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
@@ -832,7 +862,7 @@ fn coroutine_layout<'tcx>(
value: Primitive::Int(discr_int, false),
valid_range: WrappingRange { start: 0, end: max_discr },
};
- let tag_layout = cx.tcx.mk_layout(LayoutS::scalar(cx, tag));
+ let tag_layout = tcx.mk_layout(LayoutS::scalar(cx, tag));
let promoted_layouts = ineligible_locals.iter().map(|local| {
let field_ty = instantiate_field(info.field_tys[local].ty);
@@ -1025,7 +1055,7 @@ fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tc
// (delay format until we actually need it)
let record = |kind, packed, opt_discr_size, variants| {
let type_desc = with_no_trimmed_paths!(format!("{}", layout.ty));
- cx.tcx.sess.code_stats.record_type_size(
+ cx.tcx().sess.code_stats.record_type_size(
kind,
type_desc,
layout.align.abi,
@@ -1148,8 +1178,8 @@ fn variant_info_for_coroutine<'tcx>(
return (vec![], None);
};
- let coroutine = cx.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()).unwrap();
- let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
+ let coroutine = cx.tcx().coroutine_layout(def_id, args.as_coroutine().kind_ty()).unwrap();
+ let upvar_names = cx.tcx().closure_saved_names_of_captured_variables(def_id);
let mut upvars_size = Size::ZERO;
let upvar_fields: Vec<_> = args
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index 38fbd7a943748..be0a7c5ee890a 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -1,20 +1,22 @@
use std::assert_matches::assert_matches;
use rustc_middle::bug;
-use rustc_middle::ty::layout::{LayoutCx, TyAndLayout};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, TyAndLayout};
use rustc_target::abi::*;
/// Enforce some basic invariants on layouts.
pub(super) fn sanity_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
+ let tcx = cx.tcx();
+
// Type-level uninhabitedness should always imply ABI uninhabitedness.
- if layout.ty.is_privately_uninhabited(cx.tcx, cx.param_env) {
+ if layout.ty.is_privately_uninhabited(tcx, cx.param_env) {
assert!(layout.abi.is_uninhabited());
}
if layout.size.bytes() % layout.align.abi.bytes() != 0 {
bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
}
- if layout.size.bytes() >= cx.tcx.data_layout.obj_size_bound() {
+ if layout.size.bytes() >= tcx.data_layout.obj_size_bound() {
bug!("size is too large, in the following layout:\n{layout:#?}");
}
diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs
index 0850a8f24d96c..f95177684aee6 100644
--- a/src/tools/miri/src/eval.rs
+++ b/src/tools/miri/src/eval.rs
@@ -277,7 +277,7 @@ pub fn create_ecx<'tcx>(
config: &MiriConfig,
) -> InterpResult<'tcx, InterpCx<'tcx, MiriMachine<'tcx>>> {
let param_env = ty::ParamEnv::reveal_all();
- let layout_cx = LayoutCx { tcx, param_env };
+ let layout_cx = LayoutCx::new(tcx, param_env);
let mut ecx =
InterpCx::new(tcx, rustc_span::DUMMY_SP, param_env, MiriMachine::new(config, layout_cx));
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 8d0a9263cb3e3..c2b0aedbde1fc 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -21,7 +21,7 @@ use rustc_middle::{
query::TyCtxtAt,
ty::{
self,
- layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout},
+ layout::{HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout},
Instance, Ty, TyCtxt,
},
};
@@ -382,7 +382,7 @@ pub struct PrimitiveLayouts<'tcx> {
impl<'tcx> PrimitiveLayouts<'tcx> {
fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
- let tcx = layout_cx.tcx;
+ let tcx = layout_cx.tcx();
let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
Ok(Self {
@@ -597,13 +597,12 @@ pub struct MiriMachine<'tcx> {
impl<'tcx> MiriMachine<'tcx> {
pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx>) -> Self {
- let tcx = layout_cx.tcx;
+ let tcx = layout_cx.tcx();
let local_crates = helpers::get_local_crates(tcx);
let layouts =
PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
let profiler = config.measureme_out.as_ref().map(|out| {
- let crate_name = layout_cx
- .tcx
+ let crate_name = tcx
.sess
.opts
.crate_name
@@ -701,7 +700,7 @@ impl<'tcx> MiriMachine<'tcx> {
clock: Clock::new(config.isolated_op == IsolatedOp::Allow),
#[cfg(unix)]
native_lib: config.native_lib.as_ref().map(|lib_file_path| {
- let target_triple = layout_cx.tcx.sess.opts.target_triple.triple();
+ let target_triple = tcx.sess.opts.target_triple.triple();
// Check if host target == the session target.
if env!("TARGET") != target_triple {
panic!(
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
index 47cc2a2f1e6bd..cc1f19c6b1773 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
@@ -1,13 +1,13 @@
//! Compute the binary representation of a type
-use std::{borrow::Cow, fmt};
+use std::fmt;
use base_db::salsa::Cycle;
use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
use hir_def::{
layout::{
- Abi, FieldsShape, Float, Integer, LayoutCalculator, LayoutS, Primitive, ReprOptions,
- Scalar, Size, StructKind, TargetDataLayout, WrappingRange,
+ Abi, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError, LayoutS,
+ Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout, WrappingRange,
},
LocalFieldId, StructId,
};
@@ -15,7 +15,6 @@ use la_arena::{Idx, RawIdx};
use rustc_abi::AddressSpace;
use rustc_index::{IndexSlice, IndexVec};
-use stdx::never;
use triomphe::Arc;
use crate::{
@@ -107,19 +106,24 @@ impl fmt::Display for LayoutError {
}
}
-struct LayoutCx<'a> {
- target: &'a TargetDataLayout,
+impl From<LayoutCalculatorError> for LayoutError {
+ fn from(err: LayoutCalculatorError) -> Self {
+ match err {
+ LayoutCalculatorError::UnexpectedUnsized | LayoutCalculatorError::EmptyUnion => {
+ LayoutError::Unknown
+ }
+ LayoutCalculatorError::SizeOverflow => LayoutError::SizeOverflow,
+ }
+ }
}
-impl<'a> LayoutCalculator for LayoutCx<'a> {
- type TargetDataLayoutRef = &'a TargetDataLayout;
-
- fn delayed_bug(&self, txt: impl Into<Cow<'static, str>>) {
- never!("{}", txt.into());
- }
+struct LayoutCx<'a> {
+ calc: LayoutCalculator<&'a TargetDataLayout>,
+}
- fn current_data_layout(&self) -> &'a TargetDataLayout {
- self.target
+impl<'a> LayoutCx<'a> {
+ fn new(target: &'a TargetDataLayout) -> Self {
+ Self { calc: LayoutCalculator::new(target) }
}
}
@@ -205,8 +209,8 @@ pub fn layout_of_ty_query(
let Ok(target) = db.target_data_layout(krate) else {
return Err(LayoutError::TargetLayoutNotAvailable);
};
- let cx = LayoutCx { target: &target };
- let dl = cx.current_data_layout();
+ let dl = &*target;
+ let cx = LayoutCx::new(dl);
let ty = normalize(db, trait_env.clone(), ty);
let result = match ty.kind(Interner) {
TyKind::Adt(AdtId(def), subst) => {
@@ -281,7 +285,7 @@ pub fn layout_of_ty_query(
.collect::<Result<Vec<_>, _>>()?;
let fields = fields.iter().map(|it| &**it).collect::<Vec<_>>();
let fields = fields.iter().collect::<IndexVec<_, _>>();
- cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)?
+ cx.calc.univariant(&fields, &ReprOptions::default(), kind)?
}
TyKind::Array(element, count) => {
let count = try_const_usize(db, count).ok_or(LayoutError::HasErrorConst)? as u64;
@@ -367,12 +371,12 @@ pub fn layout_of_ty_query(
};
// Effectively a (ptr, meta) tuple.
- cx.scalar_pair(data_ptr, metadata)
+ cx.calc.scalar_pair(data_ptr, metadata)
}
- TyKind::FnDef(_, _) => layout_of_unit(&cx, dl)?,
- TyKind::Never => cx.layout_of_never_type(),
+ TyKind::FnDef(_, _) => layout_of_unit(&cx)?,
+ TyKind::Never => cx.calc.layout_of_never_type(),
TyKind::Dyn(_) | TyKind::Foreign(_) => {
- let mut unit = layout_of_unit(&cx, dl)?;
+ let mut unit = layout_of_unit(&cx)?;
match &mut unit.abi {
Abi::Aggregate { sized } => *sized = false,
_ => return Err(LayoutError::Unknown),
}
@@ -414,8 +418,7 @@ pub fn layout_of_ty_query(
.collect::<Result<Vec<_>, _>>()?;
let fields = fields.iter().map(|it| &**it).collect::<Vec<_>>();
let fields = fields.iter().collect::<IndexVec<_, _>>();
- cx.univariant(dl, &fields, &ReprOptions::default(), StructKind::AlwaysSized)
- .ok_or(LayoutError::Unknown)?
+ cx.calc.univariant(&fields, &ReprOptions::default(), StructKind::AlwaysSized)?
}
TyKind::Coroutine(_, _) | TyKind::CoroutineWitness(_, _) => {
return Err(LayoutError::NotImplemented)
}
@@ -447,14 +450,14 @@ pub fn layout_of_ty_recover(
Err(LayoutError::RecursiveTypeWithoutIndirection)
}
-fn layout_of_unit(cx: &LayoutCx<'_>, dl: &TargetDataLayout) -> Result<Layout, LayoutError> {
- cx.univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
- dl,
- IndexSlice::empty(),
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )
- .ok_or(LayoutError::Unknown)
+fn layout_of_unit(cx: &LayoutCx<'_>) -> Result<Layout, LayoutError> {
+ cx.calc
+ .univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
+ IndexSlice::empty(),
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )
+ .map_err(Into::into)
}
fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
index 3463e69097287..a060ebfe6be2a 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
@@ -5,7 +5,7 @@ use std::{cmp, ops::Bound};
use base_db::salsa::Cycle;
use hir_def::{
data::adt::VariantData,
- layout::{Integer, LayoutCalculator, ReprOptions, TargetDataLayout},
+ layout::{Integer, ReprOptions, TargetDataLayout},
AdtId, VariantId,
};
use intern::sym;
@@ -36,8 +36,8 @@ pub fn layout_of_adt_query(
let Ok(target) = db.target_data_layout(krate) else {
return Err(LayoutError::TargetLayoutNotAvailable);
};
- let cx = LayoutCx { target: &target };
- let dl = cx.current_data_layout();
+ let dl = &*target;
+ let cx = LayoutCx::new(dl);
let handle_variant = |def: VariantId, var: &VariantData| {
var.fields()
.iter()
@@ -73,9 +73,9 @@ pub fn layout_of_adt_query(
.collect::>();
let variants = variants.iter().map(|it| it.iter().collect()).collect::>();
let result = if matches!(def, AdtId::UnionId(..)) {
- cx.layout_of_union(&repr, &variants).ok_or(LayoutError::Unknown)?
+ cx.calc.layout_of_union(&repr, &variants)?
} else {
- cx.layout_of_struct_or_enum(
+ cx.calc.layout_of_struct_or_enum(
&repr,
&variants,
matches!(def, AdtId::EnumId(..)),
@@ -103,8 +103,7 @@ pub fn layout_of_adt_query(
.next()
.and_then(|it| it.iter().last().map(|it| !it.is_unsized()))
.unwrap_or(true),
- )
- .ok_or(LayoutError::SizeOverflow)?
+ )?
};
Ok(Arc::new(result))
}
diff --git a/tests/crashes/124182.rs b/tests/crashes/124182.rs
deleted file mode 100644
index 46948207df381..0000000000000
--- a/tests/crashes/124182.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-//@ known-bug: #124182
-struct LazyLock<T> {
- data: (Copy, fn() -> T),
-}
-
-impl<T> LazyLock<T> {
- pub const fn new(f: fn() -> T) -> LazyLock<T> {
- LazyLock { data: (None, f) }
- }
-}
-
-struct A(Option);
-
-impl Default for A {
- fn default() -> Self {
- A(None)
- }
-}
-
-static EMPTY_SET: LazyLock> = LazyLock::new(A::default);
-
-fn main() {}
diff --git a/tests/crashes/126939.rs b/tests/crashes/126939.rs
index 1edf748460604..07bafd35420ee 100644
--- a/tests/crashes/126939.rs
+++ b/tests/crashes/126939.rs
@@ -1,21 +1,12 @@
//@ known-bug: rust-lang/rust#126939
-struct MySlice<T>(bool, T);
+struct MySlice<T>(T);
type MySliceBool = MySlice<[bool]>;
-use std::mem;
-
-struct P2<T> {
- a: T,
+struct P2 {
b: MySliceBool,
}
-macro_rules! check {
- ($t:ty, $align:expr) => ({
- assert_eq!(mem::align_of::<$t>(), $align);
- });
-}
+static CHECK: () = assert!(align_of::<P2>() == 1);
-pub fn main() {
- check!(P2, 1);
-}
+fn main() {}
diff --git a/tests/ui/layout/debug.rs b/tests/ui/layout/debug.rs
index 91e96d78ff556..166321798de30 100644
--- a/tests/ui/layout/debug.rs
+++ b/tests/ui/layout/debug.rs
@@ -76,3 +76,8 @@ impl S {
#[rustc_layout(debug)]
type Impossible = (str, str); //~ ERROR: cannot be known at compilation time
+
+// Test that computing the layout of an empty union doesn't ICE.
+#[rustc_layout(debug)]
+union EmptyUnion {} //~ ERROR: has an unknown layout
+//~^ ERROR: unions cannot have zero fields
diff --git a/tests/ui/layout/debug.stderr b/tests/ui/layout/debug.stderr
index 5162a771b4df7..c9715a8e14632 100644
--- a/tests/ui/layout/debug.stderr
+++ b/tests/ui/layout/debug.stderr
@@ -1,3 +1,9 @@
+error: unions cannot have zero fields
+ --> $DIR/debug.rs:82:1
+ |
+LL | union EmptyUnion {}
+ | ^^^^^^^^^^^^^^^^^^^
+
error: layout_of(E) = Layout {
size: Size(12 bytes),
align: AbiAndPrefAlign {
@@ -566,12 +572,18 @@ LL | type Impossible = (str, str);
= help: the trait `Sized` is not implemented for `str`
= note: only the last element of a tuple may have a dynamically sized type
+error: the type `EmptyUnion` has an unknown layout
+ --> $DIR/debug.rs:82:1
+ |
+LL | union EmptyUnion {}
+ | ^^^^^^^^^^^^^^^^
+
error: `#[rustc_layout]` can only be applied to `struct`/`enum`/`union` declarations and type aliases
--> $DIR/debug.rs:74:5
|
LL | const C: () = ();
| ^^^^^^^^^^^
-error: aborting due to 17 previous errors
+error: aborting due to 19 previous errors
For more information about this error, try `rustc --explain E0277`.
diff --git a/tests/ui/layout/invalid-unsized-const-eval.rs b/tests/ui/layout/invalid-unsized-const-eval.rs
new file mode 100644
index 0000000000000..2dec0b0faacf2
--- /dev/null
+++ b/tests/ui/layout/invalid-unsized-const-eval.rs
@@ -0,0 +1,14 @@
+// issue: #124182
+
+//! This test used to trip an assertion in const eval, because `layout_of(LazyLock)`
+//! returned `Ok` with an unsized layout when a sized layout was expected.
+//! It was fixed by making `layout_of` always return `Err` for types that
+//! contain unsized fields in unexpected locations.
+
+struct LazyLock {
+ data: (dyn Sync, ()), //~ ERROR the size for values of type
+}
+
+static EMPTY_SET: LazyLock = todo!();
+
+fn main() {}
diff --git a/tests/ui/layout/invalid-unsized-const-eval.stderr b/tests/ui/layout/invalid-unsized-const-eval.stderr
new file mode 100644
index 0000000000000..bf65782b7a805
--- /dev/null
+++ b/tests/ui/layout/invalid-unsized-const-eval.stderr
@@ -0,0 +1,12 @@
+error[E0277]: the size for values of type `(dyn Sync + 'static)` cannot be known at compilation time
+ --> $DIR/invalid-unsized-const-eval.rs:9:11
+ |
+LL | data: (dyn Sync, ()),
+ | ^^^^^^^^^^^^^^ doesn't have a size known at compile-time
+ |
+ = help: the trait `Sized` is not implemented for `(dyn Sync + 'static)`
+ = note: only the last element of a tuple may have a dynamically sized type
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0277`.
diff --git a/tests/ui/layout/trivial-bounds-sized.rs b/tests/ui/layout/trivial-bounds-sized.rs
new file mode 100644
index 0000000000000..a32539f80fa4f
--- /dev/null
+++ b/tests/ui/layout/trivial-bounds-sized.rs
@@ -0,0 +1,51 @@
+//@ check-pass
+
+//! With trivial bounds, it is possible to have ADTs with unsized fields
+//! in arbitrary places. Test that we do not ICE for such types.
+
+#![feature(trivial_bounds)]
+#![expect(trivial_bounds)]
+
+struct Struct
+where
+ [u8]: Sized,
+ [i16]: Sized,
+{
+ a: [u8],
+ b: [i16],
+ c: f32,
+}
+
+union Union
+where
+ [u8]: Copy,
+ [i16]: Copy,
+{
+ a: [u8],
+ b: [i16],
+ c: f32,
+}
+
+enum Enum
+where
+ [u8]: Sized,
+ [i16]: Sized,
+{
+ V1([u8], [i16]),
+ V2([i16], f32),
+}
+
+// This forces layout computation via the `variant_size_differences` lint.
+// FIXME: This could be made more robust, possibly with a variant of `rustc_layout`
+// that doesn't error.
+enum Check
+where
+ [u8]: Copy,
+ [i16]: Copy,
+{
+ Struct(Struct),
+ Union(Union),
+ Enum(Enum),
+}
+
+fn main() {}
diff --git a/tests/crashes/123134.rs b/tests/ui/layout/unsatisfiable-sized-ungated.rs
similarity index 55%
rename from tests/crashes/123134.rs
rename to tests/ui/layout/unsatisfiable-sized-ungated.rs
index 61c043db763f5..d9c1f739bdbfa 100644
--- a/tests/crashes/123134.rs
+++ b/tests/ui/layout/unsatisfiable-sized-ungated.rs
@@ -1,4 +1,9 @@
-//@ known-bug: #123134
+//@ check-pass
+// issue: #123134
+
+//! This is a variant of `trivial-bounds-sized.rs` that compiles without any
+//! feature gates and used to trigger a delayed bug.
+
trait Api: Sized {
type Device: ?Sized;
}
@@ -7,7 +12,7 @@ struct OpenDevice<A: Api>
where
A::Device: Sized,
{
- device: A::Device,
+ device: A::Device, // <- this is the type that ends up being unsized.
queue: (),
}
@@ -31,6 +36,8 @@ impl<T> Adapter for T {
fn open() -> OpenDevice<Self::A>
where
<Self::A as Api>::Device: Sized,
+ // ^ the bound expands to `<<T as Adapter>::A as Api>::Device: Sized`, which
+ // is not considered trivial due to containing the type parameter `T`
{
unreachable!()
}
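Closing note (sketch, not part of the patch): downstream consumers of the new fallible API end up writing a match much like the `map_error` helper and the rust-analyzer `From` impl above. The `describe` function and its messages here are illustrative only:

    use rustc_abi::LayoutCalculatorError;

    fn describe(err: LayoutCalculatorError) -> &'static str {
        match err {
            // Reachable without a hard error under trivially false bounds
            // such as `[u8]: Sized` (see the tests above).
            LayoutCalculatorError::UnexpectedUnsized => "unsized field where a sized type was expected",
            LayoutCalculatorError::SizeOverflow => "type is too large for the target",
            LayoutCalculatorError::EmptyUnion => "unions must have at least one field",
        }
    }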