use std::collections::BTreeSet;
use std::fmt::{self, Write};
use std::ops::Deref;
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::{debug, trace};

use crate::{
    AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, NumScalableVectors, Primitive, ReprOptions, Scalar, Size,
    StructKind, TagEncoding, TargetDataLayout, Variants, WrappingRange,
};

mod coroutine;
mod simple;

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{Layout, TyAbiInterface, TyAndLayout};

rustc_index::newtype_index! {
    /// The *source-order* index of a field in a variant.
    ///
    /// This is how most code after type checking refers to fields, rather than
    /// using names (as names have hygiene complications and more complex lookup).
    ///
    /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
    /// (It is for `repr(C)` `struct`s, however.)
    ///
    /// For example, in the following types,
    /// ```rust
    /// # enum Never {}
    /// # #[repr(u16)]
    /// enum Demo1 {
    ///     Variant0 { a: Never, b: i32 } = 100,
    ///     Variant1 { c: u8, d: u64 } = 10,
    /// }
    /// struct Demo2 { e: u8, f: u16, g: u8 }
    /// ```
    /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
    /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
    /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
    #[stable_hash]
    #[encodable]
    #[orderable]
    #[gate_rustc_only]
    pub struct FieldIdx {}
}

impl FieldIdx {
    /// The second field, at index 1.
    ///
    /// For use alongside [`FieldIdx::ZERO`], particularly with scalar pairs.
    pub const ONE: FieldIdx = FieldIdx::from_u32(1);
}

rustc_index::newtype_index! {
    /// The *source-order* index of a variant in a type.
    ///
    /// For enums, these are always `0..variant_count`, regardless of any
    /// custom discriminants that may have been defined, and including any
    /// variants that may end up uninhabited due to field types. (Some of the
    /// variants may not be present in a monomorphized ABI [`Variants`], but
    /// those skipped variants are always counted when determining the *index*.)
    ///
    /// `struct`s, `tuple`s, and `union`s are considered to have a single variant
    /// with variant index zero, aka [`FIRST_VARIANT`].
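    ///
    /// For example (an illustrative snippet mirroring the one on [`FieldIdx`]), in
    /// ```rust
    /// enum Demo {
    ///     Variant0 = 100,
    ///     Variant1 = 10,
    /// }
    /// ```
    /// `Variant0` is `VariantIdx(0)` and `Variant1` is `VariantIdx(1)`,
    /// regardless of the custom discriminants.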
    #[stable_hash]
    #[encodable]
    #[orderable]
    #[gate_rustc_only]
    pub struct VariantIdx {
        /// Equivalent to `VariantIdx(0)`.
        const FIRST_VARIANT = 0;
    }
}

// A variant is absent if it's uninhabited and only has ZST fields.
// Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
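// For example (illustrative): given `enum Demo { A(Never), B(Never, u64) }`
// with `Never` an empty enum, `A` is absent (uninhabited with only 1-ZST
// fields), while `B` is merely uninhabited: its `u64` field still needs
// space reserved for it.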
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,

    /// The length of an SIMD type is zero.
    ZeroLengthSimdType,

    /// The length of an SIMD type exceeds the maximum number of lanes.
    OversizedSimdType { max_lanes: u64 },

    /// An element type of an SIMD type isn't a primitive.
    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        use LayoutCalculatorError::*;
        match *self {
            UnexpectedUnsized(_) => UnexpectedUnsized(()),
            SizeOverflow => SizeOverflow,
            EmptyUnion => EmptyUnion,
            ReprConflict => ReprConflict,
            ZeroLengthSimdType => ZeroLengthSimdType,
            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

    /// Format an untranslated diagnostic for this type.
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use LayoutCalculatorError::*;
        f.write_str(match self {
            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
            SizeOverflow => "size overflow",
            EmptyUnion => "type is a union with no fields",
            ReprConflict => "type has an invalid repr",
            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
                "invalid simd type definition"
            }
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }

    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
        &self,
        element: &LayoutData<FieldIdx, VariantIdx>,
        count_if_sized: Option<u64>, // None for slices
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let count = count_if_sized.unwrap_or(0);
        let size =
            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: element.size, count },
            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
            largest_niche: element.largest_niche.filter(|_| count != 0),
            uninhabited: element.uninhabited && count != 0,
            align: element.align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: element.align.abi,
            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }

    pub fn scalable_vector_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
        number_of_vectors: NumScalableVectors,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        vector_type_layout(
            SimdVectorKind::Scalable(number_of_vectors),
            self.cx.data_layout(),
            element,
            count,
        )
    }

    pub fn simd_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
        repr_packed: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        let kind = if repr_packed { SimdVectorKind::PackedFixed } else { SimdVectorKind::Fixed };
        vector_type_layout(kind, self.cx.data_layout(), element, count)
    }

    /// Compute the layout for a coroutine.
    ///
    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
    pub fn coroutine<
        'a,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
        VariantIdx: Idx,
        FieldIdx: Idx,
        LocalIdx: Idx,
    >(
        &self,
        local_layouts: &IndexSlice<LocalIdx, F>,
        prefix_layouts: IndexVec<FieldIdx, F>,
        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
        tag_to_layout: impl Fn(Scalar) -> F,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        coroutine::layout(
            self,
            local_layouts,
            prefix_layouts,
            variant_fields,
            storage_conflicts,
            tag_to_layout,
        )
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
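        // For example (an illustrative sketch): with a niche-bearing `bool`
        // among larger plain fields, the start-biased run may leave most bytes
        // *after* the niche (tail space), while the end-biased run moves the
        // `bool` to the last byte so the space *before* the niche grows, which
        // the comparison below prefers.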
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                             layout: {}\n\
                             alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                if !repr.inhibit_enum_layout_opt() && absent(v) { None } else { Some(i) }
            });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can compute
            // the field offsets.
            None => VariantIdx::new(0),
        };

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_special_no_niche,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(repr, variants, discr_range_of_repr, discriminants)
        }
    }

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same repr and union repr optimizations aren't
        // disabled, we can use that common repr for the union as a whole.
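        // For example (illustrative): a union of a `u32` and a `char` has two
        // fields that are the same 32-bit integer scalar once valid-range
        // information is discarded, so the union keeps a scalar repr. A `u32`
        // next to an `[u8; 4]` (a `Memory` repr) disables the optimization.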
        struct AbiMismatch;
        let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_repr_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_repr_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(pack);
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let backend_repr = match common_non_zst_repr_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((repr, _))) => match repr {
                // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
                    if repr.scalar_align(dl).unwrap() != align =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // Vectors require at least element alignment, else disable the opt
                BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => {
                    BackendRepr::Memory { sized: true }
                }
                // the alignment tests passed and we can use this
                BackendRepr::Scalar(..)
                | BackendRepr::ScalarPair(..)
                | BackendRepr::SimdVector { .. }
                | BackendRepr::SimdScalableVector { .. }
                | BackendRepr::Memory { .. } => repr,
            },
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr,
            largest_niche: None,
            uninhabited: false,
            align: AbiAlign::new(align),
            size: size.align_to(align),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }

    /// single-variant enums are just structs, if you think about it
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };
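
        // Types like `UnsafeCell` must not expose niches to their surroundings:
        // keep the size and alignment, but widen every scalar's valid range to
        // "full" and drop the recorded largest niche.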
        if is_special_no_niche {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::SimdVector { element, .. }
                | BackendRepr::SimdScalableVector { element, .. } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        Ok(st)
    }

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

        let calculate_niche_filling_layout = || -> Option<LayoutData<FieldIdx, VariantIdx>> {
            if repr.inhibit_enum_layout_opt() {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }
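
            // The idea, with a hypothetical example: for `enum E { A(bool), B }`,
            // the largest variant `A` has a niche with 254 unused values
            // (2..=255). One of those values can be reserved to encode `B`, so
            // `E` stays a single byte without a separate tag.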
            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align.abi);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;

            // Use the largest niche in the largest variant.
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align.abi;

            let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
            let abi = if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: FieldIdx::new(0),
                    variants: variant_layouts,
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    in_memory_order: [FieldIdx::new(0)].into(),
                },
                backend_repr: abi,
                largest_niche,
                uninhabited,
                size,
                align: AbiAlign::new(align),
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(layout)
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let discr_type = repr.discr_type();
        let discr_int = Integer::from_attr(dl, discr_type);
        // Because we can only represent one range of valid values, we'll look for the
        // largest range of invalid values and pick everything else as the range of valid
        // values.

        // First we need to sort the possible discriminant values so that we can look for the largest gap:
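        // For example (a hypothetical `u8` discriminant set): for valid
        // discriminants {0, 1, 254}, the pairs below are (0, 1), (1, 254) and
        // the wrapping pair (254, 0). The largest gap lies between 1 and 254,
        // so 2..=253 become the invalid values and the valid range wraps
        // around as 254..=1.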
        let valid_discriminants: BTreeSet<i128> = discriminants
            .filter(|&(i, _)| repr.c() || variants[i].iter().all(|f| !f.is_uninhabited()))
            .map(|(_, val)| {
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    // FIXME: do this at the discriminant iterator creation sites
                    discr_int.size().sign_extend(val as u128)
                } else {
                    val
                }
            })
            .collect();
        trace!(?valid_discriminants);
        let discriminants = valid_discriminants.iter().copied();
        let next_discriminants =
            discriminants.clone().chain(valid_discriminants.first().copied()).skip(1);
        // Iterate over pairs of each discriminant together with the next one.
        // Since they were sorted, we can now compute the niche sizes and pick the largest.
        let discriminants = discriminants.zip(next_discriminants);
        let largest_niche = discriminants.max_by_key(|&(start, end)| {
            trace!(?start, ?end);
            // If this is a wraparound range, the niche size is `MAX - abs(diff)`, as the diff between
            // the two end points is actually the size of the valid discriminants.
            let dist = if start > end {
                // Overflow can happen for 128 bit discriminants if `end` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                let dist = start.wrapping_sub(end);
                if discr_type.is_signed() {
                    discr_int.signed_max().wrapping_sub(dist) as u128
                } else {
                    discr_int.size().unsigned_int_max() - dist as u128
                }
            } else {
                // Overflow can happen for 128 bit discriminants if `start` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                end.wrapping_sub(start) as u128
            };
            trace!(?dist);
            dist
        });
        trace!(?largest_niche);

        // `max` is the last valid discriminant before the largest niche
        // `min` is the first valid discriminant after the largest niche
        let (max, min) = largest_niche
            // We might have no inhabited variants, so pretend there's at least one.
            .unwrap_or((0, 0));
        let (min_ity, signed) = discr_range_of_repr(min, max); // Integer::discr_range_of_repr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }
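
        // For example (illustrative): for a `#[repr(C)]` enum with a `u64`
        // payload, the prefix alignment grows to the `u64`'s alignment of 8,
        // so the payload union starts at offset 8 regardless of the tag's own
        // size.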

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align.abi);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on values the discriminant can take on).
            // Mostly because this discriminant will be loaded, and then stored into a
            // variable of the type calculated by typeck. Consider such a case (a bug):
            // typeck decided on a byte-sized discriminant, but layout thinks we need a
            // 16-bit integer to store all discriminant values. Then, in codegen, in order
            // to store this 16-bit discriminant into an 8-bit sized temporary, some of the
            // space necessary to represent it would have to be discarded (or layout is
            // wrong in thinking it needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make the discr type however large (as an optimisation)
            // after this point; we'll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
        let mut abi = BackendRepr::Memory { sized: true };

        let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited());
        if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
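            // For example (illustrative): in `enum E { A(u32), B(i32) }`,
            // every variant carries a single 4-byte integer scalar at the same
            // offset, so the enum as a whole can be a ScalarPair of the tag
            // plus that common 32-bit integer.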
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZST here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    let new_prim = match (old_prim, prim) {
                        // Allow all identical primitives.
                        (x, y) if x == y => x,
                        // Allow integers of the same size with differing signedness.
                        // We arbitrarily choose the signedness of the first variant.
                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
                        // Allow integers mixed with pointers of the same layout.
                        // We must represent this using a pointer, to avoid
                        // roundtripping pointers through ptrtoint/inttoptr.
                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
                        {
                            p
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    // We may be updating the primitive here, for example from int->ptr.
                    common_prim = Some((new_prim, common_offset));
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    let size = prim.size(dl);
                    assert!(size.bits() <= 128);
                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair =
                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                        assert_eq!(in_memory_order.raw, [FieldIdx::new(0), FieldIdx::new(1)]);
                        offsets
                    }
                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
                };
                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::new(1)] == *offset
                    && align == pair.align.abi
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.backend_repr;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0
                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
                {
                    variant.backend_repr = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits
                    // in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let combined_seed = layout_variants
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        let tagged_layout = LayoutData {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: FieldIdx::new(0),
                variants: layout_variants,
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                in_memory_order: [FieldIdx::new(0)].into(),
            },
            largest_niche,
            uninhabited,
            backend_repr: abi,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        };

        let best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |l: &LayoutData<FieldIdx, VariantIdx>| {
                    l.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.size.cmp(&nl.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        Ok(best_layout)
    }

    fn univariant_biased<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
        niche_bias: NicheBias,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
        let mut in_memory_order: IndexVec<u32, FieldIdx> = fields.indices().collect();
        let optimize_field_order = !repr.inhibit_struct_field_reordering();
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut in_memory_order.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];
        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
        let field_seed = fields_excluding_tail
            .iter()
            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));

        if optimize_field_order && fields.len() > 1 {
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    use rand::SeedableRng;
                    use rand::seq::SliceRandom;
                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
                    // ordering.
                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
                    );

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
                // not depend on the layout of the tail.
                let max_field_align =
                    fields_excluding_tail.iter().map(|f| f.align.bytes()).max().unwrap_or(1);
                let largest_niche_size = fields_excluding_tail
                    .iter()
                    .filter_map(|f| f.largest_niche)
                    .map(|n| n.available(dl))
                    .max()
                    .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared with
                    // each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is an
                        // integer multiple of align, except for ZSTs.
                        let align = layout.align.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
                                // array to the front in the first case (for aligned loads) but keep
                                // the bool in front in the second case for its niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct then for
                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
                                // in the align-1 group because its bool can be moved closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };
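
                // For example (illustrative, ignoring the niche-based
                // adjustments above): a `u32` (align 4, size 4) gets key
                // `log2(4) = 2`, and so does `[u8; 4]` (align 1, size 4), so
                // both land in the same group; `[u8; 6]` gets
                // `trailing_zeros(6) = 1` and groups with `u16` fields.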

                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutData` only exposes a single niche so sorting is usually
                        // sufficient to get one niche into the preferred position. If it ever
                        // supported multiple niches then a more advanced pick-and-pack approach could
                        // provide better results. But even for the single-niche case it's not
                        // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
                        // bool to the front but it would require packing the tuple together with the
                        // u16 to build a 4-byte group so that the u32 can be placed after it without
                        // padding. This kind of packing can't be achieved by sorting.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let field_size = f.size.bytes();
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            let niche_size_key = match niche_bias {
                                // large niche first
                                NicheBias::Start => !niche_size,
                                // large niche last
                                NicheBias::End => niche_size,
                            };
                            let inner_niche_offset_key = match niche_bias {
                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
                                NicheBias::End => f.largest_niche.map_or(0, |n| {
                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                                }),
                            };

                            (
                                // Then place largest alignments first.
                                cmp::Reverse(alignment_group_key(f)),
                                // Then prioritize niche placement within alignment group according to
                                // `niche_bias`.
                                niche_size_key,
                                // Then among fields with equally-sized niches prefer the ones
                                // closer to the start/end of the field.
                                inner_niche_offset_key,
                            )
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        // And put the largest niche in an alignment group at the end
                        // so it can be used as discriminant in jagged enums
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (alignment_group_key(f), niche_size)
                        });
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                // regardless of the status of `-Z randomize-layout`
            }
        }
        // in_memory_order holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of in_memory_order is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(prefix_align);
            offset = prefix_size.align_to(prefix_align);
        }
        for &i in &in_memory_order {
            let field = &fields[i];
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // if there are several niches of the same size then pick the last one
                    NicheBias::End => available >= largest_niche_available,
                };
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }

        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        let size = min_size.align_to(align);
        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZST here and later check if we are good in terms of alignment.
            // This lets us handle some cases involving aligned ZST.
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
                                if optimize_abi =>
                            {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
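                // (For example, illustratively: `struct S(u32, bool)` has two
                // scalar fields whose offsets, size, and alignment match the
                // reference scalar-pair layout computed below, so `S` gets a
                // `ScalarPair` repr instead of `Memory`.)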
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair =
                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                                    assert_eq!(
                                        in_memory_order.raw,
                                        [FieldIdx::new(0), FieldIdx::new(1)]
                                    );
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during enum layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align.abi
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
        let uninhabited = fields.iter().any(|f| f.is_uninhabited());

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align
                }
            }
        } else {
            unadjusted_abi_align
        };

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, in_memory_order },
            backend_repr: abi,
            largest_niche,
            uninhabited,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }

    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
        let dl = self.cx.data_layout();
        let mut s = String::new();
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
}

enum SimdVectorKind {
    /// `#[rustc_scalable_vector]`
    Scalable(NumScalableVectors),
    /// `#[repr(simd, packed)]`
    PackedFixed,
    /// `#[repr(simd)]`
    Fixed,
}

fn vector_type_layout<FieldIdx, VariantIdx, F>(
    kind: SimdVectorKind,
    dl: &TargetDataLayout,
    element: F,
    count: u64,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let elt = element.as_ref();
    if count == 0 {
        return Err(LayoutCalculatorError::ZeroLengthSimdType);
    } else if count > crate::MAX_SIMD_LANES {
        return Err(LayoutCalculatorError::OversizedSimdType { max_lanes: crate::MAX_SIMD_LANES });
    }

    let BackendRepr::Scalar(element) = elt.backend_repr else {
        return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
    };

    // Compute the size and alignment of the vector
    let size =
        elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
    let (repr, align) = match kind {
        SimdVectorKind::Scalable(number_of_vectors) => (
            BackendRepr::SimdScalableVector { element, count, number_of_vectors },
            dl.llvmlike_vector_align(size),
        ),
        // Non-power-of-two vectors have padding up to the next power-of-two.
        // If we're a packed repr, remove the padding while keeping the alignment as close
        // to a vector as possible.
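        // For example (illustrative): a packed 3-lane `u8` vector keeps its
        // unpadded size of 3 and gets a `Memory` repr; `max_aligned_factor(3)`
        // is 1, the largest power of two dividing the size.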
        SimdVectorKind::PackedFixed if !count.is_power_of_two() => {
            (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
        }
        SimdVectorKind::PackedFixed | SimdVectorKind::Fixed => {
            (BackendRepr::SimdVector { element, count }, dl.llvmlike_vector_align(size))
        }
    };
    let size = size.align_to(align);

    Ok(LayoutData {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: [Size::ZERO].into(),
            in_memory_order: [FieldIdx::new(0)].into(),
        },
        backend_repr: repr,
        largest_niche: elt.largest_niche,
        uninhabited: false,
        size,
        align: AbiAlign::new(align),
        max_repr_align: None,
        unadjusted_abi_align: elt.align.abi,
        randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
    })
}