compiler/rustc_abi/src/lib.rs RUST 2,322 lines View on github.com → Search inside
File is large — showing lines 1–2,000 of 2,322.
1// tidy-alphabetical-start2#![cfg_attr(feature = "nightly", allow(internal_features))]3#![cfg_attr(feature = "nightly", feature(rustc_attrs))]4#![cfg_attr(feature = "nightly", feature(step_trait))]5// tidy-alphabetical-end67/*! ABI handling for rustc89## What is an "ABI"?1011Literally, "application binary interface", which means it is everything about how code interacts,12at the machine level, with other code. This means it technically covers all of the following:13- object binary format for e.g. relocations or offset tables14- in-memory layout of types15- procedure calling conventions1617When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.18To describe those `rustc_abi` also covers type layout, as it must for values passed on the stack.19Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.20You will encounter all of them and more if you study target-specific codegen enough!21Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to22either or both of23- `repr(Rust)` types have a mostly-unspecified layout24- `extern "Rust" fn(A) -> R` has an unspecified calling convention2526## Crate Goal2728ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.29It cannot carry all details relevant to an ABI: those permeate code generation and linkage.30Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.31It should contain traits and types that other crates then use in their implementation.32For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`33but `rustc_abi` contains the types for calculating layout and describing register-passing.34This makes it easier to describe things in the same way across targets, codegen backends, and35even other Rust compilers, such as rust-analyzer!3637*/3839use std::fmt;40#[cfg(feature = 
"nightly")]41use std::iter::Step;42use std::num::{NonZeroUsize, ParseIntError};43use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};44use std::str::FromStr;4546use bitflags::bitflags;47#[cfg(feature = "nightly")]48use rustc_data_structures::stable_hasher::StableOrd;49#[cfg(feature = "nightly")]50use rustc_error_messages::{DiagArgValue, IntoDiagArg};51#[cfg(feature = "nightly")]52use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg};53use rustc_hashes::Hash64;54use rustc_index::{Idx, IndexSlice, IndexVec};55#[cfg(feature = "nightly")]56use rustc_macros::{Decodable_NoContext, Encodable_NoContext, StableHash};57#[cfg(feature = "nightly")]58use rustc_span::{Symbol, sym};5960mod callconv;61mod canon_abi;62mod extern_abi;63mod layout;64#[cfg(test)]65mod tests;6667pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};68pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};69#[cfg(feature = "nightly")]70pub use extern_abi::CVariadicStatus;71pub use extern_abi::{ExternAbi, all_names};72pub use layout::{FIRST_VARIANT, FieldIdx, LayoutCalculator, LayoutCalculatorError, VariantIdx};73#[cfg(feature = "nightly")]74pub use layout::{Layout, TyAbiInterface, TyAndLayout};7576#[derive(Clone, Copy, PartialEq, Eq, Default)]77#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]78pub struct ReprFlags(u8);7980bitflags! {81    impl ReprFlags: u8 {82        const IS_C               = 1 << 0;83        const IS_SIMD            = 1 << 1;84        const IS_TRANSPARENT     = 1 << 2;85        /// Internal only for now. 
If true, don't reorder fields.86        /// On its own it does not prevent ABI optimizations.87        const IS_LINEAR          = 1 << 3;88        /// If true, the type's crate has opted into layout randomization.89        /// Other flags can still inhibit reordering and thus randomization.90        /// The seed stored in `ReprOptions.field_shuffle_seed`.91        const RANDOMIZE_LAYOUT   = 1 << 4;92        /// If true, the type is always passed indirectly by non-Rustic ABIs.93        /// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.94        const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;95        const IS_SCALABLE        = 1 << 6;96         // Any of these flags being set prevent field reordering optimisation.97        const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()98                                 | ReprFlags::IS_SIMD.bits()99                                 | ReprFlags::IS_SCALABLE.bits()100                                 | ReprFlags::IS_LINEAR.bits();101        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();102    }103}104105// This is the same as `rustc_data_structures::external_bitflags_debug` but without the106// `rustc_data_structures` to make it build on stable.107impl std::fmt::Debug for ReprFlags {108    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {109        bitflags::parser::to_writer(self, f)110    }111}112113#[derive(Copy, Clone, Debug, Eq, PartialEq)]114#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]115pub enum IntegerType {116    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.117    /// `Pointer(true)` means `isize`.118    Pointer(bool),119    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. 
The bool field shows signedness, e.g.120    /// `Fixed(I8, false)` means `u8`.121    Fixed(Integer, bool),122}123124impl IntegerType {125    pub fn is_signed(&self) -> bool {126        match self {127            IntegerType::Pointer(b) => *b,128            IntegerType::Fixed(_, b) => *b,129        }130    }131}132133#[derive(Copy, Clone, Debug, Eq, PartialEq)]134#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]135pub enum ScalableElt {136    /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector137    ElementCount(u16),138    /// `rustc_scalable_vector` w/out `N`, used for tuple types of scalable vectors that only139    /// contain other scalable vectors140    Container,141}142143/// Represents the repr options provided by the user.144#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]145#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]146pub struct ReprOptions {147    pub int: Option<IntegerType>,148    pub align: Option<Align>,149    pub pack: Option<Align>,150    pub flags: ReprFlags,151    /// `#[rustc_scalable_vector]`152    pub scalable: Option<ScalableElt>,153    /// The seed to be used for randomizing a type's layout154    ///155    /// Note: This could technically be a `u128` which would156    /// be the "most accurate" hash as it'd encompass the item and crate157    /// hash without loss, but it does pay the price of being larger.158    /// Everything's a tradeoff, a 64-bit seed should be sufficient for our159    /// purposes (primarily `-Z randomize-layout`)160    pub field_shuffle_seed: Hash64,161}162163impl ReprOptions {164    #[inline]165    pub fn simd(&self) -> bool {166        self.flags.contains(ReprFlags::IS_SIMD)167    }168169    #[inline]170    pub fn scalable(&self) -> bool {171        self.flags.contains(ReprFlags::IS_SCALABLE)172    }173174    #[inline]175    pub fn c(&self) -> bool {176        
self.flags.contains(ReprFlags::IS_C)177    }178179    #[inline]180    pub fn packed(&self) -> bool {181        self.pack.is_some()182    }183184    #[inline]185    pub fn transparent(&self) -> bool {186        self.flags.contains(ReprFlags::IS_TRANSPARENT)187    }188189    #[inline]190    pub fn linear(&self) -> bool {191        self.flags.contains(ReprFlags::IS_LINEAR)192    }193194    /// Returns the discriminant type, given these `repr` options.195    /// This must only be called on enums!196    ///197    /// This is the "typeck type" of the discriminant, which is effectively the maximum size:198    /// discriminant values will be wrapped to fit (with a lint). Layout can later decide to use a199    /// smaller type for the tag that stores the discriminant at runtime and that will work just200    /// fine, it just induces casts when getting/setting the discriminant.201    pub fn discr_type(&self) -> IntegerType {202        self.int.unwrap_or(IntegerType::Pointer(true))203    }204205    /// Returns `true` if this `#[repr()]` should inhabit "smart enum206    /// layout" optimizations, such as representing `Foo<&T>` as a207    /// single pointer.208    pub fn inhibit_enum_layout_opt(&self) -> bool {209        self.c() || self.int.is_some()210    }211212    pub fn inhibit_newtype_abi_optimization(&self) -> bool {213        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)214    }215216    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,217    /// e.g. 
`repr(C)` or `repr(<int>)`.218    pub fn inhibit_struct_field_reordering(&self) -> bool {219        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()220    }221222    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`223    /// was enabled for its declaration crate.224    pub fn can_randomize_type_layout(&self) -> bool {225        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)226    }227228    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.229    pub fn inhibits_union_abi_opt(&self) -> bool {230        self.c()231    }232}233234/// The maximum supported number of lanes in a SIMD vector.235///236/// This value is selected based on backend support:237/// * LLVM does not appear to have a vector width limit.238/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.239pub const MAX_SIMD_LANES: u64 = 1 << 0xF;240241/// How pointers are represented in a given address space242#[derive(Copy, Clone, Debug, PartialEq, Eq)]243pub struct PointerSpec {244    /// The size of the bitwise representation of the pointer.245    pointer_size: Size,246    /// The alignment of pointers for this address space247    pointer_align: Align,248    /// The size of the value a pointer can be offset by in this address space.249    pointer_offset: Size,250    /// Pointers into this address space contain extra metadata251    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?252    _is_fat: bool,253}254255/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)256/// for a target, which contains everything needed to compute layouts.257#[derive(Debug, PartialEq, Eq)]258pub struct TargetDataLayout {259    pub endian: Endian,260    pub i1_align: Align,261    pub i8_align: Align,262    pub i16_align: Align,263    pub i32_align: Align,264    pub i64_align: Align,265    pub i128_align: Align,266    pub 
f16_align: Align,267    pub f32_align: Align,268    pub f64_align: Align,269    pub f128_align: Align,270    pub aggregate_align: Align,271272    /// Alignments for vector types.273    pub vector_align: Vec<(Size, Align)>,274275    pub default_address_space: AddressSpace,276    pub default_address_space_pointer_spec: PointerSpec,277278    /// Address space information of all known address spaces.279    ///280    /// # Note281    ///282    /// This vector does not contain the [`PointerSpec`] relative to the default address space,283    /// which instead lives in [`Self::default_address_space_pointer_spec`].284    address_space_info: Vec<(AddressSpace, PointerSpec)>,285286    pub instruction_address_space: AddressSpace,287288    /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)289    /// Note: This isn't in LLVM's data layout string, it is `short_enum`290    /// so the only valid spec for LLVM is c_int::BITS or 8291    pub c_enum_min_size: Integer,292}293294impl Default for TargetDataLayout {295    /// Creates an instance of `TargetDataLayout`.296    fn default() -> TargetDataLayout {297        let align = |bits| Align::from_bits(bits).unwrap();298        TargetDataLayout {299            endian: Endian::Big,300            i1_align: align(8),301            i8_align: align(8),302            i16_align: align(16),303            i32_align: align(32),304            i64_align: align(32),305            i128_align: align(32),306            f16_align: align(16),307            f32_align: align(32),308            f64_align: align(64),309            f128_align: align(128),310            aggregate_align: align(8),311            vector_align: vec![312                (Size::from_bits(64), align(64)),313                (Size::from_bits(128), align(128)),314            ],315            default_address_space: AddressSpace::ZERO,316            default_address_space_pointer_spec: PointerSpec {317                pointer_size: Size::from_bits(64),318                
pointer_align: align(64),319                pointer_offset: Size::from_bits(64),320                _is_fat: false,321            },322            address_space_info: vec![],323            instruction_address_space: AddressSpace::ZERO,324            c_enum_min_size: Integer::I32,325        }326    }327}328329pub enum TargetDataLayoutError<'a> {330    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },331    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },332    MissingAlignment { cause: &'a str },333    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },334    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },335    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },336    InvalidBitsSize { err: String },337    UnknownPointerSpecification { err: String },338}339340#[cfg(feature = "nightly")]341impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetDataLayoutError<'_> {342    fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {343        match self {344            TargetDataLayoutError::InvalidAddressSpace { addr_space, err, cause } => {345                Diag::new(dcx, level, msg!("invalid address space `{$addr_space}` for `{$cause}` in \"data-layout\": {$err}"))346                    .with_arg("addr_space", addr_space)347                    .with_arg("cause", cause)348                    .with_arg("err", err)349            }350            TargetDataLayoutError::InvalidBits { kind, bit, cause, err } => {351                Diag::new(dcx, level, msg!("invalid {$kind} `{$bit}` for `{$cause}` in \"data-layout\": {$err}"))352                    .with_arg("kind", kind)353                    .with_arg("bit", bit)354                    .with_arg("cause", cause)355                    .with_arg("err", err)356            }357            TargetDataLayoutError::MissingAlignment { cause } => {358                Diag::new(dcx, level, msg!("missing alignment 
for `{$cause}` in \"data-layout\""))359                    .with_arg("cause", cause)360            }361            TargetDataLayoutError::InvalidAlignment { cause, err } => {362                Diag::new(dcx, level, msg!("invalid alignment for `{$cause}` in \"data-layout\": {$err}"))363                    .with_arg("cause", cause)364                    .with_arg("err", err.to_string())365            }366            TargetDataLayoutError::InconsistentTargetArchitecture { dl, target } => {367                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims architecture is {$dl}-endian, while \"target-endian\" is `{$target}`"))368                    .with_arg("dl", dl).with_arg("target", target)369            }370            TargetDataLayoutError::InconsistentTargetPointerWidth { pointer_size, target } => {371                Diag::new(dcx, level, msg!("inconsistent target specification: \"data-layout\" claims pointers are {$pointer_size}-bit, while \"target-pointer-width\" is `{$target}`"))372                    .with_arg("pointer_size", pointer_size).with_arg("target", target)373            }374            TargetDataLayoutError::InvalidBitsSize { err } => {375                Diag::new(dcx, level, msg!("{$err}")).with_arg("err", err)376            }377            TargetDataLayoutError::UnknownPointerSpecification { err } => {378                Diag::new(dcx, level, msg!("unknown pointer specification `{$err}` in datalayout string"))379                    .with_arg("err", err)380            }381        }382    }383}384385impl TargetDataLayout {386    /// Parse data layout from an387    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)388    ///389    /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be390    /// determined from llvm string.391    pub fn parse_from_llvm_datalayout_string<'a>(392        input: &'a str,393        default_address_space: 
AddressSpace,394    ) -> Result<TargetDataLayout, TargetDataLayoutError<'a>> {395        // Parse an address space index from a string.396        let parse_address_space = |s: &'a str, cause: &'a str| {397            s.parse::<u32>().map(AddressSpace).map_err(|err| {398                TargetDataLayoutError::InvalidAddressSpace { addr_space: s, cause, err }399            })400        };401402        // Parse a bit count from a string.403        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {404            s.parse::<u64>().map_err(|err| TargetDataLayoutError::InvalidBits {405                kind,406                bit: s,407                cause,408                err,409            })410        };411412        // Parse a size string.413        let parse_size =414            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);415416        // Parse an alignment string.417        let parse_align_str = |s: &'a str, cause: &'a str| {418            let align_from_bits = |bits| {419                Align::from_bits(bits)420                    .map_err(|err| TargetDataLayoutError::InvalidAlignment { cause, err })421            };422            let abi = parse_bits(s, "alignment", cause)?;423            Ok(align_from_bits(abi)?)424        };425426        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,427        // ignoring the secondary alignment specifications.428        let parse_align_seq = |s: &[&'a str], cause: &'a str| {429            if s.is_empty() {430                return Err(TargetDataLayoutError::MissingAlignment { cause });431            }432            parse_align_str(s[0], cause)433        };434435        let mut dl = TargetDataLayout::default();436        dl.default_address_space = default_address_space;437438        let mut i128_align_src = 64;439        for spec in input.split('-') {440            let spec_parts = spec.split(':').collect::<Vec<_>>();441442            match 
&*spec_parts {443                ["e"] => dl.endian = Endian::Little,444                ["E"] => dl.endian = Endian::Big,445                [p] if p.starts_with('P') => {446                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?447                }448                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,449                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,450                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,451                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,452                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,453                [p, s, a @ ..] if p.starts_with("p") => {454                    let mut p = p.strip_prefix('p').unwrap();455                    let mut _is_fat = false;456457                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that458                    // they use 'fat' pointers. 
The resulting prefix may look like `pf<addr_space>`.459460                    if p.starts_with('f') {461                        p = p.strip_prefix('f').unwrap();462                        _is_fat = true;463                    }464465                    // However, we currently don't take into account further specifications:466                    // an error is emitted instead.467                    if p.starts_with(char::is_alphabetic) {468                        return Err(TargetDataLayoutError::UnknownPointerSpecification {469                            err: p.to_string(),470                        });471                    }472473                    let addr_space = if !p.is_empty() {474                        parse_address_space(p, "p-")?475                    } else {476                        AddressSpace::ZERO477                    };478479                    let pointer_size = parse_size(s, "p-")?;480                    let pointer_align = parse_align_seq(a, "p-")?;481                    let info = PointerSpec {482                        pointer_offset: pointer_size,483                        pointer_size,484                        pointer_align,485                        _is_fat,486                    };487                    if addr_space == default_address_space {488                        dl.default_address_space_pointer_spec = info;489                    } else {490                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {491                            Some(e) => e.1 = info,492                            None => {493                                dl.address_space_info.push((addr_space, info));494                            }495                        }496                    }497                }498                [p, s, a, _pr, i] if p.starts_with("p") => {499                    let mut p = p.strip_prefix('p').unwrap();500                    let mut _is_fat = false;501502                    // Some targets, such 
as CHERI, use the 'f' suffix in the p- spec to signal that503                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.504505                    if p.starts_with('f') {506                        p = p.strip_prefix('f').unwrap();507                        _is_fat = true;508                    }509510                    // However, we currently don't take into account further specifications:511                    // an error is emitted instead.512                    if p.starts_with(char::is_alphabetic) {513                        return Err(TargetDataLayoutError::UnknownPointerSpecification {514                            err: p.to_string(),515                        });516                    }517518                    let addr_space = if !p.is_empty() {519                        parse_address_space(p, "p")?520                    } else {521                        AddressSpace::ZERO522                    };523524                    let info = PointerSpec {525                        pointer_size: parse_size(s, "p-")?,526                        pointer_align: parse_align_str(a, "p-")?,527                        pointer_offset: parse_size(i, "p-")?,528                        _is_fat,529                    };530531                    if addr_space == default_address_space {532                        dl.default_address_space_pointer_spec = info;533                    } else {534                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {535                            Some(e) => e.1 = info,536                            None => {537                                dl.address_space_info.push((addr_space, info));538                            }539                        }540                    }541                }542543                [s, a @ ..] 
if s.starts_with('i') => {544                    let Ok(bits) = s[1..].parse::<u64>() else {545                        parse_size(&s[1..], "i")?; // For the user error.546                        continue;547                    };548                    let a = parse_align_seq(a, s)?;549                    match bits {550                        1 => dl.i1_align = a,551                        8 => dl.i8_align = a,552                        16 => dl.i16_align = a,553                        32 => dl.i32_align = a,554                        64 => dl.i64_align = a,555                        _ => {}556                    }557                    if bits >= i128_align_src && bits <= 128 {558                        // Default alignment for i128 is decided by taking the alignment of559                        // largest-sized i{64..=128}.560                        i128_align_src = bits;561                        dl.i128_align = a;562                    }563                }564                [s, a @ ..] 
if s.starts_with('v') => {565                    let v_size = parse_size(&s[1..], "v")?;566                    let a = parse_align_seq(a, s)?;567                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {568                        v.1 = a;569                        continue;570                    }571                    // No existing entry, add a new one.572                    dl.vector_align.push((v_size, a));573                }574                _ => {} // Ignore everything else.575            }576        }577578        // Inherit, if not given, address space information for specific LLVM elements from the579        // default data address space.580        if (dl.instruction_address_space != dl.default_address_space)581            && dl582                .address_space_info583                .iter()584                .find(|(a, _)| *a == dl.instruction_address_space)585                .is_none()586        {587            dl.address_space_info.push((588                dl.instruction_address_space,589                dl.default_address_space_pointer_spec.clone(),590            ));591        }592593        Ok(dl)594    }595596    /// Returns **exclusive** upper bound on object size in bytes, in the default data address597    /// space.598    ///599    /// The theoretical maximum object size is defined as the maximum positive `isize` value.600    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly601    /// index every address within an object along with one byte past the end, along with allowing602    /// `isize` to store the difference between any two pointers into an object.603    ///604    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,605    /// so we adopt such a more-constrained size bound due to its technical limitations.606    #[inline]607    pub fn obj_size_bound(&self) -> u64 {608        match self.pointer_size().bits() {609            16 
=> 1 << 15,610            32 => 1 << 31,611            64 => 1 << 61,612            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),613        }614    }615616    /// Returns **exclusive** upper bound on object size in bytes.617    ///618    /// The theoretical maximum object size is defined as the maximum positive `isize` value.619    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly620    /// index every address within an object along with one byte past the end, along with allowing621    /// `isize` to store the difference between any two pointers into an object.622    ///623    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,624    /// so we adopt such a more-constrained size bound due to its technical limitations.625    #[inline]626    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {627        match self.pointer_size_in(address_space).bits() {628            16 => 1 << 15,629            32 => 1 << 31,630            64 => 1 << 61,631            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),632        }633    }634635    #[inline]636    pub fn ptr_sized_integer(&self) -> Integer {637        use Integer::*;638        match self.pointer_offset().bits() {639            16 => I16,640            32 => I32,641            64 => I64,642            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),643        }644    }645646    #[inline]647    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {648        use Integer::*;649        match self.pointer_offset_in(address_space).bits() {650            16 => I16,651            32 => I32,652            64 => I64,653            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),654        }655    }656657    /// psABI-mandated alignment for a vector type, if any658    #[inline]659    fn cabi_vector_align(&self, vec_size: Size) -> 
Option<Align> {660        self.vector_align661            .iter()662            .find(|(size, _align)| *size == vec_size)663            .map(|(_size, align)| *align)664    }665666    /// an alignment resembling the one LLVM would pick for a vector667    #[inline]668    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {669        self.cabi_vector_align(vec_size)670            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())671    }672673    /// Get the pointer size in the default data address space.674    #[inline]675    pub fn pointer_size(&self) -> Size {676        self.default_address_space_pointer_spec.pointer_size677    }678679    /// Get the pointer size in a specific address space.680    #[inline]681    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {682        if c == self.default_address_space {683            return self.default_address_space_pointer_spec.pointer_size;684        }685686        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {687            e.1.pointer_size688        } else {689            panic!("Use of unknown address space {c:?}");690        }691    }692693    /// Get the pointer index in the default data address space.694    #[inline]695    pub fn pointer_offset(&self) -> Size {696        self.default_address_space_pointer_spec.pointer_offset697    }698699    /// Get the pointer index in a specific address space.700    #[inline]701    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {702        if c == self.default_address_space {703            return self.default_address_space_pointer_spec.pointer_offset;704        }705706        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {707            e.1.pointer_offset708        } else {709            panic!("Use of unknown address space {c:?}");710        }711    }712713    /// Get the pointer alignment in the default data address space.714    #[inline]715    pub fn pointer_align(&self) -> 
AbiAlign {716        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)717    }718719    /// Get the pointer alignment in a specific address space.720    #[inline]721    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {722        AbiAlign::new(if c == self.default_address_space {723            self.default_address_space_pointer_spec.pointer_align724        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {725            e.1.pointer_align726        } else {727            panic!("Use of unknown address space {c:?}");728        })729    }730}731732pub trait HasDataLayout {733    fn data_layout(&self) -> &TargetDataLayout;734}735736impl HasDataLayout for TargetDataLayout {737    #[inline]738    fn data_layout(&self) -> &TargetDataLayout {739        self740    }741}742743// used by rust-analyzer744impl HasDataLayout for &TargetDataLayout {745    #[inline]746    fn data_layout(&self) -> &TargetDataLayout {747        (**self).data_layout()748    }749}750751/// Endianness of the target, which must match cfg(target-endian).752#[derive(Copy, Clone, PartialEq, Eq)]753pub enum Endian {754    Little,755    Big,756}757758impl Endian {759    pub fn as_str(&self) -> &'static str {760        match self {761            Self::Little => "little",762            Self::Big => "big",763        }764    }765766    #[cfg(feature = "nightly")]767    pub fn desc_symbol(&self) -> Symbol {768        match self {769            Self::Little => sym::little,770            Self::Big => sym::big,771        }772    }773}774775impl fmt::Debug for Endian {776    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {777        f.write_str(self.as_str())778    }779}780781impl FromStr for Endian {782    type Err = String;783784    fn from_str(s: &str) -> Result<Self, Self::Err> {785        match s {786            "little" => Ok(Self::Little),787            "big" => Ok(Self::Big),788            _ => Err(format!(r#"unknown endian: "{s}""#)),789    
    }790    }791}792793/// Size of a type in bytes.794#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]795#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]796pub struct Size {797    raw: u64,798}799800#[cfg(feature = "nightly")]801impl StableOrd for Size {802    const CAN_USE_UNSTABLE_SORT: bool = true;803804    // `Ord` is implemented as just comparing numerical values and numerical values805    // are not changed by (de-)serialization.806    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();807}808809// This is debug-printed a lot in larger structs, don't waste too much space there810impl fmt::Debug for Size {811    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {812        write!(f, "Size({} bytes)", self.bytes())813    }814}815816impl Size {817    pub const ZERO: Size = Size { raw: 0 };818819    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is820    /// not a multiple of 8.821    pub fn from_bits(bits: impl TryInto<u64>) -> Size {822        let bits = bits.try_into().ok().unwrap();823        Size { raw: bits.div_ceil(8) }824    }825826    #[inline]827    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {828        let bytes: u64 = bytes.try_into().ok().unwrap();829        Size { raw: bytes }830    }831832    #[inline]833    pub fn bytes(self) -> u64 {834        self.raw835    }836837    #[inline]838    pub fn bytes_usize(self) -> usize {839        self.bytes().try_into().unwrap()840    }841842    #[inline]843    pub fn bits(self) -> u64 {844        #[cold]845        fn overflow(bytes: u64) -> ! 
{846            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")847        }848849        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))850    }851852    #[inline]853    pub fn bits_usize(self) -> usize {854        self.bits().try_into().unwrap()855    }856857    #[inline]858    pub fn align_to(self, align: Align) -> Size {859        let mask = align.bytes() - 1;860        Size::from_bytes((self.bytes() + mask) & !mask)861    }862863    #[inline]864    pub fn is_aligned(self, align: Align) -> bool {865        let mask = align.bytes() - 1;866        self.bytes() & mask == 0867    }868869    #[inline]870    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {871        let dl = cx.data_layout();872873        let bytes = self.bytes().checked_add(offset.bytes())?;874875        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }876    }877878    #[inline]879    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {880        let dl = cx.data_layout();881882        let bytes = self.bytes().checked_mul(count)?;883        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }884    }885886    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits887    /// (i.e., if it is negative, fill with 1's on the left).888    #[inline]889    pub fn sign_extend(self, value: u128) -> i128 {890        let size = self.bits();891        if size == 0 {892            // Truncated until nothing is left.893            return 0;894        }895        // Sign-extend it.896        let shift = 128 - size;897        // Shift the unsigned value to the left, then shift back to the right as signed898        // (essentially fills with sign bit on the left).899        ((value << shift) as i128) >> shift900    }901902    /// Truncates `value` to `self` bits.903    #[inline]904    pub fn truncate(self, value: u128) -> u128 {905        let size 
= self.bits();906        if size == 0 {907            // Truncated until nothing is left.908            return 0;909        }910        let shift = 128 - size;911        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).912        (value << shift) >> shift913    }914915    #[inline]916    pub fn signed_int_min(&self) -> i128 {917        self.sign_extend(1_u128 << (self.bits() - 1))918    }919920    #[inline]921    pub fn signed_int_max(&self) -> i128 {922        i128::MAX >> (128 - self.bits())923    }924925    #[inline]926    pub fn unsigned_int_max(&self) -> u128 {927        u128::MAX >> (128 - self.bits())928    }929}930931// Panicking addition, subtraction and multiplication for convenience.932// Avoid during layout computation, return `LayoutError` instead.933934impl Add for Size {935    type Output = Size;936    #[inline]937    fn add(self, other: Size) -> Size {938        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {939            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())940        }))941    }942}943944impl Sub for Size {945    type Output = Size;946    #[inline]947    fn sub(self, other: Size) -> Size {948        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {949            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())950        }))951    }952}953954impl Mul<Size> for u64 {955    type Output = Size;956    #[inline]957    fn mul(self, size: Size) -> Size {958        size * self959    }960}961962impl Mul<u64> for Size {963    type Output = Size;964    #[inline]965    fn mul(self, count: u64) -> Size {966        match self.bytes().checked_mul(count) {967            Some(bytes) => Size::from_bytes(bytes),968            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),969        }970    }971}972973impl AddAssign for Size {974    #[inline]975    fn add_assign(&mut 
self, other: Size) {976        *self = *self + other;977    }978}979980#[cfg(feature = "nightly")]981impl Step for Size {982    #[inline]983    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {984        u64::steps_between(&start.bytes(), &end.bytes())985    }986987    #[inline]988    fn forward_checked(start: Self, count: usize) -> Option<Self> {989        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)990    }991992    #[inline]993    fn forward(start: Self, count: usize) -> Self {994        Self::from_bytes(u64::forward(start.bytes(), count))995    }996997    #[inline]998    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {999        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })1000    }10011002    #[inline]1003    fn backward_checked(start: Self, count: usize) -> Option<Self> {1004        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)1005    }10061007    #[inline]1008    fn backward(start: Self, count: usize) -> Self {1009        Self::from_bytes(u64::backward(start.bytes(), count))1010    }10111012    #[inline]1013    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {1014        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })1015    }1016}10171018/// Alignment of a type in bytes (always a power of two).1019#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]1020#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]1021pub struct Align {1022    pow2: u8,1023}10241025// This is debug-printed a lot in larger structs, don't waste too much space there1026impl fmt::Debug for Align {1027    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {1028        write!(f, "Align({} bytes)", self.bytes())1029    }1030}10311032#[derive(Clone, Copy)]1033pub enum AlignFromBytesError {1034    NotPowerOfTwo(u64),1035    TooLarge(u64),1036}10371038impl fmt::Debug for AlignFromBytesError 
{1039    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {1040        fmt::Display::fmt(self, f)1041    }1042}10431044impl fmt::Display for AlignFromBytesError {1045    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {1046        match self {1047            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "{align} is not a power of 2"),1048            AlignFromBytesError::TooLarge(align) => write!(f, "{align} is too large"),1049        }1050    }1051}10521053impl Align {1054    pub const ONE: Align = Align { pow2: 0 };1055    pub const EIGHT: Align = Align { pow2: 3 };1056    // LLVM has a maximal supported alignment of 2^29, we inherit that.1057    pub const MAX: Align = Align { pow2: 29 };10581059    /// Either `1 << (pointer_bits - 1)` or [`Align::MAX`], whichever is smaller.1060    #[inline]1061    pub fn max_for_target(tdl: &TargetDataLayout) -> Align {1062        let pointer_bits = tdl.pointer_size().bits();1063        if let Ok(pointer_bits) = u8::try_from(pointer_bits)1064            && pointer_bits <= Align::MAX.pow21065        {1066            Align { pow2: pointer_bits - 1 }1067        } else {1068            Align::MAX1069        }1070    }10711072    #[inline]1073    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {1074        Align::from_bytes(Size::from_bits(bits).bytes())1075    }10761077    #[inline]1078    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {1079        // Treat an alignment of 0 bytes like 1-byte alignment.1080        if align == 0 {1081            return Ok(Align::ONE);1082        }10831084        #[cold]1085        const fn not_power_of_2(align: u64) -> AlignFromBytesError {1086            AlignFromBytesError::NotPowerOfTwo(align)1087        }10881089        #[cold]1090        const fn too_large(align: u64) -> AlignFromBytesError {1091            AlignFromBytesError::TooLarge(align)1092        }10931094        let tz = align.trailing_zeros();1095        if align != 
(1 << tz) {1096            return Err(not_power_of_2(align));1097        }10981099        let pow2 = tz as u8;1100        if pow2 > Self::MAX.pow2 {1101            return Err(too_large(align));1102        }11031104        Ok(Align { pow2 })1105    }11061107    #[inline]1108    pub const fn bytes(self) -> u64 {1109        1 << self.pow21110    }11111112    #[inline]1113    pub fn bytes_usize(self) -> usize {1114        self.bytes().try_into().unwrap()1115    }11161117    #[inline]1118    pub const fn bits(self) -> u64 {1119        self.bytes() * 81120    }11211122    #[inline]1123    pub fn bits_usize(self) -> usize {1124        self.bits().try_into().unwrap()1125    }11261127    /// Obtain the greatest factor of `size` that is an alignment1128    /// (the largest power of two the Size is a multiple of).1129    ///1130    /// Note that all numbers are factors of 01131    #[inline]1132    pub fn max_aligned_factor(size: Size) -> Align {1133        Align { pow2: size.bytes().trailing_zeros() as u8 }1134    }11351136    /// Reduces Align to an aligned factor of `size`.1137    #[inline]1138    pub fn restrict_for_offset(self, size: Size) -> Align {1139        self.min(Align::max_aligned_factor(size))1140    }1141}11421143/// A pair of alignments, ABI-mandated and preferred.1144///1145/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:1146/// it is not exposed semantically to programmers nor can they meaningfully affect it.1147/// The only concern for us is that preferred alignment must not be less than the mandated alignment1148/// and thus in practice the two values are almost always identical.1149///1150/// An example of a rare thing actually affected by preferred alignment is aligning of statics.1151/// It is of effectively no consequence for layout in structs and on the stack.1152#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]1153#[cfg_attr(feature = "nightly", derive(StableHash))]1154pub struct AbiAlign {1155    pub abi: 
Align,1156}11571158impl AbiAlign {1159    #[inline]1160    pub fn new(align: Align) -> AbiAlign {1161        AbiAlign { abi: align }1162    }11631164    #[inline]1165    pub fn min(self, other: AbiAlign) -> AbiAlign {1166        AbiAlign { abi: self.abi.min(other.abi) }1167    }11681169    #[inline]1170    pub fn max(self, other: AbiAlign) -> AbiAlign {1171        AbiAlign { abi: self.abi.max(other.abi) }1172    }1173}11741175impl Deref for AbiAlign {1176    type Target = Align;11771178    fn deref(&self) -> &Self::Target {1179        &self.abi1180    }1181}11821183/// Integers, also used for enum discriminants.1184#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]1185#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext, StableHash))]1186pub enum Integer {1187    I8,1188    I16,1189    I32,1190    I64,1191    I128,1192}11931194impl Integer {1195    pub fn int_ty_str(self) -> &'static str {1196        use Integer::*;1197        match self {1198            I8 => "i8",1199            I16 => "i16",1200            I32 => "i32",1201            I64 => "i64",1202            I128 => "i128",1203        }1204    }12051206    pub fn uint_ty_str(self) -> &'static str {1207        use Integer::*;1208        match self {1209            I8 => "u8",1210            I16 => "u16",1211            I32 => "u32",1212            I64 => "u64",1213            I128 => "u128",1214        }1215    }12161217    #[inline]1218    pub fn size(self) -> Size {1219        use Integer::*;1220        match self {1221            I8 => Size::from_bytes(1),1222            I16 => Size::from_bytes(2),1223            I32 => Size::from_bytes(4),1224            I64 => Size::from_bytes(8),1225            I128 => Size::from_bytes(16),1226        }1227    }12281229    /// Gets the Integer type from an IntegerType.1230    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {1231        let dl = cx.data_layout();12321233        match ity {1234           
 IntegerType::Pointer(_) => dl.ptr_sized_integer(),1235            IntegerType::Fixed(x, _) => x,1236        }1237    }12381239    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {1240        use Integer::*;1241        let dl = cx.data_layout();12421243        AbiAlign::new(match self {1244            I8 => dl.i8_align,1245            I16 => dl.i16_align,1246            I32 => dl.i32_align,1247            I64 => dl.i64_align,1248            I128 => dl.i128_align,1249        })1250    }12511252    /// Returns the largest signed value that can be represented by this Integer.1253    #[inline]1254    pub fn signed_max(self) -> i128 {1255        use Integer::*;1256        match self {1257            I8 => i8::MAX as i128,1258            I16 => i16::MAX as i128,1259            I32 => i32::MAX as i128,1260            I64 => i64::MAX as i128,1261            I128 => i128::MAX,1262        }1263    }12641265    /// Returns the smallest signed value that can be represented by this Integer.1266    #[inline]1267    pub fn signed_min(self) -> i128 {1268        use Integer::*;1269        match self {1270            I8 => i8::MIN as i128,1271            I16 => i16::MIN as i128,1272            I32 => i32::MIN as i128,1273            I64 => i64::MIN as i128,1274            I128 => i128::MIN,1275        }1276    }12771278    /// Finds the smallest Integer type which can represent the signed value.1279    #[inline]1280    pub fn fit_signed(x: i128) -> Integer {1281        use Integer::*;1282        match x {1283            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,1284            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,1285            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,1286            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,1287            _ => I128,1288        }1289    }12901291    /// Finds the smallest Integer type which can represent the unsigned value.1292    #[inline]1293    pub fn fit_unsigned(x: u128) -> Integer 
{1294        use Integer::*;1295        match x {1296            0..=0x0000_0000_0000_00ff => I8,1297            0..=0x0000_0000_0000_ffff => I16,1298            0..=0x0000_0000_ffff_ffff => I32,1299            0..=0xffff_ffff_ffff_ffff => I64,1300            _ => I128,1301        }1302    }13031304    /// Finds the smallest integer with the given alignment.1305    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {1306        use Integer::*;1307        let dl = cx.data_layout();13081309        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {1310            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()1311        })1312    }13131314    /// Find the largest integer with the given alignment or less.1315    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {1316        use Integer::*;1317        let dl = cx.data_layout();13181319        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.1320        for candidate in [I64, I32, I16] {1321            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {1322                return candidate;1323            }1324        }1325        I81326    }13271328    // FIXME(eddyb) consolidate this and other methods that find the appropriate1329    // `Integer` given some requirements.1330    #[inline]1331    pub fn from_size(size: Size) -> Result<Self, String> {1332        match size.bits() {1333            8 => Ok(Integer::I8),1334            16 => Ok(Integer::I16),1335            32 => Ok(Integer::I32),1336            64 => Ok(Integer::I64),1337            128 => Ok(Integer::I128),1338            _ => Err(format!("rust does not support integers with {} bits", size.bits())),1339        }1340    }1341}13421343/// Floating-point types.1344#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]1345#[cfg_attr(feature = "nightly", derive(StableHash))]1346pub enum Float {1347   
 F16,1348    F32,1349    F64,1350    F128,1351}13521353impl Float {1354    pub fn size(self) -> Size {1355        use Float::*;13561357        match self {1358            F16 => Size::from_bits(16),1359            F32 => Size::from_bits(32),1360            F64 => Size::from_bits(64),1361            F128 => Size::from_bits(128),1362        }1363    }13641365    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {1366        use Float::*;1367        let dl = cx.data_layout();13681369        AbiAlign::new(match self {1370            F16 => dl.f16_align,1371            F32 => dl.f32_align,1372            F64 => dl.f64_align,1373            F128 => dl.f128_align,1374        })1375    }1376}13771378/// Fundamental unit of memory access and layout.1379#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]1380#[cfg_attr(feature = "nightly", derive(StableHash))]1381pub enum Primitive {1382    /// The `bool` is the signedness of the `Integer` type.1383    ///1384    /// One would think we would not care about such details this low down,1385    /// but some ABIs are described in terms of C types and ISAs where the1386    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.1387    /// a negative integer passed by zero-extension will appear positive in1388    /// the callee, and most operations on it will produce the wrong values.1389    Int(Integer, bool),1390    Float(Float),1391    Pointer(AddressSpace),1392}13931394impl Primitive {1395    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {1396        use Primitive::*;1397        let dl = cx.data_layout();13981399        match self {1400            Int(i, _) => i.size(),1401            Float(f) => f.size(),1402            Pointer(a) => dl.pointer_size_in(a),1403        }1404    }14051406    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {1407        use Primitive::*;1408        let dl = cx.data_layout();14091410        match self {1411            Int(i, _) => i.align(dl),1412            
Float(f) => f.align(dl),1413            Pointer(a) => dl.pointer_align_in(a),1414        }1415    }1416}14171418/// Inclusive wrap-around range of valid values, that is, if1419/// start > end, it represents `start..=MAX`, followed by `0..=end`.1420///1421/// That is, for an i8 primitive, a range of `254..=2` means following1422/// sequence:1423///1424///    254 (-2), 255 (-1), 0, 1, 21425///1426/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.1427#[derive(Clone, Copy, PartialEq, Eq, Hash)]1428#[cfg_attr(feature = "nightly", derive(StableHash))]1429pub struct WrappingRange {1430    pub start: u128,1431    pub end: u128,1432}14331434impl WrappingRange {1435    pub fn full(size: Size) -> Self {1436        Self { start: 0, end: size.unsigned_int_max() }1437    }14381439    /// Returns `true` if `v` is contained in the range.1440    #[inline(always)]1441    pub fn contains(&self, v: u128) -> bool {1442        if self.start <= self.end {1443            self.start <= v && v <= self.end1444        } else {1445            self.start <= v || v <= self.end1446        }1447    }14481449    /// Returns `true` if all the values in `other` are contained in this range,1450    /// when the values are considered as having width `size`.1451    #[inline(always)]1452    pub fn contains_range(&self, other: Self, size: Size) -> bool {1453        if self.is_full_for(size) {1454            true1455        } else {1456            let trunc = |x| size.truncate(x);14571458            let delta = self.start;1459            let max = trunc(self.end.wrapping_sub(delta));14601461            let other_start = trunc(other.start.wrapping_sub(delta));1462            let other_end = trunc(other.end.wrapping_sub(delta));14631464            // Having shifted both input ranges by `delta`, now we only need to check1465            // whether `0..=max` contains `other_start..=other_end`, which can only1466            // happen if the other doesn't wrap since `self` isn't 
everything.1467            (other_start <= other_end) && (other_end <= max)1468        }1469    }14701471    /// Returns `self` with replaced `start`1472    #[inline(always)]1473    fn with_start(mut self, start: u128) -> Self {1474        self.start = start;1475        self1476    }14771478    /// Returns `self` with replaced `end`1479    #[inline(always)]1480    fn with_end(mut self, end: u128) -> Self {1481        self.end = end;1482        self1483    }14841485    /// Returns `true` if `size` completely fills the range.1486    ///1487    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.1488    /// Niche calculations can produce full ranges which are not the canonical one;1489    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.1490    #[inline]1491    fn is_full_for(&self, size: Size) -> bool {1492        let max_value = size.unsigned_int_max();1493        debug_assert!(self.start <= max_value && self.end <= max_value);1494        self.start == (self.end.wrapping_add(1) & max_value)1495    }14961497    /// Checks whether this range is considered non-wrapping when the values are1498    /// interpreted as *unsigned* numbers of width `size`.1499    ///1500    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,1501    /// and `Err(..)` if the range is full so it depends how you think about it.1502    #[inline]1503    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {1504        if self.is_full_for(size) { Err(..) 
} else { Ok(self.start <= self.end) }1505    }15061507    /// Checks whether this range is considered non-wrapping when the values are1508    /// interpreted as *signed* numbers of width `size`.1509    ///1510    /// This is heavily dependent on the `size`, as `100..=200` does wrap when1511    /// interpreted as `i8`, but doesn't when interpreted as `i16`.1512    ///1513    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,1514    /// and `Err(..)` if the range is full so it depends how you think about it.1515    #[inline]1516    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {1517        if self.is_full_for(size) {1518            Err(..)1519        } else {1520            let start: i128 = size.sign_extend(self.start);1521            let end: i128 = size.sign_extend(self.end);1522            Ok(start <= end)1523        }1524    }1525}15261527impl fmt::Debug for WrappingRange {1528    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {1529        if self.start > self.end {1530            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;1531        } else {1532            write!(fmt, "{}..={}", self.start, self.end)?;1533        }1534        Ok(())1535    }1536}15371538/// Information about one scalar component of a Rust type.1539#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]1540#[cfg_attr(feature = "nightly", derive(StableHash))]1541pub enum Scalar {1542    Initialized {1543        value: Primitive,15441545        // FIXME(eddyb) always use the shortest range, e.g., by finding1546        // the largest space between two consecutive valid values and1547        // taking everything else as the (shortest) valid range.1548        valid_range: WrappingRange,1549    },1550    Union {1551        /// Even for unions, we need to use the correct registers for the kind of1552        /// values inside the union, so we keep the `Primitive` type around. 
We1553        /// also use it to compute the size of the scalar.1554        /// However, unions never have niches and even allow undef,1555        /// so there is no `valid_range`.1556        value: Primitive,1557    },1558}15591560impl Scalar {1561    #[inline]1562    pub fn is_bool(&self) -> bool {1563        use Integer::*;1564        matches!(1565            self,1566            Scalar::Initialized {1567                value: Primitive::Int(I8, false),1568                valid_range: WrappingRange { start: 0, end: 1 }1569            }1570        )1571    }15721573    /// Get the primitive representation of this type, ignoring the valid range and whether the1574    /// value is allowed to be undefined (due to being a union).1575    pub fn primitive(&self) -> Primitive {1576        match *self {1577            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,1578        }1579    }15801581    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {1582        self.primitive().align(cx)1583    }15841585    pub fn size(self, cx: &impl HasDataLayout) -> Size {1586        self.primitive().size(cx)1587    }15881589    #[inline]1590    pub fn to_union(&self) -> Self {1591        Self::Union { value: self.primitive() }1592    }15931594    #[inline]1595    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {1596        match *self {1597            Scalar::Initialized { valid_range, .. } => valid_range,1598            Scalar::Union { value } => WrappingRange::full(value.size(cx)),1599        }1600    }16011602    #[inline]1603    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a1604    /// union.1605    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {1606        match self {1607            Scalar::Initialized { valid_range, .. } => valid_range,1608            Scalar::Union { .. 
} => panic!("cannot change the valid range of a union"),1609        }1610    }16111612    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole1613    /// layout.1614    #[inline]1615    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {1616        match *self {1617            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),1618            Scalar::Union { .. } => true,1619        }1620    }16211622    /// Returns `true` if this type can be left uninit.1623    #[inline]1624    pub fn is_uninit_valid(&self) -> bool {1625        match *self {1626            Scalar::Initialized { .. } => false,1627            Scalar::Union { .. } => true,1628        }1629    }16301631    /// Returns `true` if this is a signed integer scalar1632    #[inline]1633    pub fn is_signed(&self) -> bool {1634        match self.primitive() {1635            Primitive::Int(_, signed) => signed,1636            _ => false,1637        }1638    }1639}16401641// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.1642/// Describes how the fields of a type are located in memory.1643#[derive(PartialEq, Eq, Hash, Clone, Debug)]1644#[cfg_attr(feature = "nightly", derive(StableHash))]1645pub enum FieldsShape<FieldIdx: Idx> {1646    /// Scalar primitives and `!`, which never have fields.1647    Primitive,16481649    /// All fields start at no offset. 
The `usize` is the field count.1650    Union(NonZeroUsize),16511652    /// Array/vector-like placement, with all fields of identical types.1653    Array { stride: Size, count: u64 },16541655    /// Struct-like placement, with precomputed offsets.1656    ///1657    /// Fields are guaranteed to not overlap, but note that gaps1658    /// before, between and after all the fields are NOT always1659    /// padding, and as such their contents may not be discarded.1660    /// For example, enum variants leave a gap at the start,1661    /// where the discriminant field in the enum layout goes.1662    Arbitrary {1663        /// Offsets for the first byte of each field,1664        /// ordered to match the source definition order.1665        /// This vector does not go in increasing order.1666        // FIXME(eddyb) use small vector optimization for the common case.1667        offsets: IndexVec<FieldIdx, Size>,16681669        /// Maps memory order field indices to source order indices,1670        /// depending on how the fields were reordered (if at all).1671        /// This is a permutation, with both the source order and the1672        /// memory order using the same (0..n) index ranges.1673        ///1674        // FIXME(eddyb) build a better abstraction for permutations, if possible.1675        // FIXME(camlorn) also consider small vector optimization here.1676        in_memory_order: IndexVec<u32, FieldIdx>,1677    },1678}16791680impl<FieldIdx: Idx> FieldsShape<FieldIdx> {1681    #[inline]1682    pub fn count(&self) -> usize {1683        match *self {1684            FieldsShape::Primitive => 0,1685            FieldsShape::Union(count) => count.get(),1686            FieldsShape::Array { count, .. } => count.try_into().unwrap(),1687            FieldsShape::Arbitrary { ref offsets, .. 
} => offsets.len(),1688        }1689    }16901691    #[inline]1692    pub fn offset(&self, i: usize) -> Size {1693        match *self {1694            FieldsShape::Primitive => {1695                unreachable!("FieldsShape::offset: `Primitive`s have no fields")1696            }1697            FieldsShape::Union(count) => {1698                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");1699                Size::ZERO1700            }1701            FieldsShape::Array { stride, count } => {1702                let i = u64::try_from(i).unwrap();1703                assert!(i < count, "tried to access field {i} of array with {count} fields");1704                stride * i1705            }1706            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],1707        }1708    }17091710    /// Gets source indices of the fields by increasing offsets.1711    #[inline]1712    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {1713        // Primitives don't really have fields in the way that structs do,1714        // but having this return an empty iterator for them is unhelpful1715        // since that makes them look kinda like ZSTs, which they're not.1716        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };17171718        (0..pseudofield_count).map(move |i| match self {1719            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,1720            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),1721        })1722    }1723}17241725/// An identifier that specifies the address space that some operation1726/// should operate on. 
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(StableHash))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// LLVM's `0` address space.
    pub const ZERO: Self = AddressSpace(0);
    /// The address space for workgroup memory on nvptx and amdgpu.
    /// See e.g. the `gpu_launch_sized_workgroup_mem` intrinsic for details.
    pub const GPU_WORKGROUP: Self = AddressSpace(3);
}

/// How many scalable vectors are in a `BackendRepr::ScalableVector`?
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(StableHash))]
pub struct NumScalableVectors(pub u8);

impl NumScalableVectors {
    /// Returns a `NumScalableVectors` for a non-tuple scalable vector (e.g. a single vector).
    pub fn for_non_tuple() -> Self {
        NumScalableVectors(1)
    }

    /// Returns `NumScalableVectors` for values of two through eight, which are a valid number of
    /// fields for a tuple of scalable vectors to have. `1` is a valid value of
    /// `NumScalableVectors` but not for a tuple which would have a field count.
    ///
    /// Returns `None` for any count outside `2..=8`.
    pub fn from_field_count(count: usize) -> Option<Self> {
        match count {
            // NOTE: must be the *inclusive* pattern `2..=8`. The exclusive pattern `2..8`
            // would reject a field count of 8 even though eight fields are documented as
            // valid above (and `IntoDiagArg` below renders counts up to "eight").
            // The cast cannot truncate: `count` is known to be <= 8 here.
            2..=8 => Some(NumScalableVectors(count as u8)),
            _ => None,
        }
    }
}

#[cfg(feature = "nightly")]
impl IntoDiagArg for NumScalableVectors {
    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
        // Spell the count out in words for diagnostics; only 1..=8 are well-formed values.
        DiagArgValue::Str(std::borrow::Cow::Borrowed(match self.0 {
            0 => panic!("`NumScalableVectors(0)` is illformed"),
            1 => "one",
            2 => "two",
            3 => "three",
            4 => "four",
            5 => "five",
            6 => "six",
            7 => "seven",
            8 => "eight",
            _ => panic!("`NumScalableVectors(N)` for N>8 is illformed"),
        }))
    }
}
},1800    SimdVector {1801        element: Scalar,1802        count: u64,1803    },1804    // FIXME: I sometimes use memory, sometimes use an IR aggregate!1805    Memory {1806        /// If true, the size is exact, otherwise it's only a lower bound.1807        sized: bool,1808    },1809}18101811impl BackendRepr {1812    /// Returns `true` if the layout corresponds to an unsized type.1813    #[inline]1814    pub fn is_unsized(&self) -> bool {1815        match *self {1816            BackendRepr::Scalar(_)1817            | BackendRepr::ScalarPair(..)1818            // FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the1819            // `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is1820            // fully implemented, scalable vectors will remain `Sized`, they just won't be1821            // `const Sized` - whether `is_unsized` continues to return `false` at that point will1822            // need to be revisited and will depend on what `is_unsized` is used for.1823            | BackendRepr::SimdScalableVector { .. }1824            | BackendRepr::SimdVector { .. 
} => false,1825            BackendRepr::Memory { sized } => !sized,1826        }1827    }18281829    #[inline]1830    pub fn is_sized(&self) -> bool {1831        !self.is_unsized()1832    }18331834    /// Returns `true` if this is a single signed integer scalar.1835    /// Sanity check: panics if this is not a scalar type (see PR #70189).1836    #[inline]1837    pub fn is_signed(&self) -> bool {1838        match self {1839            BackendRepr::Scalar(scal) => scal.is_signed(),1840            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),1841        }1842    }18431844    /// Returns `true` if this is a scalar type1845    #[inline]1846    pub fn is_scalar(&self) -> bool {1847        matches!(*self, BackendRepr::Scalar(_))1848    }18491850    /// Returns `true` if this is a bool1851    #[inline]1852    pub fn is_bool(&self) -> bool {1853        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())1854    }18551856    /// The psABI alignment for a `Scalar` or `ScalarPair`1857    ///1858    /// `None` for other variants.1859    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {1860        match *self {1861            BackendRepr::Scalar(s) => Some(s.align(cx).abi),1862            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),1863            // The align of a Vector can vary in surprising ways1864            BackendRepr::SimdVector { .. }1865            | BackendRepr::Memory { .. }1866            | BackendRepr::SimdScalableVector { .. 
} => None,1867        }1868    }18691870    /// The psABI size for a `Scalar` or `ScalarPair`1871    ///1872    /// `None` for other variants1873    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {1874        match *self {1875            // No padding in scalars.1876            BackendRepr::Scalar(s) => Some(s.size(cx)),1877            // May have some padding between the pair.1878            BackendRepr::ScalarPair(s1, s2) => {1879                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);1880                let size = (field2_offset + s2.size(cx)).align_to(1881                    self.scalar_align(cx)1882                        // We absolutely must have an answer here or everything is FUBAR.1883                        .unwrap(),1884                );1885                Some(size)1886            }1887            // The size of a Vector can vary in surprising ways1888            BackendRepr::SimdVector { .. }1889            | BackendRepr::Memory { .. }1890            | BackendRepr::SimdScalableVector { .. } => None,1891        }1892    }18931894    /// Discard validity range information and allow undef.1895    pub fn to_union(&self) -> Self {1896        match *self {1897            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),1898            BackendRepr::ScalarPair(s1, s2) => {1899                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())1900            }1901            BackendRepr::SimdVector { element, count } => {1902                BackendRepr::SimdVector { element: element.to_union(), count }1903            }1904            BackendRepr::Memory { .. 
} => BackendRepr::Memory { sized: true },1905            BackendRepr::SimdScalableVector { element, count, number_of_vectors } => {1906                BackendRepr::SimdScalableVector {1907                    element: element.to_union(),1908                    count,1909                    number_of_vectors,1910                }1911            }1912        }1913    }19141915    pub fn eq_up_to_validity(&self, other: &Self) -> bool {1916        match (self, other) {1917            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.1918            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).1919            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),1920            (1921                BackendRepr::SimdVector { element: element_l, count: count_l },1922                BackendRepr::SimdVector { element: element_r, count: count_r },1923            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,1924            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {1925                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()1926            }1927            // Everything else must be strictly identical.1928            _ => self == other,1929        }1930    }1931}19321933// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.1934#[derive(PartialEq, Eq, Hash, Clone, Debug)]1935#[cfg_attr(feature = "nightly", derive(StableHash))]1936pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {1937    /// A type with no valid variants. 
Must be uninhabited.1938    Empty,19391940    /// Single enum variants, structs/tuples, unions, and all non-ADTs.1941    Single {1942        /// Always `0` for types that cannot have multiple variants.1943        index: VariantIdx,1944    },19451946    /// Enum-likes with more than one variant: each variant comes with1947    /// a *discriminant* (usually the same as the variant index but the user can1948    /// assign explicit discriminant values). That discriminant is encoded1949    /// as a *tag* on the machine. The layout of each variant is1950    /// a struct, and they all have space reserved for the tag.1951    /// For enums, the tag is the sole field of the layout.1952    Multiple {1953        tag: Scalar,1954        tag_encoding: TagEncoding<VariantIdx>,1955        tag_field: FieldIdx,1956        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,1957    },1958}19591960// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.1961#[derive(PartialEq, Eq, Hash, Clone, Debug)]1962#[cfg_attr(feature = "nightly", derive(StableHash))]1963pub enum TagEncoding<VariantIdx: Idx> {1964    /// The tag directly stores the discriminant, but possibly with a smaller layout1965    /// (so converting the tag to the discriminant can require sign extension).1966    Direct,19671968    /// Niche (values invalid for a type) encoding the discriminant.1969    /// Note that for this encoding, the discriminant and variant index of each variant coincide!1970    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).1971    ///1972    /// The variant `untagged_variant` contains a niche at an arbitrary1973    /// offset (field [`Variants::Multiple::tag_field`] of the enum).1974    /// For a variant with variant index `i`, such that `i != untagged_variant`,1975    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`1976    /// (this is wrapping arithmetic 
using the type of the niche field, cf. the1977    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)1978    /// query implementation).1979    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,1980    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside1981    /// `niche_variants`, the tag must have encoded the `untagged_variant`.1982    ///1983    /// For example, `Option<(usize, &T)>`  is represented such that the tag for1984    /// `None` is the null pointer in the second tuple field, and1985    /// `Some` is the identity function (with a non-null reference)1986    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.1987    ///1988    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`1989    /// range cannot be represented; they must be uninhabited.1990    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.1991    Niche {1992        untagged_variant: VariantIdx,1993        /// This range *may* contain `untagged_variant` or uninhabited variants;1994        /// these are then just "dead values" and not used to encode anything.1995        niche_variants: RangeInclusive<VariantIdx>,1996        /// This is inbounds of the type of the niche field1997        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).1998        niche_start: u128,1999    },2000}

Code quality findings 63

Critical: Use of 'unsafe' keyword bypasses Rust's safety guarantees. Requires careful auditing, clear justification (FFI, specific optimizations), and minimal scope.
error safety unsafe-block
unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
Critical: Use of 'unsafe' keyword bypasses Rust's safety guarantees. Requires careful auditing, clear justification (FFI, specific optimizations), and minimal scope.
error safety unsafe-block
Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
Critical: Use of 'unsafe' keyword bypasses Rust's safety guarantees. Requires careful auditing, clear justification (FFI, specific optimizations), and minimal scope.
error safety unsafe-block
unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
Critical: Use of 'unsafe' keyword bypasses Rust's safety guarantees. Requires careful auditing, clear justification (FFI, specific optimizations), and minimal scope.
error safety unsafe-block
Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// This vector does not contain the [`PointerSpec`] relative to the default address space,
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// which instead lives in [`Self::default_address_space_pointer_spec`].
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let align = |bits| Align::from_bits(bits).unwrap();
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
parse_align_str(s[0], cause)
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
dl.instruction_address_space = parse_address_space(&p[1..], "P")?
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let mut p = p.strip_prefix('p').unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
p = p.strip_prefix('f').unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let mut p = p.strip_prefix('p').unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
p = p.strip_prefix('f').unwrap();
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
let Ok(bits) = s[1..].parse::<u64>() else {
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
parse_size(&s[1..], "i")?; // For the user error.
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
let v_size = parse_size(&s[1..], "v")?;
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
.unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let bits = bits.try_into().ok().unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let bytes: u64 = bytes.try_into().ok().unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
self.bytes().try_into().unwrap()
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
self.bits().try_into().unwrap()
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// Either `1 << (pointer_bits - 1)` or [`Align::MAX`], whichever is smaller.
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
self.bytes().try_into().unwrap()
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
self.bits().try_into().unwrap()
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
for candidate in [I64, I32, I16] {
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
FieldsShape::Array { count, .. } => count.try_into().unwrap(),
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let i = u64::try_from(i).unwrap();
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
.unwrap(),
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)` which return Option<&T>/Option<&mut T>.
warning correctness unchecked-indexing
/// offset (field [`Variants::Multiple::tag_field`] of the enum).
Info: This standard library function returns a Result. Ensure the Result is handled properly (e.g., using '?', match, if let) rather than potentially panicking with .unwrap() or .expect().
info correctness unhandled-result
s.parse::<u32>().map(AddressSpace).map_err(|err| {
Info: This standard library function returns a Result. Ensure the Result is handled properly (e.g., using '?', match, if let) rather than potentially panicking with .unwrap() or .expect().
info correctness unhandled-result
s.parse::<u64>().map_err(|err| TargetDataLayoutError::InvalidBits {
Info: This standard library function returns a Result. Ensure the Result is handled properly (e.g., using '?', match, if let) rather than potentially panicking with .unwrap() or .expect().
info correctness unhandled-result
let Ok(bits) = s[1..].parse::<u64>() else {
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match bits {
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match s {
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match x {
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match x {
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match size.bits() {
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Float::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Float::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Primitive::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Primitive::*;
Info: Wildcard imports (`use some::path::*;`) can obscure the origin of names and lead to conflicts. Prefer importing specific items explicitly.
info maintainability wildcard-import
use Integer::*;
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match count {
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
DiagArgValue::Str(std::borrow::Cow::Borrowed(match self.0 {
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match self {
Info: Ensure 'match' statements are exhaustive. If matching on enums, consider adding a wildcard arm `_ => {}` only if necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match self.backend_repr {

Get this view in your editor

Same data, no extra tab — call code_get_file + code_get_findings over MCP from Claude/Cursor/Copilot.