compiler/rustc_const_eval/src/interpret/memory.rs
//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!

use std::borrow::{Borrow, Cow};
use std::cell::Cell;
use std::collections::VecDeque;
use std::{assert_matches, fmt, ptr};

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_middle::bug;
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use tracing::{debug, instrument, trace};

use super::{
    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
    MayLeak, Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
    err_ub_format, interp_ok, throw_ub, throw_ub_format, throw_unsup, throw_unsup_format,
};
use crate::const_eval::ConstEvalErrKind;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{m}"),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A variable argument list allocation (used by c-variadic functions).
    VaList,
    /// A vtable allocation.
    VTable,
    /// A TypeId allocation.
    TypeId,
    /// A dead allocation.
    Dead,
}

/// Metadata about an `AllocId`.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct AllocInfo {
    pub size: Size,
    pub align: Align,
    pub kind: AllocKind,
    pub mutbl: Mutability,
}

impl AllocInfo {
    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
        Self { size, align, kind, mutbl }
    }
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => interp_ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}
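
// Illustrative sketch (not from this file): callers that dispatch function
// calls typically match on `FnVal` themselves and reserve `as_instance` for
// contexts where only a real MIR instance makes sense, e.g.
//
//     match fn_val {
//         FnVal::Instance(instance) => { /* call the MIR body of `instance` */ }
//         FnVal::Other(extra) => { /* machine-specific function, e.g. a shim */ }
//     }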

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'tcx, M: Machine<'tcx>> {
    /// Allocations local to this instance of the interpreter. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,

    /// Map storing variable argument lists.
    va_list_map: FxIndexMap<AllocId, VecDeque<MPlaceTy<'tcx, M::Provenance>>>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,

    /// This stores whether we are currently doing reads purely for the purpose of validation.
    /// Those reads do not trigger the machine's hooks for memory reads.
    /// Needless to say, this must only be set with great care!
    validation_in_progress: Cell<bool>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxIndexMap::default(),
            va_list_map: FxIndexMap::default(),
            dead_alloc_map: FxIndexMap::default(),
            validation_in_progress: Cell::new(false),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation.
    /// Must never be used for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline]
    pub fn global_root_pointer(
        &self,
        ptr: Pointer<CtfeProvenance>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance.alloc_id();
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_pointer(self, def_id);
            }
            None => {
                let is_fn_ptr = self.memory.extra_fn_ptr_map.contains_key(&alloc_id);
                let is_va_list = self.memory.va_list_map.contains_key(&alloc_id);
                assert!(
                    is_fn_ptr || is_va_list,
                    "{alloc_id:?} is neither global, va_list nor a function pointer"
                );
            }
            _ => {}
        }
        // And we need to get the provenance.
        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
    }

    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => {
                let salt = M::get_global_alloc_salt(self, Some(instance));
                self.tcx.reserve_and_set_fn_alloc(instance, salt)
            }
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right root pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_root_pointer(Pointer::from(id)).unwrap()
    }
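
    // Sketch (hypothetical usage, assuming an interpreter context `ecx` and a
    // resolved `instance` in scope): create an interpreter pointer to a
    // function, which can then be stored in memory like any other pointer:
    //
    //     let fn_ptr = ecx.fn_ptr(FnVal::Instance(instance));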

    /// Insert a new variable argument list in the global map of variable argument lists.
    pub fn va_list_ptr(
        &mut self,
        varargs: VecDeque<MPlaceTy<'tcx, M::Provenance>>,
    ) -> Pointer<M::Provenance> {
        let id = self.tcx.reserve_alloc_id();
        let old = self.memory.va_list_map.insert(id, varargs);
        assert!(old.is_none());
        // Variable argument lists are global allocations, so make sure we get the right root
        // pointer. We know this is not an `extern static` so this cannot fail.
        self.global_root_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = if M::PANIC_ON_ALLOC_FAIL {
            Allocation::new(size, align, init, params)
        } else {
            Allocation::try_new(size, align, init, params)?
        };
        self.insert_allocation(alloc, kind)
    }

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = Allocation::from_bytes(bytes, align, mutability, params);
        self.insert_allocation(alloc, kind)
    }

    pub fn insert_allocation(
        &mut self,
        alloc: Allocation<M::Provenance, (), M::Bytes>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        assert!(alloc.size() <= self.max_size_of_val());
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This cannot be merged with the `adjust_global_allocation` code path
        // since here we have an allocation that already uses `M::Bytes`.
        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
        let alloc = alloc.with_extra(extra);
        self.memory.alloc_map.insert(id, (kind, alloc));
        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
    }
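
    // Sketch (hypothetical usage; `ecx`, `size`, `align`, and `kind` are assumed
    // to be in scope): allocation and deallocation must use the same
    // `MemoryKind`, and deallocation requires a pointer at offset 0:
    //
    //     let ptr = ecx.allocate_ptr(size, align, kind, AllocInit::Uninit)?;
    //     // ... use the allocation ...
    //     ecx.deallocate_ptr(ptr.into(), None, kind)?;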

    /// If this grows the allocation, `init_growth` determines
    /// whether the additional space will be initialized.
    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init_growth: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {ptr} which does not point to the beginning of an object"
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        // If requested, we zero-init the entire allocation, to ensure that a growing
        // allocation has its new bytes properly set. For the part that is copied,
        // `mem_copy` below will de-initialize things as necessary.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        interp_ok(new_ptr)
    }

    /// Mark the `const_allocate`d allocation `ptr` points to as immutable so we can intern it.
    pub fn make_const_heap_ptr_global(
        &mut self,
        ptr: Pointer<Option<CtfeProvenance>>,
    ) -> InterpResult<'tcx>
    where
        M: Machine<'tcx, MemoryKind = crate::const_eval::MemoryKind, Provenance = CtfeProvenance>,
    {
        let (alloc_id, offset, _) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            return Err(ConstEvalErrKind::ConstMakeGlobalWithOffset(ptr)).into();
        }

        if self.tcx.try_get_global_alloc(alloc_id).is_some() {
            // This points to something outside the current interpreter.
            return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
        }

        // If we can't find it in `alloc_map` it must be dangling (because we don't use
        // `extra_fn_ptr_map` in const-eval).
        let (kind, alloc) = self
            .memory
            .alloc_map
            .get_mut_or(alloc_id, || Err(ConstEvalErrKind::ConstMakeGlobalWithDanglingPtr(ptr)))?;

        // Ensure this is actually a *heap* allocation, and record it as made-global.
        match kind {
            MemoryKind::Stack | MemoryKind::CallerLocation => {
                return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
            }
            MemoryKind::Machine(crate::const_eval::MemoryKind::Heap { was_made_global }) => {
                if *was_made_global {
                    return Err(ConstEvalErrKind::ConstMakeGlobalPtrAlreadyMadeGlobal(alloc_id))
                        .into();
                }
                *was_made_global = true;
            }
        }

        // Prevent further mutation, this is now an immutable global.
        alloc.mutability = Mutability::Not;

        interp_ok(())
    }
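
    // Sketch of the source-level flow this supports (roughly; intrinsic names as
    // currently exposed in `core::intrinsics`, exact behavior assumed):
    //
    //     let p: *mut u8 = intrinsics::const_allocate(4, 4); // heap memory kind
    //     // ... initialize the bytes ...
    //     let q = intrinsics::const_make_global(p); // lands in `make_const_heap_ptr_global`
    //
    // Afterwards the allocation is immutable and can be interned as a global.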

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
        trace!("deallocating: {alloc_id}");

        if offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {ptr} which does not point to the beginning of an object"
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function { .. }) => {
                    err_ub_format!("deallocating {alloc_id}, which is a function")
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_format!("deallocating {alloc_id}, which is a vtable")
                }
                Some(GlobalAlloc::TypeId { .. }) => {
                    err_ub_format!("deallocating {alloc_id}, which is a type id")
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_format!("deallocating {alloc_id}, which is static memory")
                }
                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccess)),
            })
            .into();
        };

        if alloc.mutability.is_not() {
            throw_ub_format!("deallocating immutable allocation {alloc_id}");
        }
        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {alloc_id}, which is {alloc_kind} memory, using {kind} deallocation operation",
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: {alloc_id} has size {size} and alignment {align}, but gave size {size_found} and alignment {align_found}",
                    size = alloc.size().bytes(),
                    align = alloc.align.bytes(),
                    size_found = size.bytes(),
                    align_found = align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            ptr,
            (alloc_id, prov),
            size,
            alloc.align,
            kind,
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        interp_ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(
            self,
            ptr,
            size,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (size, align) =
                    this.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccess)?;
                interp_ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }
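
    // Sketch (hypothetical): per the module docs, a zero-sized access succeeds
    // even for a dangling (but non-null, aligned) pointer, so alignment must be
    // checked separately and first:
    //
    //     ecx.check_ptr_align(ptr, align)?; // can fail even for size 0
    //     ecx.check_ptr_access(ptr, Size::ZERO, CheckInAllocMsg::MemoryAccess)?; // always succeeds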

    /// Check if the given pointer points to live memory of the given `size`.
    /// The caller can control the error message for the out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Check whether the given pointer points to live memory for a signed amount of bytes.
    /// A negative amount means that the given range of memory to the left of the pointer
    /// needs to be dereferenceable.
    pub fn check_ptr_access_signed(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
    /// a pointer.
    ///
    /// `alloc_size` will only get called for non-zero-sized accesses.
    ///
    /// Returns `None` if and only if the size is 0.
    fn check_and_deref_ptr<T, R: Borrow<Self>>(
        this: R,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            R,
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        // Everything is okay with size 0.
        if size == 0 {
            return interp_ok(None);
        }

        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
            Err(addr) => {
                // We couldn't get a proper allocation.
                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
            }
            Ok((alloc_id, offset, prov)) => {
                let tcx = this.borrow().tcx;
                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
                let offset = offset.bytes();
                // Compute absolute begin and end of the range.
                let (begin, end) = if size >= 0 {
                    (Some(offset), offset.checked_add(size as u64))
                } else {
                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
                };
                // Ensure both are within bounds.
                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
                if !in_bounds {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
                        inbounds_size: size,
                        msg,
                    })
                }

                Some(ret_val)
            }
        })
    }
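
    // Worked example of the signed-size convention used above: for a pointer
    // `p` at offset `o` in its allocation,
    //
    //     check_ptr_access_signed(p, 8, msg)  // requires [o, o+8) to be in-bounds
    //     check_ptr_access_signed(p, -8, msg) // requires [o-8, o) to be in-bounds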

    pub(super) fn check_misalign(
        &self,
        misaligned: Option<Misalignment>,
        msg: CheckAlignMsg,
    ) -> InterpResult<'tcx> {
        if let Some(misaligned) = misaligned {
            throw_ub!(AlignmentCheckFailed(misaligned, msg))
        }
        interp_ok(())
    }

    pub(super) fn is_ptr_misaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> Option<Misalignment> {
        if !M::enforce_alignment(self) || align.bytes() == 1 {
            return None;
        }

        #[inline]
        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
            if offset.is_multiple_of(align.bytes()) {
                None
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
            }
        }

        match self.ptr_try_get_alloc_id(ptr, 0) {
            Err(addr) => is_offset_misaligned(addr, align),
            Ok((alloc_id, offset, _prov)) => {
                let alloc_info = self.get_alloc_info(alloc_id);
                if let Some(misalign) = M::alignment_check(
                    self,
                    alloc_id,
                    alloc_info.align,
                    alloc_info.kind,
                    offset,
                    align,
                ) {
                    Some(misalign)
                } else if M::Provenance::OFFSET_IS_ADDR {
                    is_offset_misaligned(ptr.addr().bytes(), align)
                } else {
                    // Check allocation alignment and offset alignment.
                    if alloc_info.align.bytes() < align.bytes() {
                        Some(Misalignment { has: alloc_info.align, required: align })
                    } else {
                        is_offset_misaligned(offset.bytes(), align)
                    }
                }
            }
        }
    }

    /// Checks a pointer for misalignment.
    ///
    /// The error assumes this is checking the pointer used directly for an access.
    pub fn check_ptr_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> InterpResult<'tcx> {
        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
    }
}
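
// Worked example for `is_offset_misaligned` above: an offset of 12 checked
// against a required alignment of 8 has `trailing_zeros() == 2`, so the pointer
// is only known to be 4-aligned and the reported misalignment is
// `Misalignment { has: Align(4), required: Align(8) }`.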

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
        // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
        // is live, here all the IDs in the map are for dead allocations so we don't
        // need to check for liveness.
        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
    }
}

/// Allocation accessors
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ExternStatic(def_id));
                }

                // We don't give a span -- statics don't need that, they cannot be generic or associated.
                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
                (val, Some(def_id))
            }
        };
        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_global_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc.inner(),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    pub fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
            // so we use `report_err` for that.
            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                            not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => interp_ok(&a.1),
            Err(a) => a.into(),
        }
    }

    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
        let alloc = self.get_alloc_raw(id)?;
        interp_ok(alloc.get_bytes_unchecked_raw())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let alloc = this.get_alloc_raw(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
            },
        )?;
        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
        if !self.memory.validation_in_progress.get() {
            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
                M::before_alloc_access(self.tcx, &self.machine, alloc_id)?;
            }
        }

        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !self.memory.validation_in_progress.get() {
                M::before_memory_read(
                    self.tcx,
                    &self.machine,
                    &alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        interp_ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        interp_ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    pub fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
        // of global allocations, but that's not worth it.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Slow path.
            // Allocation not found locally, so look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                    not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability.is_not() {
            throw_ub!(WriteToReadOnly(id))
        }
        interp_ok((alloc, &mut self.machine))
    }

    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, *mut u8> {
        let alloc = self.get_alloc_raw_mut(id)?.0;
        interp_ok(alloc.get_bytes_unchecked_raw_mut())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc_mut<'a>(
        &'a mut self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let tcx = self.tcx;
        let validation_in_progress = self.memory.validation_in_progress.get();

        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
            },
        )?;

        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !validation_in_progress {
                // For writes, it's okay to only call those when there actually is a non-zero
                // amount of bytes to be written: a zero-sized write doesn't manifest anything.
                M::before_alloc_access(tcx, machine, alloc_id)?;
                M::before_memory_write(
                    tcx,
                    machine,
                    &mut alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        interp_ok((&mut alloc.extra, machine))
    }
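
    // Sketch (hypothetical usage): typical bounds-checked access goes through
    // the `AllocRef(Mut)` wrappers rather than the raw accessors:
    //
    //     if let Some(alloc_ref) = ecx.get_ptr_alloc(ptr, size)? {
    //         let val = alloc_ref.read_integer(alloc_range(Size::ZERO, size))?;
    //     } // `None` means the access was zero-sized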

    /// Check whether an allocation is live. This is faster than calling
    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
    pub fn is_alloc_live(&self, id: AllocId) -> bool {
        self.memory.alloc_map.contains_key_ref(&id)
            || self.memory.extra_fn_ptr_map.contains_key(&id)
            || self.memory.va_list_map.contains_key(&id)
            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
            // This also matches the order in `get_alloc_info`.
            || self.tcx.try_get_global_alloc(id).is_some()
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return AllocInfo::new(
                alloc.size(),
                alloc.align,
                AllocKind::LiveData,
                alloc.mutability,
            );
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if let Some(fn_val) = self.get_fn_alloc(id) {
            let align = match fn_val {
                FnVal::Instance(_instance) => {
                    // FIXME: Until we have a clear design for the effects of align(N) functions
                    // on the address of function pointers, we don't consider the align(N)
                    // attribute on functions in the interpreter.
                    // See <https://github.com/rust-lang/rust/issues/144661> for more context.
                    Align::ONE
                }
                // Machine-specific extra functions currently do not support alignment restrictions.
                FnVal::Other(_) => Align::ONE,
            };

            return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
        }

        // # Variable argument lists
        if self.memory.va_list_map.contains_key(&id) {
            return AllocInfo::new(Size::ZERO, Align::ONE, AllocKind::VaList, Mutability::Not);
        }

        // # Global allocations
        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
            // NOTE: `static` alignment from attributes has already been applied to the allocation.
            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
            let kind = match global_alloc {
                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
                GlobalAlloc::VTable { .. } => AllocKind::VTable,
                GlobalAlloc::TypeId { .. } => AllocKind::TypeId,
            };
            return AllocInfo::new(size, align, kind, mutbl);
        }

        // # Dead pointers
        let (size, align) = *self
            .memory
            .dead_alloc_map
            .get(&id)
            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
    }

    /// Obtain the size and alignment of a *live* allocation.
    fn get_live_alloc_size_and_align(
        &self,
        id: AllocId,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let info = self.get_alloc_info(id);
        if info.kind == AllocKind::Dead {
            throw_ub!(PointerUseAfterFree(id, msg))
        }
        interp_ok((info.size, info.align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
    /// provenance refers to, as well as the segment of the hash that this pointer covers.
    pub fn get_ptr_type_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
        let Some(GlobalAlloc::TypeId { ty }) = self.tcx.try_get_global_alloc(alloc_id) else {
            throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
        };
        interp_ok((ty, offset.bytes()))
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
            .into()
    }

    pub fn get_ptr_va_list(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, &VecDeque<MPlaceTy<'tcx, M::Provenance>>> {
        trace!("get_ptr_va_list({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
        }

        let Some(va_list) = self.memory.va_list_map.get(&alloc_id) else {
            throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
        };

        interp_ok(va_list)
    }
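
    // Sketch of the va_list lifecycle (assumed caller shape): `va_list_ptr`
    // creates the entry, `get_ptr_va_list` inspects it while the c-variadic
    // callee runs, and `deallocate_va_list` below removes it on frame teardown:
    //
    //     let ptr = ecx.va_list_ptr(varargs);
    //     let args = ecx.get_ptr_va_list(ptr.into())?;
    //     let args = ecx.deallocate_va_list(ptr.into())?;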

    /// Removes this VaList from the global map of variable argument lists. This does not
    /// deallocate the VaList elements; that happens when the Frame is popped.
    pub fn deallocate_va_list(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, VecDeque<MPlaceTy<'tcx, M::Provenance>>> {
        trace!("deallocate_va_list({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
        }

        let Some(va_list) = self.memory.va_list_map.swap_remove(&alloc_id) else {
            throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
        };

        self.memory.dead_alloc_map.insert(alloc_id, (Size::ZERO, Align::ONE));
        interp_ok(va_list)
    }

    /// Get the dynamic type of the given vtable pointer.
    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
    pub fn get_ptr_vtable_ty(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
    ) -> InterpResult<'tcx, Ty<'tcx>> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
            self.tcx.try_get_global_alloc(alloc_id)
        else {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        };
        if let Some(expected_dyn_type) = expected_trait {
            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
        }
        interp_ok(ty)
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        interp_ok(())
    }

    /// Visit all allocations reachable from the given start set, by recursively traversing the
    /// provenance information of those allocations.
    pub fn visit_reachable_allocs(
        &mut self,
        start: Vec<AllocId>,
        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        let mut done = FxHashSet::default();
        let mut todo = start;
        while let Some(id) = todo.pop() {
            if !done.insert(id) {
                // We already saw this allocation before, don't process it again.
                continue;
            }
            let info = self.get_alloc_info(id);

            // Recurse, if there is data here.
            // Do this *before* invoking the callback, as the callback might mutate the
            // allocation and e.g. replace all provenance by wildcards!
            if info.kind == AllocKind::LiveData {
                let alloc = self.get_alloc_raw(id)?;
                for prov in alloc.provenance().provenances() {
                    if let Some(id) = prov.get_alloc_id() {
                        todo.push(id);
                    }
                }
            }

            // Call the callback.
            visit(self, id, &info)?;
        }
        interp_ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print the allocation's bytes, without any nested allocations.
    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
        // Using the "raw" access to avoid the `before_alloc_read` hook, we specifically
        // want to be able to read all memory for diagnostics, even if that is cyclic.
        let alloc = self.get_alloc_raw(id).unwrap();
        let mut bytes = String::new();
        if alloc.size() != Size::ZERO {
            bytes = "\n".into();
            // FIXME(translation) there might be pieces that are translatable.
            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
                .unwrap();
        }
        bytes
    }
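
    // Sketch (hypothetical): the dump helpers are lazy `Debug` printers, so they
    // cost nothing unless the log level is active:
    //
    //     trace!("{:?}", ecx.dump_alloc(id)); // prints `id` and everything it points to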

    /// Find leaked allocations, remove them from memory, and return them.
    /// Allocations reachable from `static_roots` or a `Global` allocation are not
    /// considered leaked, nor are allocations whose kind's `may_leak()` returns true.
    ///
    /// This is highly destructive, no more execution can happen after this!
    pub fn take_leaked_allocations(
        &mut self,
        static_roots: impl FnOnce(&Self) -> &[AllocId],
    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
    {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots(self));
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add the allocations it points to, to `todo`.
                    // We only need to care about `alloc_map` memory here, as entirely unchanged
                    // global memory cannot point to memory relevant for the leak check.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let mut result = Vec::new();
        for &id in leaked.iter() {
            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
            result.push((id, kind, alloc));
        }
        result
    }

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            self.memory.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.memory.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }
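
    // Sketch (hypothetical usage; `do_validation` is an assumed caller-side
    // routine): validation wraps its reads so machine read hooks stay quiet:
    //
    //     let result = ecx.run_for_validation_mut(|ecx| do_validation(ecx));
    //
    // Nesting such calls would trip the `validation_in_progress` assertions.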

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            self.memory.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.memory.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    pub(super) fn validation_in_progress(&self) -> bool {
        self.memory.validation_in_progress.get()
    }
}

#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
    ecx: &'a InterpCx<'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra, Bytes>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
            {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some((kind, alloc)) => {
                    // normal alloc
                    write!(fmt, " ({kind}, ")?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function { instance, .. }) => {
                            write!(fmt, " (fn: {instance})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                        }
                        Some(GlobalAlloc::TypeId { ty }) => {
                            write!(fmt, " (typeid for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
{
    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);

        self.alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
    }

    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
    pub fn write_uninit(&mut self, range: AllocRange) {
        let range = self.range.subrange(range);

        self.alloc.write_uninit(&self.tcx, range);
    }

    /// Mark the entire referenced range as uninitialized
    pub fn write_uninit_full(&mut self) {
        self.alloc.write_uninit(&self.tcx, self.range);
    }

    /// Remove all provenance in the reference range.
    pub fn clear_provenance(&mut self) {
        self.alloc.clear_provenance(&self.tcx, self.range);
    }
}

impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, Scalar<Prov>> {
        let range = self.range.subrange(range);
        self.alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(range, /*read_provenance*/ false)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size()),
            /*read_provenance*/ true,
        )
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
        self.alloc
            .get_bytes_strip_provenance(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
    pub fn has_provenance(&self) -> bool {
        !self.alloc.provenance().range_empty(self.range, &self.tcx)
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Reads the given number of bytes from memory, and strips their provenance if possible.
    /// Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes_ptr_strip_provenance(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
            // zero-sized access
            return interp_ok(&[]);
        };
        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        interp_ok(
            alloc_ref
                .alloc
                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
        )
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
            // zero-sized access
            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
            return interp_ok(());
        };

        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
        let bytes =
            alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        interp_ok(())
    }

    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        dest: Pointer<Option<M::Provenance>>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
    /// `i` is the index of the copy).
    ///
    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
    /// may overlap is not supported.
    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        dest: Pointer<Option<M::Provenance>>,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication

        // Similar to `get_ptr_alloc`, we need to call `before_alloc_access` even for zero-sized
        // reads. However, just like in `get_ptr_alloc_mut`, the write part is okay to skip for
        // zero-sized writes.
        if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
        {
            M::before_alloc_access(tcx, &self.machine, alloc_id)?;
        }

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
            return interp_ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");

        // Trigger read hook.
        // For the overlapping case, it is crucial that we trigger the read hook
        // before the write hook -- the aliasing model cares about the order.
        M::before_memory_read(
            tcx,
            &self.machine,
            &src_alloc.extra,
            src,
            (src_alloc_id, src_prov),
            src_range,
        )?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
            // Zero-sized *destination*.
            return interp_ok(());
        };

        // Prepare getting source provenance.
        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // First copy the provenance to a temporary buffer, because
        // `get_bytes_unchecked_for_overwrite_ptr` will clear the provenance (in preparation for
        // inserting the new provenance), and that can overlap with the source range.
        let provenance = src_alloc.provenance_prepare_copy(src_range, self);
        // Prepare a copy of the initialization mask.
        let init = src_alloc.init_mask().prepare_copy(src_range);

        // Destination alloc preparations...
        let (dest_alloc, machine) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        // ...and access hooks.
        M::before_alloc_access(tcx, machine, dest_alloc_id)?;
        M::before_memory_write(
            tcx,
            machine,
            &mut dest_alloc.extra,
            dest,
            (dest_alloc_id, dest_prov),
            dest_range,
        )?;
        // Yes we do overwrite all bytes in `dest_bytes`.
        let dest_bytes =
            dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();

        if init.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            dest_alloc.write_uninit(&tcx, dest_range);
            // `write_uninit` also resets the provenance, so we are done.
            return interp_ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_format!("`copy_nonoverlapping` called on overlapping ranges");
                    }
                }
            }
            if num_copies > 1 {
                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
            }

            let size_in_bytes = size.bytes_usize();
            // For particularly large arrays (where this is perf-sensitive) it's common that
            // we're writing a single byte repeatedly. So, optimize that case to a memset.
            if size_in_bytes == 1 {
                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
                let value = *src_bytes;
                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
            } else if src_alloc_id == dest_alloc_id {
                let mut dest_ptr = dest_bytes;
                for _ in 0..num_copies {
                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
                    // one copy.
                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
                    dest_ptr = dest_ptr.add(size_in_bytes);
                }
            } else {
                let mut dest_ptr = dest_bytes;
                for _ in 0..num_copies {
                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
                    dest_ptr = dest_ptr.add(size_in_bytes);
                }
            }
        }

        // now fill in all the "init" data
        dest_alloc.init_mask_apply_copy(
            init,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // copy the provenance to the destination
        dest_alloc.provenance_apply_copy(provenance, alloc_range(dest_offset, size), num_copies);

        interp_ok(())
    }
}

/// Machine pointer introspection.
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
        match scalar.try_to_scalar_int() {
            Ok(int) => interp_ok(int.is_null()),
            Err(_) => {
                // We can't cast this pointer to an integer. Can only happen during CTFE.
                let ptr = scalar.to_pointer(self)?;
                match self.ptr_try_get_alloc_id(ptr, 0) {
                    Ok((alloc_id, offset, _)) => {
                        let info = self.get_alloc_info(alloc_id);
                        if info.kind == AllocKind::TypeId {
                            // We *could* actually precisely answer this question since here,
                            // the offset *is* the integer value. But the entire point of making
                            // this a pointer is not to leak the integer value, so we say everything
                            // might be null.
                            return interp_ok(true);
                        }
                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
                        if offset <= info.size {
                            return interp_ok(false);
                        }
                        // If the allocation is N-aligned, and the offset is not divisible by N,
                        // then `base + offset` has a non-zero remainder after division by `N`,
                        // which means `base + offset` cannot be null.
                        if !offset.bytes().is_multiple_of(info.align.bytes()) {
                            return interp_ok(false);
                        }
                        // We don't know enough, this might be null.
                        interp_ok(true)
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        }
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
    ///
    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
    /// where a wildcard pointer sits right in between two allocations.
    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
    /// for handling wildcard pointers.
    ///
    /// The result must be used immediately; it is not allowed to convert
    /// the returned data back into a `Pointer` and store that in machine state.
    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
    /// we don't have an operation to turn it back into `M::Provenance`.)
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::Provenance::OFFSET_IS_ADDR);
                    // Offset is absolute, as we just asserted.
                    let (_, addr) = ptr.into_raw_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
    ///
    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
    /// where a wildcard pointer sits right in between two allocations.
    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
    /// for handling wildcard pointers.
    ///
    /// The result must be used immediately; it is not allowed to convert
    /// the returned data back into a `Pointer` and store that in machine state.
    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
    /// we don't have an operation to turn it back into `M::Provenance`.)
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
        self.ptr_try_get_alloc_id(ptr, size)
            .map_err(|offset| {
                err_ub!(DanglingIntPointer {
                    addr: offset,
                    inbounds_size: size,
                    msg: CheckInAllocMsg::Dereferenceable
                })
            })
            .into()
    }
}
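The leak check in `take_leaked_allocations` above is a plain worklist reachability pass: seed the worklist with global allocations and the `static_roots`, then follow provenance edges until a fixed point. A self-contained sketch of the same traversal over a hypothetical points-to graph (the names `reachable_from` and `points_to` are illustrative, not part of this file):

use std::collections::{HashMap, HashSet};

/// Worklist reachability, as in `take_leaked_allocations`: every id popped
/// from `todo` is marked reachable at most once, and its successors enqueued.
fn reachable_from(roots: &[u32], points_to: &HashMap<u32, Vec<u32>>) -> HashSet<u32> {
    let mut reachable = HashSet::new();
    let mut todo: Vec<u32> = roots.to_vec();
    while let Some(id) = todo.pop() {
        if reachable.insert(id) {
            // Newly discovered allocation: follow its outgoing pointers.
            if let Some(succs) = points_to.get(&id) {
                todo.extend(succs.iter().copied());
            }
        }
    }
    reachable
}

fn main() {
    let graph = HashMap::from([(1u32, vec![2u32]), (2, vec![1]), (3, vec![])]);
    let live = reachable_from(&[1], &graph);
    assert!(live.contains(&2) && !live.contains(&3)); // 3 would be reported as leaked
}

Anything outside the returned set whose kind does not `may_leak()` is then removed from the allocation map and reported.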
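Likewise, the `copy_nonoverlapping` check in `mem_copy_repeatedly` reduces to a symmetric interval test: two `size`-byte ranges overlap exactly when each start lies strictly below the other range's end. A minimal sketch of that predicate with plain `u64` offsets (the helper name is hypothetical):

/// Returns true if two `len`-byte ranges starting at `a` and `b` overlap.
/// Mirrors the overlap check in `mem_copy_repeatedly` above.
fn ranges_overlap(a: u64, b: u64, len: u64) -> bool {
    (a <= b && a + len > b) || (b <= a && b + len > a)
}

fn main() {
    assert!(ranges_overlap(0, 3, 4)); // [0,4) and [3,7) share byte 3
    assert!(!ranges_overlap(0, 4, 4)); // [0,4) and [4,8) merely touch
    assert!(!ranges_overlap(0, 1, 0)); // zero-sized ranges never overlap
}

Zero-sized ranges never overlap under this test, which matches the early returns for zero-sized copies above.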

Code quality findings (26)

Critical: Use of the 'unsafe' keyword bypasses Rust's safety guarantees. It requires careful auditing, a clear justification (FFI, specific optimizations), and minimal scope.
error safety unsafe-block
unsafe {
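The file above follows the usual discipline here: bounds are validated in safe code first, and the `unsafe` block is kept small with its invariants recorded in a `SAFE:`/`SAFETY:` comment. A minimal sketch of that pattern (the `copy_prefix` helper is hypothetical):

fn copy_prefix(src: &[u8], dst: &mut [u8], n: usize) {
    // Validate everything in safe code first, so the unsafe block only has
    // to uphold the raw-pointer contract.
    assert!(n <= src.len() && n <= dst.len());
    // SAFETY: both pointers are valid for `n` bytes (checked above), and the
    // borrows guarantee the two regions do not overlap.
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), n);
    }
}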
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)`, which return `Option<&T>`/`Option<&mut T>`.
warning correctness unchecked-indexing
/// This is used by [priroda](https://github.com/oli-obk/priroda)
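A short sketch of the suggested `.get(index)` pattern (the `first_word` helper is hypothetical):

fn first_word(words: &[String]) -> Option<&String> {
    // `get` returns None instead of panicking when the slice is empty.
    words.get(0)
}

fn main() {
    let words = vec!["interp".to_string()];
    assert_eq!(first_word(&words), Some(&"interp".to_string()));
    assert_eq!(first_word(&[]), None);
}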
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
self.global_root_pointer(Pointer::from(id)).unwrap()
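In code that can report failures to a caller, the usual fix is to propagate with `?` rather than panic. A minimal sketch (the `parse_port` helpers are hypothetical):

use std::num::ParseIntError;

// Panics on malformed input:
fn parse_port_or_panic(s: &str) -> u16 {
    s.parse().unwrap()
}

// Propagates the error to the caller instead:
fn parse_port(s: &str) -> Result<u16, ParseIntError> {
    Ok(s.parse()?)
}

fn main() {
    assert_eq!(parse_port("8080"), Ok(8080));
    assert!(parse_port("not-a-port").is_err());
    let _ = parse_port_or_panic("8080"); // would panic on bad input
}

Note that several of the `unwrap()` calls flagged in this file assert interpreter invariants instead: for example, `alloc_map.remove(&id).unwrap()` in `take_leaked_allocations` runs only on ids just collected from that same map, so a panic there would signal a compiler bug rather than a recoverable error.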
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
self.global_root_pointer(Pointer::from(id)).unwrap()
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
Warning: '.expect()' will panic with a custom message on None/Err. While better than unwrap() for debugging, prefer non-panicking error handling in production code (match, if let, ?).
warning correctness expect-usage
let kind = M::GLOBAL_KIND.expect(
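Where the `None` case is a real runtime possibility rather than a bug, the `expect` message can be preserved while returning an error instead of panicking. A sketch using `ok_or_else` (the function and message are hypothetical):

fn global_kind_or_err(kind: Option<&'static str>) -> Result<&'static str, String> {
    // Instead of `kind.expect("...")`, turn the None case into an Err the
    // caller can handle.
    kind.ok_or_else(|| "no global memory kind configured".to_string())
}

fn main() {
    assert_eq!(global_kind_or_err(Some("global")), Ok("global"));
    assert!(global_kind_or_err(None).is_err());
}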
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
Warning: '.expect()' will panic with a custom message on None/Err. While better than unwrap() for debugging, prefer non-panicking error handling in production code (match, if let, ?).
warning correctness expect-usage
let kind = M::GLOBAL_KIND.expect(
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
Warning: '.expect()' will panic with a custom message on None/Err. While better than unwrap() for debugging, prefer non-panicking error handling in production code (match, if let, ?).
warning correctness expect-usage
.expect("deallocated pointers should all be recorded in `dead_alloc_map`");
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let alloc = self.get_alloc_raw(id).unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
.unwrap();
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
Warning: Direct indexing (e.g., `vec[i]`, `slice[i]`) panics on out-of-bounds access. Prefer using `.get(index)` or `.get_mut(index)`, which return `Option<&T>`/`Option<&mut T>`.
warning correctness unchecked-indexing
pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
Warning: '.expect()' will panic with a custom message on None/Err. While better than unwrap() for debugging, prefer non-panicking error handling in production code (match, if let, ?).
warning correctness expect-usage
let len = upper.expect("can only write bounded iterators");
Warning: '.expect()' will panic with a custom message on None/Err. While better than unwrap() for debugging, prefer non-panicking error handling in production code (match, if let, ?).
warning correctness expect-usage
*dest = src.next().expect("iterator was shorter than it said it would be");
Warning: '.unwrap()' will panic on None/Err variants. Prefer using pattern matching (match, if let), combinators (map, and_then), or the '?' operator for robust error handling.
warning correctness unwrap-usage
if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
Info: Usage of `#[allow(...)]` suppresses compiler lints. Ensure the allowance is justified, well-scoped, and ideally temporary. Overuse can hide potential issues.
info maintainability allow-lint
#[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
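The flagged attribute is already well-scoped: it sits on a single item and records its justification in a trailing comment. As a general sketch, prefer item-level allows with a recorded reason over crate-level ones (the `dump_state` helper is hypothetical):

// Too broad: would silence the lint for the whole crate.
// #![allow(dead_code)]

// Narrowly scoped: only this helper is exempt, and the reason is recorded.
#[allow(dead_code)] // kept for debugging sessions; not called in release builds
fn dump_state() {
    println!("state dump");
}

fn main() {}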
Info: Use of raw pointers (*const T, *mut T) typically requires 'unsafe' blocks for dereferencing. Ensure usage is justified (FFI, low-level optimizations) and memory safety is manually upheld.
info safety raw-pointer
pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
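Raw pointers handed out by methods like `get_alloc_bytes_unchecked_raw` are only dereferenced inside `unsafe` blocks with stated preconditions. A minimal sketch of that discipline (the `read_first` helper is hypothetical):

fn read_first(ptr: *const u8, len: usize) -> Option<u8> {
    if ptr.is_null() || len == 0 {
        return None;
    }
    // SAFETY: the caller guarantees `ptr` points to at least `len` readable
    // bytes, and we just checked it is non-null and non-empty.
    Some(unsafe { *ptr })
}

fn main() {
    let data = [42u8, 7];
    assert_eq!(read_first(data.as_ptr(), data.len()), Some(42));
    assert_eq!(read_first(std::ptr::null(), 0), None);
}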
Info: Use of raw pointers (*const T, *mut T) typically requires 'unsafe' blocks for dereferencing. Ensure usage is justified (FFI, low-level optimizations) and memory safety is manually upheld.
info safety raw-pointer
) -> InterpResult<'tcx, *mut u8> {
Info: Ensure 'match' statements are exhaustive. When matching on enums, add a wildcard arm `_ => {}` only if it is necessary and intentional, as it suppresses warnings about unhandled variants.
info correctness match-wildcard
match self.tcx.try_get_global_alloc(id) {
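The flagged `match` in `DumpAllocs` does list every `GlobalAlloc` case explicitly, which is the shape this finding recommends: with no wildcard arm, adding a variant becomes a compile error rather than a silently ignored case. A small sketch with a hypothetical enum:

enum Kind {
    Data,
    Function,
    VTable,
}

fn describe(k: Kind) -> &'static str {
    // Exhaustive: adding a `Kind` variant makes this a compile error,
    // forcing the new case to be handled explicitly.
    match k {
        Kind::Data => "data",
        Kind::Function => "fn",
        Kind::VTable => "vtable",
    }
}

fn main() {
    assert_eq!(describe(Kind::Data), "data");
    assert_eq!(describe(Kind::Function), "fn");
    assert_eq!(describe(Kind::VTable), "vtable");
}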
Performance Info: Calling .push() repeatedly inside a loop without prior capacity reservation can lead to multiple reallocations. Consider using `Vec::with_capacity(n)` or `vec.reserve(n)` if the approximate number of elements is known.
info performance push-without-reserve
todo.push(id);
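In `take_leaked_allocations` above, `leaked.len()` is known before the push loop, so a single up-front reservation would avoid regrowth. A sketch of the pattern (the function is hypothetical):

fn collect_squares(n: usize) -> Vec<usize> {
    // One allocation up front instead of repeated grow-and-copy steps.
    let mut result = Vec::with_capacity(n);
    for i in 0..n {
        result.push(i * i);
    }
    result
}

fn main() {
    assert_eq!(collect_squares(4), vec![0, 1, 4, 9]);
}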
Performance Info: Calling .push() repeatedly inside a loop without prior capacity reservation can lead to multiple reallocations. Consider using `Vec::with_capacity(n)` or `vec.reserve(n)` if the approximate number of elements is known.
info performance push-without-reserve
result.push((id, kind, alloc));
