diff --git a/src/eval_context.rs b/src/eval_context.rs index 6ed6b6bebc066..2dd6662e63086 100644 --- a/src/eval_context.rs +++ b/src/eval_context.rs @@ -24,24 +24,24 @@ pub type MirRef<'tcx> = Ref<'tcx, mir::Mir<'tcx>>; pub struct EvalContext<'a, 'tcx: 'a> { /// The results of the type checker, from rustc. - pub(super) tcx: TyCtxt<'a, 'tcx, 'tcx>, + pub(crate) tcx: TyCtxt<'a, 'tcx, 'tcx>, /// The virtual memory system. - pub(super) memory: Memory<'a, 'tcx>, + pub(crate) memory: Memory<'a, 'tcx>, /// Precomputed statics, constants and promoteds. - pub(super) globals: HashMap, Global<'tcx>>, + pub(crate) globals: HashMap, Global<'tcx>>, /// The virtual call stack. - pub(super) stack: Vec>, + pub(crate) stack: Vec>, /// The maximum number of stack frames allowed - pub(super) stack_limit: usize, + pub(crate) stack_limit: usize, /// The maximum number of operations that may be executed. /// This prevents infinite loops and huge computations from freezing up const eval. /// Remove once halting problem is solved. - pub(super) steps_remaining: u64, + pub(crate) steps_remaining: u64, } /// A stack frame. diff --git a/src/memory.rs b/src/memory.rs index 0d3bec62e9939..0c7b4971e1df7 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -152,33 +152,45 @@ impl<'tcx> Function<'tcx> { //////////////////////////////////////////////////////////////////////////////// pub struct Memory<'a, 'tcx> { - /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations) + /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations). alloc_map: HashMap, - /// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from stepping - /// out of its own allocations. - /// This set only contains statics backed by an allocation. If they are ByVal or ByValPair they - /// are not here, but will be inserted once they become ByRef. + + /// The AllocId to assign to the next new allocation. 
Always incremented, never gets smaller. + next_id: AllocId, + + /// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from + /// stepping out of its own allocations. This set only contains statics backed by an + /// allocation. If they are ByVal or ByValPair they are not here, but will be inserted once + /// they become ByRef. static_alloc: HashSet, - /// Number of virtual bytes allocated + + /// Number of virtual bytes allocated. memory_usage: u64, - /// Maximum number of virtual bytes that may be allocated + + /// Maximum number of virtual bytes that may be allocated. memory_size: u64, + /// Function "allocations". They exist solely so pointers have something to point to, and /// we can figure out what they point to. functions: HashMap>, + /// Inverse map of `functions` so we don't allocate a new pointer every time we need one function_alloc_cache: HashMap, AllocId>, - next_id: AllocId, + + /// Target machine data layout to emulate. pub layout: &'a TargetDataLayout, - /// List of memory regions containing packed structures - /// We mark memory as "packed" or "unaligned" for a single statement, and clear the marking afterwards. - /// In the case where no packed structs are present, it's just a single emptyness check of a set - /// instead of heavily influencing all memory access code as other solutions would. + + /// List of memory regions containing packed structures. + /// + /// We mark memory as "packed" or "unaligned" for a single statement, and clear the marking + /// afterwards. In the case where no packed structs are present, it's just a single emptiness + /// check of a set instead of heavily influencing all memory access code as other solutions + /// would. 
/// - /// One disadvantage of this solution is the fact that you can cast a pointer to a packed struct - /// to a pointer to a normal struct and if you access a field of both in the same MIR statement, - /// the normal struct access will succeed even though it shouldn't. - /// But even with mir optimizations, that situation is hard/impossible to produce. + /// One disadvantage of this solution is the fact that you can cast a pointer to a packed + /// struct to a pointer to a normal struct and if you access a field of both in the same MIR + /// statement, the normal struct access will succeed even though it shouldn't. But even with + /// mir optimizations, that situation is hard/impossible to produce. packed: BTreeSet, /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate