From 4c4318634a344581236b72ebb77b3718b51e6fb5 Mon Sep 17 00:00:00 2001
From: Aleks Zi
Date: Thu, 21 Nov 2024 00:29:22 -0500
Subject: [PATCH 1/2] Refactoring interpreter and paranoid mode, introducing
 traits to allow a generic interpreter as well as a compile-time switch for
 the runtime type checks (formerly called paranoid mode)

---
 third_party/move/move-vm/runtime/src/debug.rs |    4 +-
 .../move-vm/runtime/src/frame_type_cache.rs   |  194 ++++
 .../move/move-vm/runtime/src/interpreter.rs   | 1025 ++---------------
 third_party/move/move-vm/runtime/src/lib.rs   |    2 +
 .../move-vm/runtime/src/native_functions.rs   |    9 +-
 .../runtime/src/runtime_type_checks.rs        |  684 +++++++++++
 .../implementations/unsync_module_storage.rs  |    7 +-
 .../move/move-vm/runtime/src/tracing.rs       |    4 +-
 8 files changed, 1005 insertions(+), 924 deletions(-)
 create mode 100644 third_party/move/move-vm/runtime/src/frame_type_cache.rs
 create mode 100644 third_party/move/move-vm/runtime/src/runtime_type_checks.rs

diff --git a/third_party/move/move-vm/runtime/src/debug.rs b/third_party/move/move-vm/runtime/src/debug.rs
index 4e36ed02df36e..7d0f5aadcda22 100644
--- a/third_party/move/move-vm/runtime/src/debug.rs
+++ b/third_party/move/move-vm/runtime/src/debug.rs
@@ -2,7 +2,7 @@
 // Copyright (c) The Move Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::{interpreter::Interpreter, loader::Resolver, LoadedFunction};
+use crate::{interpreter::InterpreterDebugInterface, loader::Resolver, LoadedFunction};
 use move_binary_format::file_format::Bytecode;
 use move_vm_types::values::{self, Locals};
 use std::{
@@ -102,7 +102,7 @@ impl DebugContext {
         pc: u16,
         instr: &Bytecode,
         resolver: &Resolver,
-        interp: &Interpreter,
+        interp: &dyn InterpreterDebugInterface,
     ) {
         let instr_string = format!("{:?}", instr);
         let function_string = function.name_as_pretty_string();
diff --git a/third_party/move/move-vm/runtime/src/frame_type_cache.rs b/third_party/move/move-vm/runtime/src/frame_type_cache.rs
new file mode 100644
index 0000000000000..c16eaaf382690
--- /dev/null
+++ b/third_party/move/move-vm/runtime/src/frame_type_cache.rs
@@ -0,0 +1,194 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::loader::Resolver;
+use move_binary_format::{
+    errors::*,
+    file_format::{
+        FieldInstantiationIndex, SignatureIndex, StructDefInstantiationIndex,
+        StructVariantInstantiationIndex, VariantFieldInstantiationIndex,
+    },
+};
+use move_core_types::gas_algebra::NumTypeNodes;
+use move_vm_types::loaded_data::runtime_types::Type;
+use std::collections::BTreeMap;
+
+#[derive(Default)]
+pub(crate) struct FrameTypeCache {
+    struct_field_type_instantiation:
+        BTreeMap<StructDefInstantiationIndex, Vec<(Type, NumTypeNodes)>>,
+    struct_variant_field_type_instantiation:
+        BTreeMap<StructVariantInstantiationIndex, Vec<(Type, NumTypeNodes)>>,
+    struct_def_instantiation_type: BTreeMap<StructDefInstantiationIndex, (Type, NumTypeNodes)>,
+    struct_variant_instantiation_type:
+        BTreeMap<StructVariantInstantiationIndex, (Type, NumTypeNodes)>,
+    /// For a given field instantiation, the pair:
+    /// ((type of the field, size of the field type), (type of its defining struct, size of its defining struct))
+    field_instantiation:
+        BTreeMap<FieldInstantiationIndex, ((Type, NumTypeNodes), (Type, NumTypeNodes))>,
+    /// Same as above, but for variant field instantiations
+    variant_field_instantiation:
+        BTreeMap<VariantFieldInstantiationIndex, ((Type, NumTypeNodes), (Type, NumTypeNodes))>,
+    single_sig_token_type: BTreeMap<SignatureIndex, (Type, NumTypeNodes)>,
+}
+
+impl FrameTypeCache {
+    #[inline(always)]
+    fn get_or<K: Copy + Ord + Eq, V, F>(
+        map: &mut BTreeMap<K, V>,
+        idx: K,
+        ty_func: F,
+    ) -> PartialVMResult<&V>
+    where
+        F: FnOnce(K) -> PartialVMResult<V>,
+    {
+        match map.entry(idx) {
+            std::collections::btree_map::Entry::Occupied(entry) => Ok(entry.into_mut()),
+            std::collections::btree_map::Entry::Vacant(entry) => {
+                let v = ty_func(idx)?;
+                Ok(entry.insert(v))
+            },
+        }
+    }
+
+    #[inline(always)]
+    pub(crate) fn get_field_type_and_struct_type(
+        &mut self,
+        idx: FieldInstantiationIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<((&Type, NumTypeNodes), (&Type, NumTypeNodes))> {
+        let ((field_ty, field_ty_count), (struct_ty, struct_ty_count)) =
+            Self::get_or(&mut self.field_instantiation, idx, |idx| {
+                let struct_type = resolver.field_instantiation_to_struct(idx, ty_args)?;
+                let struct_ty_count = NumTypeNodes::new(struct_type.num_nodes() as u64);
+                let field_ty = resolver.get_generic_field_ty(idx, ty_args)?;
+                let field_ty_count = NumTypeNodes::new(field_ty.num_nodes() as u64);
+                Ok(((field_ty, field_ty_count), (struct_type, struct_ty_count)))
+            })?;
+        Ok(((field_ty, *field_ty_count), (struct_ty, *struct_ty_count)))
+    }
+
+    pub(crate) fn get_variant_field_type_and_struct_type(
+        &mut self,
+        idx: VariantFieldInstantiationIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<((&Type, NumTypeNodes), (&Type, NumTypeNodes))> {
+        let ((field_ty, field_ty_count), (struct_ty, struct_ty_count)) =
+            Self::get_or(&mut self.variant_field_instantiation, idx, |idx| {
+                let info = resolver.variant_field_instantiation_info_at(idx);
+                let struct_type = resolver.create_struct_instantiation_ty(
+                    &info.definition_struct_type,
+                    &info.instantiation,
+                    ty_args,
+                )?;
+                let struct_ty_count = NumTypeNodes::new(struct_type.num_nodes() as u64);
+                let field_ty = resolver.instantiate_ty(
+                    &info.uninstantiated_field_ty,
+                    ty_args,
+                    &info.instantiation,
+                )?;
+                let field_ty_count = NumTypeNodes::new(field_ty.num_nodes() as u64);
+                Ok(((field_ty, field_ty_count), (struct_type, struct_ty_count)))
+            })?;
+        Ok(((field_ty, *field_ty_count), (struct_ty, *struct_ty_count)))
+    }
+
+    #[inline(always)]
+    pub(crate) fn get_struct_type(
+        &mut self,
+        idx: StructDefInstantiationIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<(&Type, NumTypeNodes)> {
+        let (ty, ty_count) = Self::get_or(&mut self.struct_def_instantiation_type, idx, |idx| {
+            let ty = resolver.get_generic_struct_ty(idx, ty_args)?;
+            let ty_count = NumTypeNodes::new(ty.num_nodes() as u64);
+            Ok((ty, ty_count))
+        })?;
+        Ok((ty, *ty_count))
+    }
+
+    #[inline(always)]
+    pub(crate) fn get_struct_variant_type(
+        &mut self,
+        idx: StructVariantInstantiationIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<(&Type, NumTypeNodes)> {
+        let (ty, ty_count) =
+            Self::get_or(&mut self.struct_variant_instantiation_type, idx, |idx| {
+                let info = resolver.get_struct_variant_instantiation_at(idx);
+                let ty = resolver.create_struct_instantiation_ty(
+                    &info.definition_struct_type,
+                    &info.instantiation,
+                    ty_args,
+                )?;
+                let ty_count = NumTypeNodes::new(ty.num_nodes() as u64);
+                Ok((ty, ty_count))
+            })?;
+        Ok((ty, *ty_count))
+    }
+
+    #[inline(always)]
+    pub(crate) fn get_struct_fields_types(
+        &mut self,
+        idx: StructDefInstantiationIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<&[(Type, NumTypeNodes)]> {
+        Ok(Self::get_or(
+            &mut self.struct_field_type_instantiation,
+            idx,
+            |idx| {
+                Ok(resolver
+                    .instantiate_generic_struct_fields(idx, ty_args)?
+                    .into_iter()
+                    .map(|ty| {
+                        let num_nodes = NumTypeNodes::new(ty.num_nodes() as u64);
+                        (ty, num_nodes)
+                    })
+                    .collect::<Vec<_>>())
+            },
+        )?)
+    }
+
+    #[inline(always)]
+    pub(crate) fn get_struct_variant_fields_types(
+        &mut self,
+        idx: StructVariantInstantiationIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<&[(Type, NumTypeNodes)]> {
+        Ok(Self::get_or(
+            &mut self.struct_variant_field_type_instantiation,
+            idx,
+            |idx| {
+                Ok(resolver
+                    .instantiate_generic_struct_variant_fields(idx, ty_args)?
+                    .into_iter()
+                    .map(|ty| {
+                        let num_nodes = NumTypeNodes::new(ty.num_nodes() as u64);
+                        (ty, num_nodes)
+                    })
+                    .collect::<Vec<_>>())
+            },
+        )?)
+    }
+
+    #[inline(always)]
+    pub(crate) fn get_signature_index_type(
+        &mut self,
+        idx: SignatureIndex,
+        resolver: &Resolver,
+        ty_args: &[Type],
+    ) -> PartialVMResult<(&Type, NumTypeNodes)> {
+        let (ty, ty_count) = Self::get_or(&mut self.single_sig_token_type, idx, |idx| {
+            let ty = resolver.instantiate_single_type(idx, ty_args)?;
+            let ty_count = NumTypeNodes::new(ty.num_nodes() as u64);
+            Ok((ty, ty_count))
+        })?;
+        Ok((ty, *ty_count))
+    }
+}
diff --git a/third_party/move/move-vm/runtime/src/interpreter.rs b/third_party/move/move-vm/runtime/src/interpreter.rs
index f45aa01f5fd2b..00310f994e7bf 100644
--- a/third_party/move/move-vm/runtime/src/interpreter.rs
+++ b/third_party/move/move-vm/runtime/src/interpreter.rs
@@ -5,19 +5,21 @@
 use crate::{
     access_control::AccessControlState,
     data_cache::TransactionDataCache,
+    frame_type_cache::FrameTypeCache,
     loader::{LegacyModuleStorageAdapter, Loader, Resolver},
     module_traversal::TraversalContext,
     native_extensions::NativeContextExtensions,
     native_functions::NativeContext,
+    runtime_type_checks::FullRuntimeTypeCheck,
+    runtime_type_checks::NullRuntimeTypeCheck,
+    runtime_type_checks::RuntimeTypeCheck,
     trace, LoadedFunction, ModuleStorage,
 };
 use fail::fail_point;
 use move_binary_format::{
     errors::*,
     file_format::{
-        Ability, AbilitySet, AccessKind, Bytecode, FieldInstantiationIndex, FunctionHandleIndex,
-        FunctionInstantiationIndex, LocalIndex, SignatureIndex, StructDefInstantiationIndex,
-        StructVariantInstantiationIndex, VariantFieldInstantiationIndex,
+        AccessKind, Bytecode, FunctionHandleIndex, FunctionInstantiationIndex, LocalIndex,
     },
 };
 use move_core_types::{
@@ -42,7 +44,7 @@ use move_vm_types::{
 };
 use std::{
     cmp::min,
-    collections::{BTreeMap, HashSet, VecDeque},
+    collections::{HashSet, VecDeque},
     fmt::Write,
 };
 
@@ -57,9 +59,21 @@ macro_rules! set_err_info {
 ///
 /// An `Interpreter` instance is a stand alone execution context for a function.
 /// It mimics execution on a single thread, with an call stack and an operand stack.
-pub(crate) struct Interpreter {
+pub(crate) struct Interpreter;
+
+pub(crate) trait InterpreterDebugInterface {
+    fn get_stack_frames(&self, count: usize) -> ExecutionState;
+    fn debug_print_stack_trace(&self, buf: &mut String, resolver: &Resolver)
+        -> PartialVMResult<()>;
+}
+
+/// `InterpreterImpl` instances can execute Move functions.
+///
+/// An `InterpreterImpl` instance is a stand alone execution context for a function.
+/// It mimics execution on a single thread, with a call stack and an operand stack.
+pub(crate) struct InterpreterImpl {
     /// Operand stack, where Move `Value`s are stored for stack operations.
-    operand_stack: Stack,
+    pub(crate) operand_stack: Stack,
     /// The stack of active functions.
     call_stack: CallStack,
     /// Whether to perform a paranoid type safety checks at runtime.
@@ -98,25 +112,68 @@ impl Interpreter {
         extensions: &mut NativeContextExtensions,
         loader: &Loader,
     ) -> VMResult<Vec<Value>> {
-        Interpreter {
-            operand_stack: Stack::new(),
-            call_stack: CallStack::new(),
-            paranoid_type_checks: loader.vm_config().paranoid_type_checks,
-            access_control: AccessControlState::default(),
-            active_modules: HashSet::new(),
-        }
-        .execute_main(
-            loader,
+        InterpreterImpl::entrypoint(
+            function,
+            args,
             data_store,
             module_store,
             module_storage,
             gas_meter,
             traversal_context,
             extensions,
-            function,
-            args,
+            loader,
         )
     }
+}
+
+impl InterpreterImpl {
+    /// Entrypoint into the interpreter. All external calls need to be routed through this
+    /// function.
+    pub(crate) fn entrypoint(
+        function: LoadedFunction,
+        args: Vec<Value>,
+        data_store: &mut TransactionDataCache,
+        module_store: &LegacyModuleStorageAdapter,
+        module_storage: &impl ModuleStorage,
+        gas_meter: &mut impl GasMeter,
+        traversal_context: &mut TraversalContext,
+        extensions: &mut NativeContextExtensions,
+        loader: &Loader,
+    ) -> VMResult<Vec<Value>> {
+        let interpreter = InterpreterImpl {
+            operand_stack: Stack::new(),
+            call_stack: CallStack::new(),
+            paranoid_type_checks: loader.vm_config().paranoid_type_checks,
+            access_control: AccessControlState::default(),
+            active_modules: HashSet::new(),
+        };
+
+        if loader.vm_config().paranoid_type_checks {
+            interpreter.execute_main::<FullRuntimeTypeCheck>(
+                loader,
+                data_store,
+                module_store,
+                module_storage,
+                gas_meter,
+                traversal_context,
+                extensions,
+                function,
+                args,
+            )
+        } else {
+            interpreter.execute_main::<NullRuntimeTypeCheck>(
+                loader,
+                data_store,
+                module_store,
+                module_storage,
+                gas_meter,
+                traversal_context,
+                extensions,
+                function,
+                args,
+            )
+        }
+    }
 
     /// Main loop for the execution of a function.
     ///
@@ -124,7 +181,7 @@ impl Interpreter {
     /// function represented by the frame. Control comes back to this function on return or
     /// on call. When that happens the frame is changes to a new one (call) or to the one
     /// at the top of the stack (return). If the call stack is empty execution is completed.
-    fn execute_main(
+    fn execute_main<RTTCheck: RuntimeTypeCheck>(
         mut self,
         loader: &Loader,
         data_store: &mut TransactionDataCache,
@@ -159,7 +216,7 @@ impl Interpreter {
         loop {
             let resolver = current_frame.resolver(loader, module_store, module_storage);
             let exit_code = current_frame
-                .execute_code(&resolver, &mut self, data_store, gas_meter)
+                .execute_code::<RTTCheck>(&resolver, &mut self, data_store, gas_meter)
                 .map_err(|err| self.attach_state_if_invariant_violation(err, &current_frame))?;
 
             match exit_code {
@@ -1036,27 +1093,6 @@ impl Interpreter {
         Ok(())
     }
 
-    #[allow(dead_code)]
-    pub(crate) fn debug_print_stack_trace<B: Write>(
-        &self,
-        buf: &mut B,
-        resolver: &Resolver,
-    ) -> PartialVMResult<()> {
-        debug_writeln!(buf, "Call Stack:")?;
-        for (i, frame) in self.call_stack.0.iter().enumerate() {
-            self.debug_print_frame(buf, resolver, i, frame)?;
-        }
-        debug_writeln!(buf, "Operand Stack:")?;
-        for (idx, val) in self.operand_stack.value.iter().enumerate() {
-            // TODO: Currently we do not know the types of the values on the operand stack.
-            // Revisit.
-            debug_write!(buf, "  [{}] ", idx)?;
-            values::debug::print_value(buf, val)?;
-            debug_writeln!(buf)?;
-        }
-        Ok(())
-    }
-
     /// Generate a string which is the status of the interpreter: call stack, current bytecode
     /// stream, locals and operand stack.
 ///
@@ -1117,9 +1153,32 @@ impl Interpreter {
     fn get_internal_state(&self) -> ExecutionState {
         self.get_stack_frames(usize::MAX)
     }
+}
+
+impl InterpreterDebugInterface for InterpreterImpl {
+    #[allow(dead_code)]
+    fn debug_print_stack_trace(
+        &self,
+        buf: &mut String,
+        resolver: &Resolver,
+    ) -> PartialVMResult<()> {
+        debug_writeln!(buf, "Call Stack:")?;
+        for (i, frame) in self.call_stack.0.iter().enumerate() {
+            self.debug_print_frame(buf, resolver, i, frame)?;
+        }
+        debug_writeln!(buf, "Operand Stack:")?;
+        for (idx, val) in self.operand_stack.value.iter().enumerate() {
+            // TODO: Currently we do not know the types of the values on the operand stack.
+            // Revisit.
+            debug_write!(buf, "  [{}] ", idx)?;
+            values::debug::print_value(buf, val)?;
+            debug_writeln!(buf)?;
+        }
+        Ok(())
+    }
 
     /// Get count stack frames starting from the top of the stack.
-    pub(crate) fn get_stack_frames(&self, count: usize) -> ExecutionState {
+    fn get_stack_frames(&self, count: usize) -> ExecutionState {
         // collect frames in the reverse order as this is what is
         // normally expected from the stack trace (outermost frame
         // is the last one)
@@ -1147,7 +1206,7 @@ const CALL_STACK_SIZE_LIMIT: usize = 1024;
 pub(crate) const ACCESS_STACK_SIZE_LIMIT: usize = 256;
 
 /// The operand stack.
-struct Stack {
+pub(crate) struct Stack {
     value: Vec<Value>,
     types: Vec<Type>,
 }
@@ -1209,7 +1268,7 @@ impl Stack {
 
     /// Push a type on the stack if the max stack size has not been reached. Abort execution
     /// otherwise.
-    fn push_ty(&mut self, ty: Type) -> PartialVMResult<()> {
+    pub(crate) fn push_ty(&mut self, ty: Type) -> PartialVMResult<()> {
         if self.types.len() < OPERAND_STACK_SIZE_LIMIT {
             self.types.push(ty);
             Ok(())
@@ -1219,14 +1278,14 @@ impl Stack {
     }
 
     /// Pop a type off the stack or abort execution if the stack is empty.
-    fn pop_ty(&mut self) -> PartialVMResult<Type> {
+    pub(crate) fn pop_ty(&mut self) -> PartialVMResult<Type> {
         self.types
             .pop()
             .ok_or_else(|| PartialVMError::new(StatusCode::EMPTY_VALUE_STACK))
     }
 
     /// Pop n types off the stack.
-    fn popn_tys(&mut self, n: u16) -> PartialVMResult<Vec<Type>> {
+    pub(crate) fn popn_tys(&mut self, n: u16) -> PartialVMResult<Vec<Type>> {
         let remaining_stack_size = self
             .types
             .len()
@@ -1373,25 +1432,6 @@ struct Frame {
     ty_cache: FrameTypeCache,
 }
 
-#[derive(Default)]
-struct FrameTypeCache {
-    struct_field_type_instantiation:
-        BTreeMap<StructDefInstantiationIndex, Vec<(Type, NumTypeNodes)>>,
-    struct_variant_field_type_instantiation:
-        BTreeMap<StructVariantInstantiationIndex, Vec<(Type, NumTypeNodes)>>,
-    struct_def_instantiation_type: BTreeMap<StructDefInstantiationIndex, (Type, NumTypeNodes)>,
-    struct_variant_instantiation_type:
-        BTreeMap<StructVariantInstantiationIndex, (Type, NumTypeNodes)>,
-    /// For a given field instantiation, the:
-    /// ((Type of the field, size of the field type) and (Type of its defining struct, size of its defining struct)
-    field_instantiation:
-        BTreeMap<FieldInstantiationIndex, ((Type, NumTypeNodes), (Type, NumTypeNodes))>,
-    /// Same as above, bot for variant field instantiations
-    variant_field_instantiation:
-        BTreeMap<VariantFieldInstantiationIndex, ((Type, NumTypeNodes), (Type, NumTypeNodes))>,
-    single_sig_token_type: BTreeMap<SignatureIndex, (Type, NumTypeNodes)>,
-}
-
 /// An `ExitCode` from `execute_code_unit`.
 #[derive(Debug)]
 enum ExitCode {
     Return,
     Call(FunctionHandleIndex),
     CallGeneric(FunctionInstantiationIndex),
 }
 
-impl FrameTypeCache {
-    #[inline(always)]
-    fn get_or<K: Copy + Ord + Eq, V, F>(
-        map: &mut BTreeMap<K, V>,
-        idx: K,
-        ty_func: F,
-    ) -> PartialVMResult<&V>
-    where
-        F: FnOnce(K) -> PartialVMResult<V>,
-    {
-        match map.entry(idx) {
-            std::collections::btree_map::Entry::Occupied(entry) => Ok(entry.into_mut()),
-            std::collections::btree_map::Entry::Vacant(entry) => {
-                let v = ty_func(idx)?;
-                Ok(entry.insert(v))
-            },
-        }
-    }
-
-    #[inline(always)]
-    fn get_field_type_and_struct_type(
-        &mut self,
-        idx: FieldInstantiationIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<((&Type, NumTypeNodes), (&Type, NumTypeNodes))> {
-        let ((field_ty, field_ty_count), (struct_ty, struct_ty_count)) =
-            Self::get_or(&mut self.field_instantiation, idx, |idx| {
-                let struct_type = resolver.field_instantiation_to_struct(idx, ty_args)?;
-                let struct_ty_count = NumTypeNodes::new(struct_type.num_nodes() as u64);
-                let field_ty = resolver.get_generic_field_ty(idx, ty_args)?;
-                let field_ty_count = NumTypeNodes::new(field_ty.num_nodes() as u64);
-                Ok(((field_ty, field_ty_count), (struct_type, struct_ty_count)))
-            })?;
-        Ok(((field_ty, *field_ty_count), (struct_ty, *struct_ty_count)))
-    }
-
-    fn get_variant_field_type_and_struct_type(
-        &mut self,
-        idx: VariantFieldInstantiationIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<((&Type, NumTypeNodes), (&Type, NumTypeNodes))> {
-        let ((field_ty, field_ty_count), (struct_ty, struct_ty_count)) =
-            Self::get_or(&mut self.variant_field_instantiation, idx, |idx| {
-                let info = resolver.variant_field_instantiation_info_at(idx);
-                let struct_type = resolver.create_struct_instantiation_ty(
-                    &info.definition_struct_type,
-                    &info.instantiation,
-                    ty_args,
-                )?;
-                let struct_ty_count = NumTypeNodes::new(struct_type.num_nodes() as u64);
-                let field_ty = resolver.instantiate_ty(
-                    &info.uninstantiated_field_ty,
-                    ty_args,
-                    &info.instantiation,
-                )?;
-                let field_ty_count = NumTypeNodes::new(field_ty.num_nodes() as u64);
-                Ok(((field_ty, field_ty_count), (struct_type, struct_ty_count)))
-            })?;
-        Ok(((field_ty, *field_ty_count), (struct_ty, *struct_ty_count)))
-    }
-
-    #[inline(always)]
-    fn get_struct_type(
-        &mut self,
-        idx: StructDefInstantiationIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<(&Type, NumTypeNodes)> {
-        let (ty, ty_count) = Self::get_or(&mut self.struct_def_instantiation_type, idx, |idx| {
-            let ty = resolver.get_generic_struct_ty(idx, ty_args)?;
-            let ty_count = NumTypeNodes::new(ty.num_nodes() as u64);
-            Ok((ty, ty_count))
-        })?;
-        Ok((ty, *ty_count))
-    }
-
-    #[inline(always)]
-    fn get_struct_variant_type(
-        &mut self,
-        idx: StructVariantInstantiationIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<(&Type, NumTypeNodes)> {
-        let (ty, ty_count) =
-            Self::get_or(&mut self.struct_variant_instantiation_type, idx, |idx| {
-                let info = resolver.get_struct_variant_instantiation_at(idx);
-                let ty = resolver.create_struct_instantiation_ty(
-                    &info.definition_struct_type,
-                    &info.instantiation,
-                    ty_args,
-                )?;
-                let ty_count = NumTypeNodes::new(ty.num_nodes() as u64);
-                Ok((ty, ty_count))
-            })?;
-        Ok((ty, *ty_count))
-    }
-
-    #[inline(always)]
-    fn get_struct_fields_types(
-        &mut self,
-        idx: StructDefInstantiationIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<&[(Type, NumTypeNodes)]> {
-        Ok(Self::get_or(
-            &mut self.struct_field_type_instantiation,
-            idx,
-            |idx| {
-                
Ok(resolver
-                    .instantiate_generic_struct_fields(idx, ty_args)?
-                    .into_iter()
-                    .map(|ty| {
-                        let num_nodes = NumTypeNodes::new(ty.num_nodes() as u64);
-                        (ty, num_nodes)
-                    })
-                    .collect::<Vec<_>>())
-            },
-        )?)
-    }
-
-    #[inline(always)]
-    fn get_struct_variant_fields_types(
-        &mut self,
-        idx: StructVariantInstantiationIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<&[(Type, NumTypeNodes)]> {
-        Ok(Self::get_or(
-            &mut self.struct_variant_field_type_instantiation,
-            idx,
-            |idx| {
-                Ok(resolver
-                    .instantiate_generic_struct_variant_fields(idx, ty_args)?
-                    .into_iter()
-                    .map(|ty| {
-                        let num_nodes = NumTypeNodes::new(ty.num_nodes() as u64);
-                        (ty, num_nodes)
-                    })
-                    .collect::<Vec<_>>())
-            },
-        )?)
-    }
-
-    #[inline(always)]
-    fn get_signature_index_type(
-        &mut self,
-        idx: SignatureIndex,
-        resolver: &Resolver,
-        ty_args: &[Type],
-    ) -> PartialVMResult<(&Type, NumTypeNodes)> {
-        let (ty, ty_count) = Self::get_or(&mut self.single_sig_token_type, idx, |idx| {
-            let ty = resolver.instantiate_single_type(idx, ty_args)?;
-            let ty_count = NumTypeNodes::new(ty.num_nodes() as u64);
-            Ok((ty, ty_count))
-        })?;
-        Ok((ty, *ty_count))
-    }
-}
-
 impl AccessSpecifierEnv for Frame {
     fn eval_address_specifier_function(
         &self,
@@ -1573,14 +1452,14 @@ impl AccessSpecifierEnv for Frame {
 
 impl Frame {
     /// Execute a Move function until a return or a call opcode is found.
-    fn execute_code(
+    fn execute_code<RTTCheck: RuntimeTypeCheck>(
         &mut self,
         resolver: &Resolver,
-        interpreter: &mut Interpreter,
+        interpreter: &mut InterpreterImpl,
         data_store: &mut TransactionDataCache,
         gas_meter: &mut impl GasMeter,
     ) -> VMResult<ExitCode> {
-        self.execute_code_impl(resolver, interpreter, data_store, gas_meter)
+        self.execute_code_impl::<RTTCheck>(resolver, interpreter, data_store, gas_meter)
             .map_err(|e| {
                 let e = if cfg!(feature = "testing") || cfg!(feature = "stacktrace") {
                     e.with_exec_state(interpreter.get_internal_state())
                 } else {
                     e
                 };
                 e.at_code_offset(self.function.index(), self.pc)
                     .finish(self.location())
             })
     }
 
-    /// Paranoid type checks to perform before instruction execution.
-    ///
-    /// Note that most of the checks should happen after instruction execution, because gas charging will happen during
-    /// instruction execution and we want to avoid running code without charging proper gas as much as possible.
-    fn pre_execution_type_stack_transition(
-        local_tys: &[Type],
-        locals: &Locals,
-        _ty_args: &[Type],
-        _resolver: &Resolver,
-        interpreter: &mut Interpreter,
-        instruction: &Bytecode,
-    ) -> PartialVMResult<()> {
-        match instruction {
-            // Call instruction will be checked at execute_main.
-            Bytecode::Call(_) | Bytecode::CallGeneric(_) => (),
-            Bytecode::BrFalse(_) | Bytecode::BrTrue(_) => {
-                interpreter.operand_stack.pop_ty()?;
-            },
-            Bytecode::Branch(_) => (),
-            Bytecode::Ret => {
-                for (idx, ty) in local_tys.iter().enumerate() {
-                    if !locals.is_invalid(idx)? {
-                        ty.paranoid_check_has_ability(Ability::Drop)?;
-                    }
-                }
-            },
-            Bytecode::Abort => {
-                interpreter.operand_stack.pop_ty()?;
-            },
-            // StLoc needs to check before execution as we need to check the drop ability of values.
-            Bytecode::StLoc(idx) => {
-                let ty = local_tys[*idx as usize].clone();
-                let val_ty = interpreter.operand_stack.pop_ty()?;
-                ty.paranoid_check_eq(&val_ty)?;
-                if !locals.is_invalid(*idx as usize)? {
-                    ty.paranoid_check_has_ability(Ability::Drop)?;
-                }
-            },
-            // We will check the rest of the instructions after execution phase.
- Bytecode::Pop - | Bytecode::LdU8(_) - | Bytecode::LdU16(_) - | Bytecode::LdU32(_) - | Bytecode::LdU64(_) - | Bytecode::LdU128(_) - | Bytecode::LdU256(_) - | Bytecode::LdTrue - | Bytecode::LdFalse - | Bytecode::LdConst(_) - | Bytecode::CopyLoc(_) - | Bytecode::MoveLoc(_) - | Bytecode::MutBorrowLoc(_) - | Bytecode::ImmBorrowLoc(_) - | Bytecode::ImmBorrowField(_) - | Bytecode::MutBorrowField(_) - | Bytecode::ImmBorrowFieldGeneric(_) - | Bytecode::MutBorrowFieldGeneric(_) - | Bytecode::Pack(_) - | Bytecode::PackGeneric(_) - | Bytecode::Unpack(_) - | Bytecode::UnpackGeneric(_) - | Bytecode::ReadRef - | Bytecode::WriteRef - | Bytecode::CastU8 - | Bytecode::CastU16 - | Bytecode::CastU32 - | Bytecode::CastU64 - | Bytecode::CastU128 - | Bytecode::CastU256 - | Bytecode::Add - | Bytecode::Sub - | Bytecode::Mul - | Bytecode::Mod - | Bytecode::Div - | Bytecode::BitOr - | Bytecode::BitAnd - | Bytecode::Xor - | Bytecode::Or - | Bytecode::And - | Bytecode::Shl - | Bytecode::Shr - | Bytecode::Lt - | Bytecode::Le - | Bytecode::Gt - | Bytecode::Ge - | Bytecode::Eq - | Bytecode::Neq - | Bytecode::MutBorrowGlobal(_) - | Bytecode::ImmBorrowGlobal(_) - | Bytecode::MutBorrowGlobalGeneric(_) - | Bytecode::ImmBorrowGlobalGeneric(_) - | Bytecode::Exists(_) - | Bytecode::ExistsGeneric(_) - | Bytecode::MoveTo(_) - | Bytecode::MoveToGeneric(_) - | Bytecode::MoveFrom(_) - | Bytecode::MoveFromGeneric(_) - | Bytecode::FreezeRef - | Bytecode::Nop - | Bytecode::Not - | Bytecode::VecPack(_, _) - | Bytecode::VecLen(_) - | Bytecode::VecImmBorrow(_) - | Bytecode::VecMutBorrow(_) - | Bytecode::VecPushBack(_) - | Bytecode::VecPopBack(_) - | Bytecode::VecUnpack(_, _) - | Bytecode::VecSwap(_) => (), - - // Since bytecode version 7 - Bytecode::PackVariant(_) - | Bytecode::PackVariantGeneric(_) - | Bytecode::UnpackVariant(_) - | Bytecode::UnpackVariantGeneric(_) - | Bytecode::TestVariant(_) - | Bytecode::TestVariantGeneric(_) - | Bytecode::MutBorrowVariantField(_) - | Bytecode::MutBorrowVariantFieldGeneric(_) - | Bytecode::ImmBorrowVariantField(_) - | Bytecode::ImmBorrowVariantFieldGeneric(_) => (), - }; - Ok(()) - } - - /// Paranoid type checks to perform after instruction execution. - /// - /// This function and `pre_execution_type_stack_transition` should constitute the full type stack transition for the paranoid mode. - fn post_execution_type_stack_transition( - local_tys: &[Type], - ty_args: &[Type], - resolver: &Resolver, - interpreter: &mut Interpreter, - ty_cache: &mut FrameTypeCache, - instruction: &Bytecode, - ) -> PartialVMResult<()> { - let ty_builder = resolver.loader().ty_builder(); - - match instruction { - Bytecode::BrTrue(_) | Bytecode::BrFalse(_) => (), - Bytecode::Branch(_) - | Bytecode::Ret - | Bytecode::Call(_) - | Bytecode::CallGeneric(_) - | Bytecode::Abort => { - // Invariants hold because all of the instructions above will force VM to break from the interpreter loop and thus not hit this code path. - unreachable!("control flow instruction encountered during type check") - }, - Bytecode::Pop => { - let ty = interpreter.operand_stack.pop_ty()?; - ty.paranoid_check_has_ability(Ability::Drop)?; - }, - Bytecode::LdU8(_) => { - let u8_ty = ty_builder.create_u8_ty(); - interpreter.operand_stack.push_ty(u8_ty)? - }, - Bytecode::LdU16(_) => { - let u16_ty = ty_builder.create_u16_ty(); - interpreter.operand_stack.push_ty(u16_ty)? - }, - Bytecode::LdU32(_) => { - let u32_ty = ty_builder.create_u32_ty(); - interpreter.operand_stack.push_ty(u32_ty)? 
- }, - Bytecode::LdU64(_) => { - let u64_ty = ty_builder.create_u64_ty(); - interpreter.operand_stack.push_ty(u64_ty)? - }, - Bytecode::LdU128(_) => { - let u128_ty = ty_builder.create_u128_ty(); - interpreter.operand_stack.push_ty(u128_ty)? - }, - Bytecode::LdU256(_) => { - let u256_ty = ty_builder.create_u256_ty(); - interpreter.operand_stack.push_ty(u256_ty)? - }, - Bytecode::LdTrue | Bytecode::LdFalse => { - let bool_ty = ty_builder.create_bool_ty(); - interpreter.operand_stack.push_ty(bool_ty)? - }, - Bytecode::LdConst(i) => { - let constant = resolver.constant_at(*i); - let ty = ty_builder.create_constant_ty(&constant.type_)?; - interpreter.operand_stack.push_ty(ty)?; - }, - Bytecode::CopyLoc(idx) => { - let ty = local_tys[*idx as usize].clone(); - ty.paranoid_check_has_ability(Ability::Copy)?; - interpreter.operand_stack.push_ty(ty)?; - }, - Bytecode::MoveLoc(idx) => { - let ty = local_tys[*idx as usize].clone(); - interpreter.operand_stack.push_ty(ty)?; - }, - Bytecode::StLoc(_) => (), - Bytecode::MutBorrowLoc(idx) => { - let ty = &local_tys[*idx as usize]; - let mut_ref_ty = ty_builder.create_ref_ty(ty, true)?; - interpreter.operand_stack.push_ty(mut_ref_ty)?; - }, - Bytecode::ImmBorrowLoc(idx) => { - let ty = &local_tys[*idx as usize]; - let ref_ty = ty_builder.create_ref_ty(ty, false)?; - interpreter.operand_stack.push_ty(ref_ty)?; - }, - Bytecode::ImmBorrowField(fh_idx) => { - let ty = interpreter.operand_stack.pop_ty()?; - let expected_ty = resolver.field_handle_to_struct(*fh_idx); - ty.paranoid_check_ref_eq(&expected_ty, false)?; - - let field_ty = resolver.get_field_ty(*fh_idx)?; - let field_ref_ty = ty_builder.create_ref_ty(field_ty, false)?; - interpreter.operand_stack.push_ty(field_ref_ty)?; - }, - Bytecode::MutBorrowField(fh_idx) => { - let ref_ty = interpreter.operand_stack.pop_ty()?; - let expected_inner_ty = resolver.field_handle_to_struct(*fh_idx); - ref_ty.paranoid_check_ref_eq(&expected_inner_ty, true)?; - - let field_ty = resolver.get_field_ty(*fh_idx)?; - let field_mut_ref_ty = ty_builder.create_ref_ty(field_ty, true)?; - interpreter.operand_stack.push_ty(field_mut_ref_ty)?; - }, - Bytecode::ImmBorrowFieldGeneric(idx) => { - let struct_ty = interpreter.operand_stack.pop_ty()?; - let ((field_ty, _), (expected_struct_ty, _)) = - ty_cache.get_field_type_and_struct_type(*idx, resolver, ty_args)?; - struct_ty.paranoid_check_ref_eq(expected_struct_ty, false)?; - - let field_ref_ty = ty_builder.create_ref_ty(field_ty, false)?; - interpreter.operand_stack.push_ty(field_ref_ty)?; - }, - Bytecode::MutBorrowFieldGeneric(idx) => { - let struct_ty = interpreter.operand_stack.pop_ty()?; - let ((field_ty, _), (expected_struct_ty, _)) = - ty_cache.get_field_type_and_struct_type(*idx, resolver, ty_args)?; - struct_ty.paranoid_check_ref_eq(expected_struct_ty, true)?; - - let field_mut_ref_ty = ty_builder.create_ref_ty(field_ty, true)?; - interpreter.operand_stack.push_ty(field_mut_ref_ty)?; - }, - Bytecode::ImmBorrowVariantField(fh_idx) | Bytecode::MutBorrowVariantField(fh_idx) => { - let is_mut = matches!(instruction, Bytecode::MutBorrowVariantField(..)); - let field_info = resolver.variant_field_info_at(*fh_idx); - let ty = interpreter.operand_stack.pop_ty()?; - let expected_ty = resolver.create_struct_ty(&field_info.definition_struct_type); - ty.paranoid_check_ref_eq(&expected_ty, is_mut)?; - let field_ty = &field_info.uninstantiated_field_ty; - let field_ref_ty = ty_builder.create_ref_ty(field_ty, is_mut)?; - interpreter.operand_stack.push_ty(field_ref_ty)?; - }, - 
Bytecode::ImmBorrowVariantFieldGeneric(idx) - | Bytecode::MutBorrowVariantFieldGeneric(idx) => { - let is_mut = matches!(instruction, Bytecode::MutBorrowVariantFieldGeneric(..)); - let struct_ty = interpreter.operand_stack.pop_ty()?; - let ((field_ty, _), (expected_struct_ty, _)) = - ty_cache.get_variant_field_type_and_struct_type(*idx, resolver, ty_args)?; - struct_ty.paranoid_check_ref_eq(expected_struct_ty, is_mut)?; - let field_ref_ty = ty_builder.create_ref_ty(field_ty, is_mut)?; - interpreter.operand_stack.push_ty(field_ref_ty)?; - }, - Bytecode::Pack(idx) => { - let field_count = resolver.field_count(*idx); - let args_ty = resolver.get_struct(*idx)?; - let field_tys = args_ty.fields(None)?.iter().map(|(_, ty)| ty); - let output_ty = resolver.get_struct_ty(*idx); - Self::verify_pack(interpreter, field_count, field_tys, output_ty)?; - }, - Bytecode::PackGeneric(idx) => { - let field_count = resolver.field_instantiation_count(*idx); - let output_ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0.clone(); - let args_ty = ty_cache.get_struct_fields_types(*idx, resolver, ty_args)?; - - if field_count as usize != args_ty.len() { - // This is an inconsistency between the cache and the actual - // type declaration. We would crash if for some reason this invariant does - // not hold. It seems impossible to hit, but we keep it here for safety - // reasons, as a previous version of this code had this too. - return Err( - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message("Args count mismatch".to_string()), - ); - } - - Self::verify_pack( - interpreter, - field_count, - args_ty.iter().map(|(ty, _)| ty), - output_ty, - )?; - }, - Bytecode::Unpack(idx) => { - let struct_ty = interpreter.operand_stack.pop_ty()?; - struct_ty.paranoid_check_eq(&resolver.get_struct_ty(*idx))?; - let struct_decl = resolver.get_struct(*idx)?; - for (_name, ty) in struct_decl.fields(None)?.iter() { - interpreter.operand_stack.push_ty(ty.clone())?; - } - }, - Bytecode::UnpackGeneric(idx) => { - let struct_ty = interpreter.operand_stack.pop_ty()?; - - struct_ty - .paranoid_check_eq(ty_cache.get_struct_type(*idx, resolver, ty_args)?.0)?; - - let struct_fields_types = - ty_cache.get_struct_fields_types(*idx, resolver, ty_args)?; - for (ty, _) in struct_fields_types { - interpreter.operand_stack.push_ty(ty.clone())?; - } - }, - Bytecode::PackVariant(idx) => { - let info = resolver.get_struct_variant_at(*idx); - let field_tys = info - .definition_struct_type - .fields(Some(info.variant))? - .iter() - .map(|(_, ty)| ty); - let output_ty = resolver.create_struct_ty(&info.definition_struct_type); - Self::verify_pack(interpreter, info.field_count, field_tys, output_ty)?; - }, - Bytecode::PackVariantGeneric(idx) => { - let info = resolver.get_struct_variant_instantiation_at(*idx); - let output_ty = ty_cache - .get_struct_variant_type(*idx, resolver, ty_args)? - .0 - .clone(); - let args_ty = ty_cache.get_struct_variant_fields_types(*idx, resolver, ty_args)?; - Self::verify_pack( - interpreter, - info.field_count, - args_ty.iter().map(|(ty, _)| ty), - output_ty, - )?; - }, - Bytecode::UnpackVariant(idx) => { - let info = resolver.get_struct_variant_at(*idx); - let expected_struct_ty = resolver.create_struct_ty(&info.definition_struct_type); - let actual_struct_ty = interpreter.operand_stack.pop_ty()?; - actual_struct_ty.paranoid_check_eq(&expected_struct_ty)?; - for (_name, ty) in info - .definition_struct_type - .fields(Some(info.variant))? 
- .iter() - { - interpreter.operand_stack.push_ty(ty.clone())?; - } - }, - Bytecode::UnpackVariantGeneric(idx) => { - let expected_struct_type = - ty_cache.get_struct_variant_type(*idx, resolver, ty_args)?.0; - let actual_struct_type = interpreter.operand_stack.pop_ty()?; - actual_struct_type.paranoid_check_eq(expected_struct_type)?; - let struct_fields_types = - ty_cache.get_struct_variant_fields_types(*idx, resolver, ty_args)?; - for (ty, _) in struct_fields_types { - interpreter.operand_stack.push_ty(ty.clone())?; - } - }, - Bytecode::TestVariant(idx) => { - let info = resolver.get_struct_variant_at(*idx); - let expected_struct_ty = resolver.create_struct_ty(&info.definition_struct_type); - let actual_struct_ty = interpreter.operand_stack.pop_ty()?; - actual_struct_ty.paranoid_check_ref_eq(&expected_struct_ty, false)?; - interpreter - .operand_stack - .push_ty(ty_builder.create_bool_ty())?; - }, - Bytecode::TestVariantGeneric(idx) => { - let expected_struct_ty = - ty_cache.get_struct_variant_type(*idx, resolver, ty_args)?.0; - let actual_struct_ty = interpreter.operand_stack.pop_ty()?; - actual_struct_ty.paranoid_check_ref_eq(expected_struct_ty, false)?; - interpreter - .operand_stack - .push_ty(ty_builder.create_bool_ty())?; - }, - Bytecode::ReadRef => { - let ref_ty = interpreter.operand_stack.pop_ty()?; - let inner_ty = ref_ty.paranoid_read_ref()?; - interpreter.operand_stack.push_ty(inner_ty)?; - }, - Bytecode::WriteRef => { - let mut_ref_ty = interpreter.operand_stack.pop_ty()?; - let val_ty = interpreter.operand_stack.pop_ty()?; - mut_ref_ty.paranoid_write_ref(&val_ty)?; - }, - Bytecode::CastU8 => { - interpreter.operand_stack.pop_ty()?; - let u8_ty = ty_builder.create_u8_ty(); - interpreter.operand_stack.push_ty(u8_ty)?; - }, - Bytecode::CastU16 => { - interpreter.operand_stack.pop_ty()?; - let u16_ty = ty_builder.create_u16_ty(); - interpreter.operand_stack.push_ty(u16_ty)?; - }, - Bytecode::CastU32 => { - interpreter.operand_stack.pop_ty()?; - let u32_ty = ty_builder.create_u32_ty(); - interpreter.operand_stack.push_ty(u32_ty)?; - }, - Bytecode::CastU64 => { - interpreter.operand_stack.pop_ty()?; - let u64_ty = ty_builder.create_u64_ty(); - interpreter.operand_stack.push_ty(u64_ty)?; - }, - Bytecode::CastU128 => { - interpreter.operand_stack.pop_ty()?; - let u128_ty = ty_builder.create_u128_ty(); - interpreter.operand_stack.push_ty(u128_ty)?; - }, - Bytecode::CastU256 => { - interpreter.operand_stack.pop_ty()?; - let u256_ty = ty_builder.create_u256_ty(); - interpreter.operand_stack.push_ty(u256_ty)?; - }, - Bytecode::Add - | Bytecode::Sub - | Bytecode::Mul - | Bytecode::Mod - | Bytecode::Div - | Bytecode::BitOr - | Bytecode::BitAnd - | Bytecode::Xor - | Bytecode::Or - | Bytecode::And => { - let rhs_ty = interpreter.operand_stack.pop_ty()?; - let lhs_ty = interpreter.operand_stack.pop_ty()?; - rhs_ty.paranoid_check_eq(&lhs_ty)?; - interpreter.operand_stack.push_ty(rhs_ty)?; - }, - Bytecode::Shl | Bytecode::Shr => { - let _rhs = interpreter.operand_stack.pop_ty()?; - let lhs = interpreter.operand_stack.pop_ty()?; - interpreter.operand_stack.push_ty(lhs)?; - }, - Bytecode::Lt | Bytecode::Le | Bytecode::Gt | Bytecode::Ge => { - let rhs_ty = interpreter.operand_stack.pop_ty()?; - let lhs_ty = interpreter.operand_stack.pop_ty()?; - rhs_ty.paranoid_check_eq(&lhs_ty)?; - - let bool_ty = ty_builder.create_bool_ty(); - interpreter.operand_stack.push_ty(bool_ty)?; - }, - Bytecode::Eq | Bytecode::Neq => { - let rhs_ty = interpreter.operand_stack.pop_ty()?; - let lhs_ty = 
interpreter.operand_stack.pop_ty()?; - rhs_ty.paranoid_check_eq(&lhs_ty)?; - rhs_ty.paranoid_check_has_ability(Ability::Drop)?; - - let bool_ty = ty_builder.create_bool_ty(); - interpreter.operand_stack.push_ty(bool_ty)?; - }, - Bytecode::MutBorrowGlobal(idx) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - let struct_ty = resolver.get_struct_ty(*idx); - struct_ty.paranoid_check_has_ability(Ability::Key)?; - - let struct_mut_ref_ty = ty_builder.create_ref_ty(&struct_ty, true)?; - interpreter.operand_stack.push_ty(struct_mut_ref_ty)?; - }, - Bytecode::ImmBorrowGlobal(idx) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - let struct_ty = resolver.get_struct_ty(*idx); - struct_ty.paranoid_check_has_ability(Ability::Key)?; - - let struct_ref_ty = ty_builder.create_ref_ty(&struct_ty, false)?; - interpreter.operand_stack.push_ty(struct_ref_ty)?; - }, - Bytecode::MutBorrowGlobalGeneric(idx) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - let struct_ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0; - struct_ty.paranoid_check_has_ability(Ability::Key)?; - - let struct_mut_ref_ty = ty_builder.create_ref_ty(struct_ty, true)?; - interpreter.operand_stack.push_ty(struct_mut_ref_ty)?; - }, - Bytecode::ImmBorrowGlobalGeneric(idx) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - let struct_ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0; - struct_ty.paranoid_check_has_ability(Ability::Key)?; - - let struct_ref_ty = ty_builder.create_ref_ty(struct_ty, false)?; - interpreter.operand_stack.push_ty(struct_ref_ty)?; - }, - Bytecode::Exists(_) | Bytecode::ExistsGeneric(_) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - - let bool_ty = ty_builder.create_bool_ty(); - interpreter.operand_stack.push_ty(bool_ty)?; - }, - Bytecode::MoveTo(idx) => { - let ty = interpreter.operand_stack.pop_ty()?; - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_signer_ref_ty()?; - ty.paranoid_check_eq(&resolver.get_struct_ty(*idx))?; - ty.paranoid_check_has_ability(Ability::Key)?; - }, - Bytecode::MoveToGeneric(idx) => { - let ty = interpreter.operand_stack.pop_ty()?; - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_signer_ref_ty()?; - ty.paranoid_check_eq(ty_cache.get_struct_type(*idx, resolver, ty_args)?.0)?; - ty.paranoid_check_has_ability(Ability::Key)?; - }, - Bytecode::MoveFrom(idx) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - let ty = resolver.get_struct_ty(*idx); - ty.paranoid_check_has_ability(Ability::Key)?; - interpreter.operand_stack.push_ty(ty)?; - }, - Bytecode::MoveFromGeneric(idx) => { - interpreter - .operand_stack - .pop_ty()? - .paranoid_check_is_address_ty()?; - let ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0.clone(); - ty.paranoid_check_has_ability(Ability::Key)?; - interpreter.operand_stack.push_ty(ty)?; - }, - Bytecode::FreezeRef => { - let mut_ref_ty = interpreter.operand_stack.pop_ty()?; - let ref_ty = mut_ref_ty.paranoid_freeze_ref_ty()?; - interpreter.operand_stack.push_ty(ref_ty)?; - }, - Bytecode::Nop => (), - Bytecode::Not => { - interpreter - .operand_stack - .pop_ty()? 
-                    .paranoid_check_is_bool_ty()?;
-                let bool_ty = ty_builder.create_bool_ty();
-                interpreter.operand_stack.push_ty(bool_ty)?;
-            },
-            Bytecode::VecPack(si, num) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                let elem_tys = interpreter.operand_stack.popn_tys(*num as u16)?;
-                for elem_ty in elem_tys.iter() {
-                    elem_ty.paranoid_check_eq(ty)?;
-                }
-
-                let vec_ty = ty_builder.create_vec_ty(ty)?;
-                interpreter.operand_stack.push_ty(vec_ty)?;
-            },
-            Bytecode::VecLen(si) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_vec_ref_ty(ty, false)?;
-
-                let u64_ty = ty_builder.create_u64_ty();
-                interpreter.operand_stack.push_ty(u64_ty)?;
-            },
-            Bytecode::VecImmBorrow(si) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_u64_ty()?;
-                let elem_ref_ty = interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_and_get_vec_elem_ref_ty(ty, false)?;
-
-                interpreter.operand_stack.push_ty(elem_ref_ty)?;
-            },
-            Bytecode::VecMutBorrow(si) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_u64_ty()?;
-                let elem_ref_ty = interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_and_get_vec_elem_ref_ty(ty, true)?;
-                interpreter.operand_stack.push_ty(elem_ref_ty)?;
-            },
-            Bytecode::VecPushBack(si) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                interpreter.operand_stack.pop_ty()?.paranoid_check_eq(ty)?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_vec_ref_ty(ty, true)?;
-            },
-            Bytecode::VecPopBack(si) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                let elem_ty = interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_and_get_vec_elem_ty(ty, true)?;
-                interpreter.operand_stack.push_ty(elem_ty)?;
-            },
-            Bytecode::VecUnpack(si, num) => {
-                let (expected_elem_ty, _) =
-                    ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                let vec_ty = interpreter.operand_stack.pop_ty()?;
-                vec_ty.paranoid_check_is_vec_ty(expected_elem_ty)?;
-                for _ in 0..*num {
-                    interpreter
-                        .operand_stack
-                        .push_ty(expected_elem_ty.clone())?;
-                }
-            },
-            Bytecode::VecSwap(si) => {
-                let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_u64_ty()?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_u64_ty()?;
-                interpreter
-                    .operand_stack
-                    .pop_ty()?
-                    .paranoid_check_is_vec_ref_ty(ty, true)?;
-            },
-        }
-        Ok(())
-    }
-
-    fn verify_pack<'a>(
-        interpreter: &mut Interpreter,
-        field_count: u16,
-        field_tys: impl Iterator<Item = &'a Type>,
-        output_ty: Type,
-    ) -> PartialVMResult<()> {
-        let ability = output_ty.abilities()?;
-
-        // If the struct has a key ability, we expect all of its field to have store ability but not key ability.
-        let field_expected_abilities = if ability.has_key() {
-            ability
-                .remove(Ability::Key)
-                .union(AbilitySet::singleton(Ability::Store))
-        } else {
-            ability
-        };
-        for (ty, expected_ty) in interpreter
-            .operand_stack
-            .popn_tys(field_count)?
-            .into_iter()
-            .zip(field_tys)
-        {
-            // Fields ability should be a subset of the struct ability because abilities can be weakened but not the other direction.
-            // For example, it is ok to have a struct that doesn't have a copy capability where its field is a struct that has copy capability but not vice versa.
-            ty.paranoid_check_abilities(field_expected_abilities)?;
-            ty.paranoid_check_eq(expected_ty)?;
-        }
-
-        interpreter.operand_stack.push_ty(output_ty)
-    }
-
-    fn execute_code_impl(
+    fn execute_code_impl<RTTCheck: RuntimeTypeCheck>(
         &mut self,
         resolver: &Resolver,
-        interpreter: &mut Interpreter,
+        interpreter: &mut InterpreterImpl,
         data_store: &mut TransactionDataCache,
         gas_meter: &mut impl GasMeter,
     ) -> PartialVMResult<ExitCode> {
@@ -2314,12 +1515,12 @@ impl Frame {
 
             if interpreter.paranoid_type_checks {
                 interpreter.operand_stack.check_balance()?;
-                Self::pre_execution_type_stack_transition(
+                RTTCheck::pre_execution_type_stack_transition(
                     &self.local_tys,
                     &self.locals,
                     self.function.ty_args(),
                     resolver,
-                    interpreter,
+                    &mut interpreter.operand_stack,
                     instruction,
                 )?;
             }
@@ -3102,11 +2303,11 @@ impl Frame {
                 },
             }
             if interpreter.paranoid_type_checks {
-                Self::post_execution_type_stack_transition(
+                RTTCheck::post_execution_type_stack_transition(
                     &self.local_tys,
                     self.function.ty_args(),
                     resolver,
-                    interpreter,
+                    &mut interpreter.operand_stack,
                     &mut self.ty_cache,
                     instruction,
                 )?;
diff --git a/third_party/move/move-vm/runtime/src/lib.rs b/third_party/move/move-vm/runtime/src/lib.rs
index 5229117b63b50..e9dd6f69a1f20 100644
--- a/third_party/move/move-vm/runtime/src/lib.rs
+++ b/third_party/move/move-vm/runtime/src/lib.rs
@@ -30,6 +30,8 @@ pub mod module_traversal;
 mod debug;
 
 mod access_control;
+mod frame_type_cache;
+mod runtime_type_checks;
 mod storage;
 
 pub use loader::{LoadedFunction, Module, Script};
diff --git a/third_party/move/move-vm/runtime/src/native_functions.rs b/third_party/move/move-vm/runtime/src/native_functions.rs
index 9a5f4ead8eaa8..8cce249a6ebdd 100644
--- a/third_party/move/move-vm/runtime/src/native_functions.rs
+++ b/third_party/move/move-vm/runtime/src/native_functions.rs
@@ -4,7 +4,7 @@
 
 use crate::{
     data_cache::TransactionDataCache,
-    interpreter::Interpreter,
+    interpreter::InterpreterDebugInterface,
     loader::{Function, Loader, Resolver},
     module_traversal::TraversalContext,
     native_extensions::NativeContextExtensions,
@@ -25,7 +25,6 @@ use move_vm_types::{
 };
 use std::{
     collections::{HashMap, VecDeque},
-    fmt::Write,
     sync::Arc,
 };
 
@@ -97,7 +96,7 @@ impl NativeFunctions {
 }
 
 pub struct NativeContext<'a, 'b, 'c> {
-    interpreter: &'a mut Interpreter,
+    interpreter: &'a mut dyn InterpreterDebugInterface,
     data_store: &'a mut TransactionDataCache<'c>,
     resolver: &'a Resolver<'a>,
     extensions: &'a mut NativeContextExtensions<'b>,
@@ -107,7 +106,7 @@ pub struct NativeContext<'a, 'b, 'c> {
 
 impl<'a, 'b, 'c> NativeContext<'a, 'b, 'c> {
     pub(crate) fn new(
-        interpreter: &'a mut Interpreter,
+        interpreter: &'a mut dyn InterpreterDebugInterface,
         data_store: &'a mut TransactionDataCache<'c>,
         resolver: &'a Resolver<'a>,
         extensions: &'a mut NativeContextExtensions<'b>,
@@ -126,7 +125,7 @@ impl<'a, 'b, 'c> NativeContext<'a, 'b, 'c> {
 }
 
 impl<'a, 'b, 'c> NativeContext<'a, 'b, 'c> {
-    pub fn print_stack_trace<B: Write>(&self, buf: &mut B) -> PartialVMResult<()> {
+    pub fn print_stack_trace(&self, buf: &mut String) -> PartialVMResult<()> {
         self.interpreter.debug_print_stack_trace(buf, self.resolver)
     }
 
diff --git a/third_party/move/move-vm/runtime/src/runtime_type_checks.rs b/third_party/move/move-vm/runtime/src/runtime_type_checks.rs
new file mode 100644
index 0000000000000..38ebb169bc8b2
--- /dev/null
+++ b/third_party/move/move-vm/runtime/src/runtime_type_checks.rs
@@ -0,0 +1,684 @@
+//
Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{frame_type_cache::FrameTypeCache, interpreter::Stack, loader::Resolver};
+use move_binary_format::{
+    errors::*,
+    file_format::{Ability, AbilitySet, Bytecode},
+};
+use move_core_types::vm_status::StatusCode;
+use move_vm_types::loaded_data::runtime_types::Type;
+use move_vm_types::values::Locals;
+
+pub(crate) trait RuntimeTypeCheck {
+    fn pre_execution_type_stack_transition(
+        local_tys: &[Type],
+        locals: &Locals,
+        _ty_args: &[Type],
+        _resolver: &Resolver,
+        operand_stack: &mut Stack,
+        instruction: &Bytecode,
+    ) -> PartialVMResult<()>;
+
+    fn post_execution_type_stack_transition(
+        local_tys: &[Type],
+        ty_args: &[Type],
+        resolver: &Resolver,
+        operand_stack: &mut Stack,
+        ty_cache: &mut FrameTypeCache,
+        instruction: &Bytecode,
+    ) -> PartialVMResult<()>;
+}
+
+fn verify_pack<'a>(
+    operand_stack: &mut Stack,
+    field_count: u16,
+    field_tys: impl Iterator<Item = &'a Type>,
+    output_ty: Type,
+) -> PartialVMResult<()> {
+    let ability = output_ty.abilities()?;
+
+    // If the struct has a key ability, we expect all of its fields to have the store ability but not the key ability.
+    let field_expected_abilities = if ability.has_key() {
+        ability
+            .remove(Ability::Key)
+            .union(AbilitySet::singleton(Ability::Store))
+    } else {
+        ability
+    };
+    for (ty, expected_ty) in operand_stack
+        .popn_tys(field_count)?
+        .into_iter()
+        .zip(field_tys)
+    {
+        // The struct's abilities should be a subset of each field's abilities, because abilities can be weakened but not the other direction.
+        // For example, it is ok for a struct without the copy ability to have a field that has the copy ability, but not vice versa.
+        ty.paranoid_check_abilities(field_expected_abilities)?;
+        ty.paranoid_check_eq(expected_ty)?;
+    }
+
+    operand_stack.push_ty(output_ty)
+}
+
+pub(crate) struct NullRuntimeTypeCheck;
+pub(crate) struct FullRuntimeTypeCheck;
+
+impl RuntimeTypeCheck for NullRuntimeTypeCheck {
+    fn pre_execution_type_stack_transition(
+        _local_tys: &[Type],
+        _locals: &Locals,
+        _ty_args: &[Type],
+        _resolver: &Resolver,
+        _operand_stack: &mut Stack,
+        _instruction: &Bytecode,
+    ) -> PartialVMResult<()> {
+        Ok(())
+    }
+
+    fn post_execution_type_stack_transition(
+        _local_tys: &[Type],
+        _ty_args: &[Type],
+        _resolver: &Resolver,
+        _operand_stack: &mut Stack,
+        _ty_cache: &mut FrameTypeCache,
+        _instruction: &Bytecode,
+    ) -> PartialVMResult<()> {
+        Ok(())
+    }
+}
+
+impl RuntimeTypeCheck for FullRuntimeTypeCheck {
+    /// Paranoid type checks to perform before instruction execution.
+    ///
+    /// Note that most of the checks should happen after instruction execution, because gas charging will happen during
+    /// instruction execution and we want to avoid running code without charging proper gas as much as possible.
+    fn pre_execution_type_stack_transition(
+        local_tys: &[Type],
+        locals: &Locals,
+        _ty_args: &[Type],
+        _resolver: &Resolver,
+        operand_stack: &mut Stack,
+        instruction: &Bytecode,
+    ) -> PartialVMResult<()> {
+        match instruction {
+            // Call instruction will be checked at execute_main.
+            Bytecode::Call(_) | Bytecode::CallGeneric(_) => (),
+            Bytecode::BrFalse(_) | Bytecode::BrTrue(_) => {
+                operand_stack.pop_ty()?;
+            },
+            Bytecode::Branch(_) => (),
+            Bytecode::Ret => {
+                for (idx, ty) in local_tys.iter().enumerate() {
+                    if !locals.is_invalid(idx)?
{ + ty.paranoid_check_has_ability(Ability::Drop)?; + } + } + }, + Bytecode::Abort => { + operand_stack.pop_ty()?; + }, + // StLoc needs to check before execution as we need to check the drop ability of values. + Bytecode::StLoc(idx) => { + let ty = local_tys[*idx as usize].clone(); + let val_ty = operand_stack.pop_ty()?; + ty.paranoid_check_eq(&val_ty)?; + if !locals.is_invalid(*idx as usize)? { + ty.paranoid_check_has_ability(Ability::Drop)?; + } + }, + // We will check the rest of the instructions after execution phase. + Bytecode::Pop + | Bytecode::LdU8(_) + | Bytecode::LdU16(_) + | Bytecode::LdU32(_) + | Bytecode::LdU64(_) + | Bytecode::LdU128(_) + | Bytecode::LdU256(_) + | Bytecode::LdTrue + | Bytecode::LdFalse + | Bytecode::LdConst(_) + | Bytecode::CopyLoc(_) + | Bytecode::MoveLoc(_) + | Bytecode::MutBorrowLoc(_) + | Bytecode::ImmBorrowLoc(_) + | Bytecode::ImmBorrowField(_) + | Bytecode::MutBorrowField(_) + | Bytecode::ImmBorrowFieldGeneric(_) + | Bytecode::MutBorrowFieldGeneric(_) + | Bytecode::Pack(_) + | Bytecode::PackGeneric(_) + | Bytecode::Unpack(_) + | Bytecode::UnpackGeneric(_) + | Bytecode::ReadRef + | Bytecode::WriteRef + | Bytecode::CastU8 + | Bytecode::CastU16 + | Bytecode::CastU32 + | Bytecode::CastU64 + | Bytecode::CastU128 + | Bytecode::CastU256 + | Bytecode::Add + | Bytecode::Sub + | Bytecode::Mul + | Bytecode::Mod + | Bytecode::Div + | Bytecode::BitOr + | Bytecode::BitAnd + | Bytecode::Xor + | Bytecode::Or + | Bytecode::And + | Bytecode::Shl + | Bytecode::Shr + | Bytecode::Lt + | Bytecode::Le + | Bytecode::Gt + | Bytecode::Ge + | Bytecode::Eq + | Bytecode::Neq + | Bytecode::MutBorrowGlobal(_) + | Bytecode::ImmBorrowGlobal(_) + | Bytecode::MutBorrowGlobalGeneric(_) + | Bytecode::ImmBorrowGlobalGeneric(_) + | Bytecode::Exists(_) + | Bytecode::ExistsGeneric(_) + | Bytecode::MoveTo(_) + | Bytecode::MoveToGeneric(_) + | Bytecode::MoveFrom(_) + | Bytecode::MoveFromGeneric(_) + | Bytecode::FreezeRef + | Bytecode::Nop + | Bytecode::Not + | Bytecode::VecPack(_, _) + | Bytecode::VecLen(_) + | Bytecode::VecImmBorrow(_) + | Bytecode::VecMutBorrow(_) + | Bytecode::VecPushBack(_) + | Bytecode::VecPopBack(_) + | Bytecode::VecUnpack(_, _) + | Bytecode::VecSwap(_) => (), + + // Since bytecode version 7 + Bytecode::PackVariant(_) + | Bytecode::PackVariantGeneric(_) + | Bytecode::UnpackVariant(_) + | Bytecode::UnpackVariantGeneric(_) + | Bytecode::TestVariant(_) + | Bytecode::TestVariantGeneric(_) + | Bytecode::MutBorrowVariantField(_) + | Bytecode::MutBorrowVariantFieldGeneric(_) + | Bytecode::ImmBorrowVariantField(_) + | Bytecode::ImmBorrowVariantFieldGeneric(_) => (), + }; + Ok(()) + } + + /// Paranoid type checks to perform after instruction execution. + /// + /// This function and `pre_execution_type_stack_transition` should constitute the full type stack transition for the paranoid mode. + fn post_execution_type_stack_transition( + local_tys: &[Type], + ty_args: &[Type], + resolver: &Resolver, + operand_stack: &mut Stack, + ty_cache: &mut FrameTypeCache, + instruction: &Bytecode, + ) -> PartialVMResult<()> { + let ty_builder = resolver.loader().ty_builder(); + + match instruction { + Bytecode::BrTrue(_) | Bytecode::BrFalse(_) => (), + Bytecode::Branch(_) + | Bytecode::Ret + | Bytecode::Call(_) + | Bytecode::CallGeneric(_) + | Bytecode::Abort => { + // Invariants hold because all of the instructions above will force VM to break from the interpreter loop and thus not hit this code path. 
+ unreachable!("control flow instruction encountered during type check") + }, + Bytecode::Pop => { + let ty = operand_stack.pop_ty()?; + ty.paranoid_check_has_ability(Ability::Drop)?; + }, + Bytecode::LdU8(_) => { + let u8_ty = ty_builder.create_u8_ty(); + operand_stack.push_ty(u8_ty)? + }, + Bytecode::LdU16(_) => { + let u16_ty = ty_builder.create_u16_ty(); + operand_stack.push_ty(u16_ty)? + }, + Bytecode::LdU32(_) => { + let u32_ty = ty_builder.create_u32_ty(); + operand_stack.push_ty(u32_ty)? + }, + Bytecode::LdU64(_) => { + let u64_ty = ty_builder.create_u64_ty(); + operand_stack.push_ty(u64_ty)? + }, + Bytecode::LdU128(_) => { + let u128_ty = ty_builder.create_u128_ty(); + operand_stack.push_ty(u128_ty)? + }, + Bytecode::LdU256(_) => { + let u256_ty = ty_builder.create_u256_ty(); + operand_stack.push_ty(u256_ty)? + }, + Bytecode::LdTrue | Bytecode::LdFalse => { + let bool_ty = ty_builder.create_bool_ty(); + operand_stack.push_ty(bool_ty)? + }, + Bytecode::LdConst(i) => { + let constant = resolver.constant_at(*i); + let ty = ty_builder.create_constant_ty(&constant.type_)?; + operand_stack.push_ty(ty)?; + }, + Bytecode::CopyLoc(idx) => { + let ty = local_tys[*idx as usize].clone(); + ty.paranoid_check_has_ability(Ability::Copy)?; + operand_stack.push_ty(ty)?; + }, + Bytecode::MoveLoc(idx) => { + let ty = local_tys[*idx as usize].clone(); + operand_stack.push_ty(ty)?; + }, + Bytecode::StLoc(_) => (), + Bytecode::MutBorrowLoc(idx) => { + let ty = &local_tys[*idx as usize]; + let mut_ref_ty = ty_builder.create_ref_ty(ty, true)?; + operand_stack.push_ty(mut_ref_ty)?; + }, + Bytecode::ImmBorrowLoc(idx) => { + let ty = &local_tys[*idx as usize]; + let ref_ty = ty_builder.create_ref_ty(ty, false)?; + operand_stack.push_ty(ref_ty)?; + }, + Bytecode::ImmBorrowField(fh_idx) => { + let ty = operand_stack.pop_ty()?; + let expected_ty = resolver.field_handle_to_struct(*fh_idx); + ty.paranoid_check_ref_eq(&expected_ty, false)?; + + let field_ty = resolver.get_field_ty(*fh_idx)?; + let field_ref_ty = ty_builder.create_ref_ty(field_ty, false)?; + operand_stack.push_ty(field_ref_ty)?; + }, + Bytecode::MutBorrowField(fh_idx) => { + let ref_ty = operand_stack.pop_ty()?; + let expected_inner_ty = resolver.field_handle_to_struct(*fh_idx); + ref_ty.paranoid_check_ref_eq(&expected_inner_ty, true)?; + + let field_ty = resolver.get_field_ty(*fh_idx)?; + let field_mut_ref_ty = ty_builder.create_ref_ty(field_ty, true)?; + operand_stack.push_ty(field_mut_ref_ty)?; + }, + Bytecode::ImmBorrowFieldGeneric(idx) => { + let struct_ty = operand_stack.pop_ty()?; + let ((field_ty, _), (expected_struct_ty, _)) = + ty_cache.get_field_type_and_struct_type(*idx, resolver, ty_args)?; + struct_ty.paranoid_check_ref_eq(expected_struct_ty, false)?; + + let field_ref_ty = ty_builder.create_ref_ty(field_ty, false)?; + operand_stack.push_ty(field_ref_ty)?; + }, + Bytecode::MutBorrowFieldGeneric(idx) => { + let struct_ty = operand_stack.pop_ty()?; + let ((field_ty, _), (expected_struct_ty, _)) = + ty_cache.get_field_type_and_struct_type(*idx, resolver, ty_args)?; + struct_ty.paranoid_check_ref_eq(expected_struct_ty, true)?; + + let field_mut_ref_ty = ty_builder.create_ref_ty(field_ty, true)?; + operand_stack.push_ty(field_mut_ref_ty)?; + }, + Bytecode::ImmBorrowVariantField(fh_idx) | Bytecode::MutBorrowVariantField(fh_idx) => { + let is_mut = matches!(instruction, Bytecode::MutBorrowVariantField(..)); + let field_info = resolver.variant_field_info_at(*fh_idx); + let ty = operand_stack.pop_ty()?; + let expected_ty = 
resolver.create_struct_ty(&field_info.definition_struct_type);
+                ty.paranoid_check_ref_eq(&expected_ty, is_mut)?;
+                let field_ty = &field_info.uninstantiated_field_ty;
+                let field_ref_ty = ty_builder.create_ref_ty(field_ty, is_mut)?;
+                operand_stack.push_ty(field_ref_ty)?;
+            },
+            Bytecode::ImmBorrowVariantFieldGeneric(idx)
+            | Bytecode::MutBorrowVariantFieldGeneric(idx) => {
+                let is_mut = matches!(instruction, Bytecode::MutBorrowVariantFieldGeneric(..));
+                let struct_ty = operand_stack.pop_ty()?;
+                let ((field_ty, _), (expected_struct_ty, _)) =
+                    ty_cache.get_variant_field_type_and_struct_type(*idx, resolver, ty_args)?;
+                struct_ty.paranoid_check_ref_eq(expected_struct_ty, is_mut)?;
+                let field_ref_ty = ty_builder.create_ref_ty(field_ty, is_mut)?;
+                operand_stack.push_ty(field_ref_ty)?;
+            },
+            Bytecode::Pack(idx) => {
+                let field_count = resolver.field_count(*idx);
+                let args_ty = resolver.get_struct(*idx)?;
+                let field_tys = args_ty.fields(None)?.iter().map(|(_, ty)| ty);
+                let output_ty = resolver.get_struct_ty(*idx);
+                verify_pack(operand_stack, field_count, field_tys, output_ty)?;
+            },
+            Bytecode::PackGeneric(idx) => {
+                let field_count = resolver.field_instantiation_count(*idx);
+                let output_ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0.clone();
+                let args_ty = ty_cache.get_struct_fields_types(*idx, resolver, ty_args)?;
+
+                if field_count as usize != args_ty.len() {
+                    // A mismatch here indicates an inconsistency between the cache and
+                    // the actual type declaration. The invariant seems impossible to
+                    // violate, but we keep the check for safety, as a previous version
+                    // of this code did, and fail with an invariant violation error
+                    // rather than risking a crash.
+                    return Err(
+                        PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
+                            .with_message("Args count mismatch".to_string()),
+                    );
+                }
+
+                verify_pack(
+                    operand_stack,
+                    field_count,
+                    args_ty.iter().map(|(ty, _)| ty),
+                    output_ty,
+                )?;
+            },
+            Bytecode::Unpack(idx) => {
+                let struct_ty = operand_stack.pop_ty()?;
+                struct_ty.paranoid_check_eq(&resolver.get_struct_ty(*idx))?;
+                let struct_decl = resolver.get_struct(*idx)?;
+                for (_name, ty) in struct_decl.fields(None)?.iter() {
+                    operand_stack.push_ty(ty.clone())?;
+                }
+            },
+            Bytecode::UnpackGeneric(idx) => {
+                let struct_ty = operand_stack.pop_ty()?;
+
+                struct_ty
+                    .paranoid_check_eq(ty_cache.get_struct_type(*idx, resolver, ty_args)?.0)?;
+
+                let struct_fields_types =
+                    ty_cache.get_struct_fields_types(*idx, resolver, ty_args)?;
+                for (ty, _) in struct_fields_types {
+                    operand_stack.push_ty(ty.clone())?;
+                }
+            },
+            Bytecode::PackVariant(idx) => {
+                let info = resolver.get_struct_variant_at(*idx);
+                let field_tys = info
+                    .definition_struct_type
+                    .fields(Some(info.variant))?
+                    .iter()
+                    .map(|(_, ty)| ty);
+                let output_ty = resolver.create_struct_ty(&info.definition_struct_type);
+                verify_pack(operand_stack, info.field_count, field_tys, output_ty)?;
+            },
+            Bytecode::PackVariantGeneric(idx) => {
+                let info = resolver.get_struct_variant_instantiation_at(*idx);
+                let output_ty = ty_cache
+                    .get_struct_variant_type(*idx, resolver, ty_args)?
+ .0 + .clone(); + let args_ty = ty_cache.get_struct_variant_fields_types(*idx, resolver, ty_args)?; + verify_pack( + operand_stack, + info.field_count, + args_ty.iter().map(|(ty, _)| ty), + output_ty, + )?; + }, + Bytecode::UnpackVariant(idx) => { + let info = resolver.get_struct_variant_at(*idx); + let expected_struct_ty = resolver.create_struct_ty(&info.definition_struct_type); + let actual_struct_ty = operand_stack.pop_ty()?; + actual_struct_ty.paranoid_check_eq(&expected_struct_ty)?; + for (_name, ty) in info + .definition_struct_type + .fields(Some(info.variant))? + .iter() + { + operand_stack.push_ty(ty.clone())?; + } + }, + Bytecode::UnpackVariantGeneric(idx) => { + let expected_struct_type = + ty_cache.get_struct_variant_type(*idx, resolver, ty_args)?.0; + let actual_struct_type = operand_stack.pop_ty()?; + actual_struct_type.paranoid_check_eq(expected_struct_type)?; + let struct_fields_types = + ty_cache.get_struct_variant_fields_types(*idx, resolver, ty_args)?; + for (ty, _) in struct_fields_types { + operand_stack.push_ty(ty.clone())?; + } + }, + Bytecode::TestVariant(idx) => { + let info = resolver.get_struct_variant_at(*idx); + let expected_struct_ty = resolver.create_struct_ty(&info.definition_struct_type); + let actual_struct_ty = operand_stack.pop_ty()?; + actual_struct_ty.paranoid_check_ref_eq(&expected_struct_ty, false)?; + operand_stack.push_ty(ty_builder.create_bool_ty())?; + }, + Bytecode::TestVariantGeneric(idx) => { + let expected_struct_ty = + ty_cache.get_struct_variant_type(*idx, resolver, ty_args)?.0; + let actual_struct_ty = operand_stack.pop_ty()?; + actual_struct_ty.paranoid_check_ref_eq(expected_struct_ty, false)?; + operand_stack.push_ty(ty_builder.create_bool_ty())?; + }, + Bytecode::ReadRef => { + let ref_ty = operand_stack.pop_ty()?; + let inner_ty = ref_ty.paranoid_read_ref()?; + operand_stack.push_ty(inner_ty)?; + }, + Bytecode::WriteRef => { + let mut_ref_ty = operand_stack.pop_ty()?; + let val_ty = operand_stack.pop_ty()?; + mut_ref_ty.paranoid_write_ref(&val_ty)?; + }, + Bytecode::CastU8 => { + operand_stack.pop_ty()?; + let u8_ty = ty_builder.create_u8_ty(); + operand_stack.push_ty(u8_ty)?; + }, + Bytecode::CastU16 => { + operand_stack.pop_ty()?; + let u16_ty = ty_builder.create_u16_ty(); + operand_stack.push_ty(u16_ty)?; + }, + Bytecode::CastU32 => { + operand_stack.pop_ty()?; + let u32_ty = ty_builder.create_u32_ty(); + operand_stack.push_ty(u32_ty)?; + }, + Bytecode::CastU64 => { + operand_stack.pop_ty()?; + let u64_ty = ty_builder.create_u64_ty(); + operand_stack.push_ty(u64_ty)?; + }, + Bytecode::CastU128 => { + operand_stack.pop_ty()?; + let u128_ty = ty_builder.create_u128_ty(); + operand_stack.push_ty(u128_ty)?; + }, + Bytecode::CastU256 => { + operand_stack.pop_ty()?; + let u256_ty = ty_builder.create_u256_ty(); + operand_stack.push_ty(u256_ty)?; + }, + Bytecode::Add + | Bytecode::Sub + | Bytecode::Mul + | Bytecode::Mod + | Bytecode::Div + | Bytecode::BitOr + | Bytecode::BitAnd + | Bytecode::Xor + | Bytecode::Or + | Bytecode::And => { + let rhs_ty = operand_stack.pop_ty()?; + let lhs_ty = operand_stack.pop_ty()?; + rhs_ty.paranoid_check_eq(&lhs_ty)?; + operand_stack.push_ty(rhs_ty)?; + }, + Bytecode::Shl | Bytecode::Shr => { + let _rhs = operand_stack.pop_ty()?; + let lhs = operand_stack.pop_ty()?; + operand_stack.push_ty(lhs)?; + }, + Bytecode::Lt | Bytecode::Le | Bytecode::Gt | Bytecode::Ge => { + let rhs_ty = operand_stack.pop_ty()?; + let lhs_ty = operand_stack.pop_ty()?; + rhs_ty.paranoid_check_eq(&lhs_ty)?; + + let bool_ty = 
ty_builder.create_bool_ty(); + operand_stack.push_ty(bool_ty)?; + }, + Bytecode::Eq | Bytecode::Neq => { + let rhs_ty = operand_stack.pop_ty()?; + let lhs_ty = operand_stack.pop_ty()?; + rhs_ty.paranoid_check_eq(&lhs_ty)?; + rhs_ty.paranoid_check_has_ability(Ability::Drop)?; + + let bool_ty = ty_builder.create_bool_ty(); + operand_stack.push_ty(bool_ty)?; + }, + Bytecode::MutBorrowGlobal(idx) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + let struct_ty = resolver.get_struct_ty(*idx); + struct_ty.paranoid_check_has_ability(Ability::Key)?; + + let struct_mut_ref_ty = ty_builder.create_ref_ty(&struct_ty, true)?; + operand_stack.push_ty(struct_mut_ref_ty)?; + }, + Bytecode::ImmBorrowGlobal(idx) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + let struct_ty = resolver.get_struct_ty(*idx); + struct_ty.paranoid_check_has_ability(Ability::Key)?; + + let struct_ref_ty = ty_builder.create_ref_ty(&struct_ty, false)?; + operand_stack.push_ty(struct_ref_ty)?; + }, + Bytecode::MutBorrowGlobalGeneric(idx) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + let struct_ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0; + struct_ty.paranoid_check_has_ability(Ability::Key)?; + + let struct_mut_ref_ty = ty_builder.create_ref_ty(struct_ty, true)?; + operand_stack.push_ty(struct_mut_ref_ty)?; + }, + Bytecode::ImmBorrowGlobalGeneric(idx) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + let struct_ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0; + struct_ty.paranoid_check_has_ability(Ability::Key)?; + + let struct_ref_ty = ty_builder.create_ref_ty(struct_ty, false)?; + operand_stack.push_ty(struct_ref_ty)?; + }, + Bytecode::Exists(_) | Bytecode::ExistsGeneric(_) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + + let bool_ty = ty_builder.create_bool_ty(); + operand_stack.push_ty(bool_ty)?; + }, + Bytecode::MoveTo(idx) => { + let ty = operand_stack.pop_ty()?; + operand_stack.pop_ty()?.paranoid_check_is_signer_ref_ty()?; + ty.paranoid_check_eq(&resolver.get_struct_ty(*idx))?; + ty.paranoid_check_has_ability(Ability::Key)?; + }, + Bytecode::MoveToGeneric(idx) => { + let ty = operand_stack.pop_ty()?; + operand_stack.pop_ty()?.paranoid_check_is_signer_ref_ty()?; + ty.paranoid_check_eq(ty_cache.get_struct_type(*idx, resolver, ty_args)?.0)?; + ty.paranoid_check_has_ability(Ability::Key)?; + }, + Bytecode::MoveFrom(idx) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + let ty = resolver.get_struct_ty(*idx); + ty.paranoid_check_has_ability(Ability::Key)?; + operand_stack.push_ty(ty)?; + }, + Bytecode::MoveFromGeneric(idx) => { + operand_stack.pop_ty()?.paranoid_check_is_address_ty()?; + let ty = ty_cache.get_struct_type(*idx, resolver, ty_args)?.0.clone(); + ty.paranoid_check_has_ability(Ability::Key)?; + operand_stack.push_ty(ty)?; + }, + Bytecode::FreezeRef => { + let mut_ref_ty = operand_stack.pop_ty()?; + let ref_ty = mut_ref_ty.paranoid_freeze_ref_ty()?; + operand_stack.push_ty(ref_ty)?; + }, + Bytecode::Nop => (), + Bytecode::Not => { + operand_stack.pop_ty()?.paranoid_check_is_bool_ty()?; + let bool_ty = ty_builder.create_bool_ty(); + operand_stack.push_ty(bool_ty)?; + }, + Bytecode::VecPack(si, num) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + let elem_tys = operand_stack.popn_tys(*num as u16)?; + for elem_ty in elem_tys.iter() { + elem_ty.paranoid_check_eq(ty)?; + } + + let vec_ty = ty_builder.create_vec_ty(ty)?; + operand_stack.push_ty(vec_ty)?; + }, + 
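+            // The remaining vector instructions validate the operand types popped
+            // from the type stack against the element type resolved from the
+            // signature cache, pushing the resulting types where applicable.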
Bytecode::VecLen(si) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + operand_stack + .pop_ty()? + .paranoid_check_is_vec_ref_ty(ty, false)?; + + let u64_ty = ty_builder.create_u64_ty(); + operand_stack.push_ty(u64_ty)?; + }, + Bytecode::VecImmBorrow(si) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + operand_stack.pop_ty()?.paranoid_check_is_u64_ty()?; + let elem_ref_ty = operand_stack + .pop_ty()? + .paranoid_check_and_get_vec_elem_ref_ty(ty, false)?; + + operand_stack.push_ty(elem_ref_ty)?; + }, + Bytecode::VecMutBorrow(si) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + operand_stack.pop_ty()?.paranoid_check_is_u64_ty()?; + let elem_ref_ty = operand_stack + .pop_ty()? + .paranoid_check_and_get_vec_elem_ref_ty(ty, true)?; + operand_stack.push_ty(elem_ref_ty)?; + }, + Bytecode::VecPushBack(si) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + operand_stack.pop_ty()?.paranoid_check_eq(ty)?; + operand_stack + .pop_ty()? + .paranoid_check_is_vec_ref_ty(ty, true)?; + }, + Bytecode::VecPopBack(si) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + let elem_ty = operand_stack + .pop_ty()? + .paranoid_check_and_get_vec_elem_ty(ty, true)?; + operand_stack.push_ty(elem_ty)?; + }, + Bytecode::VecUnpack(si, num) => { + let (expected_elem_ty, _) = + ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + let vec_ty = operand_stack.pop_ty()?; + vec_ty.paranoid_check_is_vec_ty(expected_elem_ty)?; + for _ in 0..*num { + operand_stack.push_ty(expected_elem_ty.clone())?; + } + }, + Bytecode::VecSwap(si) => { + let (ty, _) = ty_cache.get_signature_index_type(*si, resolver, ty_args)?; + operand_stack.pop_ty()?.paranoid_check_is_u64_ty()?; + operand_stack.pop_ty()?.paranoid_check_is_u64_ty()?; + operand_stack + .pop_ty()? 
+ .paranoid_check_is_vec_ref_ty(ty, true)?; + }, + } + Ok(()) + } +} diff --git a/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs b/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs index fba7581d9d341..90d9789c25822 100644 --- a/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs +++ b/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs @@ -397,9 +397,10 @@ pub(crate) mod test { module_storage.assert_cached_state(vec![&a_id, &c_id], vec![&d_id, &e_id, &f_id, &g_id]); assert_ok!(module_storage.fetch_verified_module(a_id.address(), a_id.name())); - module_storage.assert_cached_state(vec![], vec![ - &a_id, &b_id, &c_id, &d_id, &e_id, &f_id, &g_id, - ]); + module_storage.assert_cached_state( + vec![], + vec![&a_id, &b_id, &c_id, &d_id, &e_id, &f_id, &g_id], + ); } #[test] diff --git a/third_party/move/move-vm/runtime/src/tracing.rs b/third_party/move/move-vm/runtime/src/tracing.rs index 6a747e7a75a86..7b0f5b29881dd 100644 --- a/third_party/move/move-vm/runtime/src/tracing.rs +++ b/third_party/move/move-vm/runtime/src/tracing.rs @@ -6,7 +6,7 @@ use crate::debug::DebugContext; #[cfg(any(debug_assertions, feature = "debugging"))] use crate::{ - interpreter::Interpreter, + interpreter::InterpreterImpl, loader::{LoadedFunction, Resolver}, }; #[cfg(any(debug_assertions, feature = "debugging"))] @@ -72,7 +72,7 @@ pub(crate) fn trace( pc: u16, instr: &Bytecode, resolver: &Resolver, - interp: &Interpreter, + interp: &InterpreterImpl, ) { if *TRACING_ENABLED { let buf_writer = &mut *LOGGING_FILE_WRITER.lock().unwrap(); From a8ba29bf15113280afebc08edf3dae659de23cd3 Mon Sep 17 00:00:00 2001 From: Aleks Zi Date: Thu, 21 Nov 2024 00:40:58 -0500 Subject: [PATCH 2/2] lint --- third_party/move/move-vm/runtime/src/interpreter.rs | 4 +--- .../move/move-vm/runtime/src/runtime_type_checks.rs | 3 +-- .../src/storage/implementations/unsync_module_storage.rs | 7 +++---- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/third_party/move/move-vm/runtime/src/interpreter.rs b/third_party/move/move-vm/runtime/src/interpreter.rs index 00310f994e7bf..16d886eeee996 100644 --- a/third_party/move/move-vm/runtime/src/interpreter.rs +++ b/third_party/move/move-vm/runtime/src/interpreter.rs @@ -10,9 +10,7 @@ use crate::{ module_traversal::TraversalContext, native_extensions::NativeContextExtensions, native_functions::NativeContext, - runtime_type_checks::FullRuntimeTypeCheck, - runtime_type_checks::NullRuntimeTypeCheck, - runtime_type_checks::RuntimeTypeCheck, + runtime_type_checks::{FullRuntimeTypeCheck, NullRuntimeTypeCheck, RuntimeTypeCheck}, trace, LoadedFunction, ModuleStorage, }; use fail::fail_point; diff --git a/third_party/move/move-vm/runtime/src/runtime_type_checks.rs b/third_party/move/move-vm/runtime/src/runtime_type_checks.rs index 38ebb169bc8b2..c56ebf2b0b7a7 100644 --- a/third_party/move/move-vm/runtime/src/runtime_type_checks.rs +++ b/third_party/move/move-vm/runtime/src/runtime_type_checks.rs @@ -7,8 +7,7 @@ use move_binary_format::{ file_format::{Ability, AbilitySet, Bytecode}, }; use move_core_types::vm_status::StatusCode; -use move_vm_types::loaded_data::runtime_types::Type; -use move_vm_types::values::Locals; +use move_vm_types::{loaded_data::runtime_types::Type, values::Locals}; pub(crate) trait RuntimeTypeCheck { fn pre_execution_type_stack_transition( diff --git a/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs 
b/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs index 90d9789c25822..fba7581d9d341 100644 --- a/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs +++ b/third_party/move/move-vm/runtime/src/storage/implementations/unsync_module_storage.rs @@ -397,10 +397,9 @@ pub(crate) mod test { module_storage.assert_cached_state(vec![&a_id, &c_id], vec![&d_id, &e_id, &f_id, &g_id]); assert_ok!(module_storage.fetch_verified_module(a_id.address(), a_id.name())); - module_storage.assert_cached_state( - vec![], - vec![&a_id, &b_id, &c_id, &d_id, &e_id, &f_id, &g_id], - ); + module_storage.assert_cached_state(vec![], vec![ + &a_id, &b_id, &c_id, &d_id, &e_id, &f_id, &g_id, + ]); } #[test]
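
For reviewers, here is a minimal, self-contained sketch of the compile-time switch this
patch introduces. The trait and impl names mirror the ones added here (RuntimeTypeCheck,
FullRuntimeTypeCheck, NullRuntimeTypeCheck); everything else (Instr, TypeStack, VMResult,
step, main) is a simplified stand-in for illustration, not the actual crate API:

    // Simplified stand-ins; the real crate uses Bytecode, the interpreter's operand
    // stack, FrameTypeCache, and PartialVMResult instead of these toy types.
    struct Instr;
    struct TypeStack;
    type VMResult = Result<(), String>;

    trait RuntimeTypeCheck {
        fn pre_execution_type_stack_transition(instr: &Instr, stack: &mut TypeStack) -> VMResult;
        fn post_execution_type_stack_transition(instr: &Instr, stack: &mut TypeStack) -> VMResult;
    }

    // Performs the full paranoid checks, as runtime_type_checks.rs does.
    struct FullRuntimeTypeCheck;
    impl RuntimeTypeCheck for FullRuntimeTypeCheck {
        fn pre_execution_type_stack_transition(_instr: &Instr, _stack: &mut TypeStack) -> VMResult {
            // ... the real per-instruction checks would run here ...
            Ok(())
        }
        fn post_execution_type_stack_transition(_instr: &Instr, _stack: &mut TypeStack) -> VMResult {
            Ok(())
        }
    }

    // No-op checks: with monomorphization these calls compile away entirely.
    struct NullRuntimeTypeCheck;
    impl RuntimeTypeCheck for NullRuntimeTypeCheck {
        fn pre_execution_type_stack_transition(_instr: &Instr, _stack: &mut TypeStack) -> VMResult {
            Ok(())
        }
        fn post_execution_type_stack_transition(_instr: &Instr, _stack: &mut TypeStack) -> VMResult {
            Ok(())
        }
    }

    // The interpreter loop is generic over the checker, so choosing between paranoid
    // and unchecked execution happens once at instantiation time, not per instruction.
    fn step<RTC: RuntimeTypeCheck>(instr: &Instr, stack: &mut TypeStack) -> VMResult {
        RTC::pre_execution_type_stack_transition(instr, stack)?;
        // ... execute the instruction itself ...
        RTC::post_execution_type_stack_transition(instr, stack)
    }

    fn main() -> VMResult {
        let mut stack = TypeStack;
        step::<FullRuntimeTypeCheck>(&Instr, &mut stack)?;
        step::<NullRuntimeTypeCheck>(&Instr, &mut stack)
    }

The design choice this sketch illustrates: by making the checker a type parameter rather
than a runtime flag, the former paranoid-mode branches disappear from the hot interpreter
loop when checks are disabled, while the checked and unchecked paths share one codebase.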