diff --git a/core/src/nan_preserving_float.rs b/core/src/nan_preserving_float.rs index 58c6585bbb..dbac6cf01b 100644 --- a/core/src/nan_preserving_float.rs +++ b/core/src/nan_preserving_float.rs @@ -32,7 +32,7 @@ macro_rules! float { ); }; ($for:ident, $rep:ident, $is:ident, $sign_bit:expr) => { - #[derive(Copy, Clone)] + #[derive(Copy, Clone, Eq)] pub struct $for($rep); impl_binop!($for, $is, Add, add); diff --git a/core/src/value.rs b/core/src/value.rs index abe860ba68..7dc3844d12 100644 --- a/core/src/value.rs +++ b/core/src/value.rs @@ -64,7 +64,7 @@ impl ValueType { /// /// There is no distinction between signed and unsigned integer types. Instead, integers are /// interpreted by respective operations as either unsigned or signed in two’s complement representation. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Value { /// Value of 32-bit signed or unsigned integer. I32(i32), diff --git a/src/isa.rs b/src/isa.rs index 8882d6cce9..df5d3dad21 100644 --- a/src/isa.rs +++ b/src/isa.rs @@ -69,7 +69,10 @@ use alloc::vec::Vec; use parity_wasm::elements::ValueType; -use specs::itable::UnaryOp; +use specs::{ + itable::{BinOp, BinaryOp, BitOp, ConversionOp, RelOp, ShiftOp, UnaryOp, UniArg}, + mtable::{MemoryReadSize, MemoryStoreSize}, +}; /// Should we keep a value before "discarding" a stack frame? /// @@ -145,7 +148,7 @@ pub enum Instruction<'a> { GetLocal(u32, ValueType), /// Pop a value and put it in at the specified depth. - SetLocal(u32, ValueType), + SetLocal(u32, ValueType, UniArg), /// Copy a value to the specified depth. TeeLocal(u32, ValueType), @@ -153,8 +156,8 @@ pub enum Instruction<'a> { /// Similar to the Wasm ones, but instead of a label depth /// they specify direct PC. Br(Target), - BrIfEqz(Target), - BrIfNez(Target), + BrIfEqz(Target, UniArg), + BrIfNez(Target, UniArg), /// br_table [t1 t2 t3 .. tn] tdefault /// @@ -165,75 +168,75 @@ pub enum Instruction<'a> { /// is greater than length of the branch table, then the last index will be used. /// /// Validation ensures that there should be at least one target. 
- BrTable(BrTargets<'a>), + BrTable(BrTargets<'a>, UniArg), Unreachable, Return(DropKeep), Call(u32), - CallIndirect(u32), + CallIndirect(u32, UniArg), Drop, - Select(ValueType), + Select(ValueType, UniArg, UniArg, UniArg), GetGlobal(u32), - SetGlobal(u32), + SetGlobal(u32, UniArg), - I32Load(u32), - I64Load(u32), + I32Load(u32, UniArg), + I64Load(u32, UniArg), F32Load(u32), F64Load(u32), - I32Load8S(u32), - I32Load8U(u32), - I32Load16S(u32), - I32Load16U(u32), - I64Load8S(u32), - I64Load8U(u32), - I64Load16S(u32), - I64Load16U(u32), - I64Load32S(u32), - I64Load32U(u32), - I32Store(u32), - I64Store(u32), + I32Load8S(u32, UniArg), + I32Load8U(u32, UniArg), + I32Load16S(u32, UniArg), + I32Load16U(u32, UniArg), + I64Load8S(u32, UniArg), + I64Load8U(u32, UniArg), + I64Load16S(u32, UniArg), + I64Load16U(u32, UniArg), + I64Load32S(u32, UniArg), + I64Load32U(u32, UniArg), + I32Store(u32, UniArg, UniArg), + I64Store(u32, UniArg, UniArg), F32Store(u32), F64Store(u32), - I32Store8(u32), - I32Store16(u32), - I64Store8(u32), - I64Store16(u32), - I64Store32(u32), + I32Store8(u32, UniArg, UniArg), + I32Store16(u32, UniArg, UniArg), + I64Store8(u32, UniArg, UniArg), + I64Store16(u32, UniArg, UniArg), + I64Store32(u32, UniArg, UniArg), CurrentMemory, - GrowMemory, + GrowMemory(UniArg), I32Const(i32), I64Const(i64), F32Const(u32), F64Const(u64), - I32Eqz, - I32Eq, - I32Ne, - I32LtS, - I32LtU, - I32GtS, - I32GtU, - I32LeS, - I32LeU, - I32GeS, - I32GeU, - - I64Eqz, - I64Eq, - I64Ne, - I64LtS, - I64LtU, - I64GtS, - I64GtU, - I64LeS, - I64LeU, - I64GeS, - I64GeU, + I32Eqz(UniArg), + I32Eq(UniArg, UniArg), + I32Ne(UniArg, UniArg), + I32LtS(UniArg, UniArg), + I32LtU(UniArg, UniArg), + I32GtS(UniArg, UniArg), + I32GtU(UniArg, UniArg), + I32LeS(UniArg, UniArg), + I32LeU(UniArg, UniArg), + I32GeS(UniArg, UniArg), + I32GeU(UniArg, UniArg), + + I64Eqz(UniArg), + I64Eq(UniArg, UniArg), + I64Ne(UniArg, UniArg), + I64LtS(UniArg, UniArg), + I64LtU(UniArg, UniArg), + I64GtS(UniArg, UniArg), + I64GtU(UniArg, UniArg), + I64LeS(UniArg, UniArg), + I64LeU(UniArg, UniArg), + I64GeS(UniArg, UniArg), + I64GeU(UniArg, UniArg), F32Eq, F32Ne, @@ -249,43 +252,43 @@ pub enum Instruction<'a> { F64Le, F64Ge, - I32Clz, - I32Ctz, - I32Popcnt, - I32Add, - I32Sub, - I32Mul, - I32DivS, - I32DivU, - I32RemS, - I32RemU, - I32And, - I32Or, - I32Xor, - I32Shl, - I32ShrS, - I32ShrU, - I32Rotl, - I32Rotr, - - I64Clz, - I64Ctz, - I64Popcnt, - I64Add, - I64Sub, - I64Mul, - I64DivS, - I64DivU, - I64RemS, - I64RemU, - I64And, - I64Or, - I64Xor, - I64Shl, - I64ShrS, - I64ShrU, - I64Rotl, - I64Rotr, + I32Clz(UniArg), + I32Ctz(UniArg), + I32Popcnt(UniArg), + I32Add(UniArg, UniArg), + I32Sub(UniArg, UniArg), + I32Mul(UniArg, UniArg), + I32DivS(UniArg, UniArg), + I32DivU(UniArg, UniArg), + I32RemS(UniArg, UniArg), + I32RemU(UniArg, UniArg), + I32And(UniArg, UniArg), + I32Or(UniArg, UniArg), + I32Xor(UniArg, UniArg), + I32Shl(UniArg, UniArg), + I32ShrS(UniArg, UniArg), + I32ShrU(UniArg, UniArg), + I32Rotl(UniArg, UniArg), + I32Rotr(UniArg, UniArg), + + I64Clz(UniArg), + I64Ctz(UniArg), + I64Popcnt(UniArg), + I64Add(UniArg, UniArg), + I64Sub(UniArg, UniArg), + I64Mul(UniArg, UniArg), + I64DivS(UniArg, UniArg), + I64DivU(UniArg, UniArg), + I64RemS(UniArg, UniArg), + I64RemU(UniArg, UniArg), + I64And(UniArg, UniArg), + I64Or(UniArg, UniArg), + I64Xor(UniArg, UniArg), + I64Shl(UniArg, UniArg), + I64ShrS(UniArg, UniArg), + I64ShrU(UniArg, UniArg), + I64Rotl(UniArg, UniArg), + I64Rotr(UniArg, UniArg), F32Abs, F32Neg, F32Ceil, @@ -315,13 +318,13 @@ pub enum 
Instruction<'a> { F64Max, F64Copysign, - I32WrapI64, + I32WrapI64(UniArg), I32TruncSF32, I32TruncUF32, I32TruncSF64, I32TruncUF64, - I64ExtendSI32, - I64ExtendUI32, + I64ExtendSI32(UniArg), + I64ExtendUI32(UniArg), I64TruncSF32, I64TruncUF32, I64TruncSF64, @@ -342,19 +345,180 @@ pub enum Instruction<'a> { F32ReinterpretI32, F64ReinterpretI64, - I32Extend8S, - I32Extend16S, - I64Extend8S, - I64Extend16S, - I64Extend32S, + I32Extend8S(UniArg), + I32Extend16S(UniArg), + I64Extend8S(UniArg), + I64Extend16S(UniArg), + I64Extend32S(UniArg), } -impl<'a> From<Instruction<'a>> for UnaryOp { - fn from(value: Instruction<'a>) -> Self { +impl<'a> From<&Instruction<'a>> for MemoryReadSize { + fn from(inst: &Instruction<'a>) -> Self { + match inst { + Instruction::I32Load(_, _) => MemoryReadSize::U32, + Instruction::I64Load(_, _) => MemoryReadSize::I64, + Instruction::I32Load8S(_, _) => MemoryReadSize::S8, + Instruction::I32Load8U(_, _) => MemoryReadSize::U8, + Instruction::I32Load16S(_, _) => MemoryReadSize::S16, + Instruction::I32Load16U(_, _) => MemoryReadSize::U16, + Instruction::I64Load8S(_, _) => MemoryReadSize::S8, + Instruction::I64Load8U(_, _) => MemoryReadSize::U8, + Instruction::I64Load16S(_, _) => MemoryReadSize::S16, + Instruction::I64Load16U(_, _) => MemoryReadSize::U16, + Instruction::I64Load32S(_, _) => MemoryReadSize::S32, + Instruction::I64Load32U(_, _) => MemoryReadSize::U32, + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for MemoryStoreSize { + fn from(inst: &Instruction<'a>) -> Self { + match inst { + Instruction::I32Store(_, _, _) => MemoryStoreSize::Byte32, + Instruction::I32Store8(_, _, _) => MemoryStoreSize::Byte8, + Instruction::I32Store16(_, _, _) => MemoryStoreSize::Byte16, + Instruction::I64Store(_, _, _) => MemoryStoreSize::Byte64, + Instruction::I64Store8(_, _, _) => MemoryStoreSize::Byte8, + Instruction::I64Store16(_, _, _) => MemoryStoreSize::Byte16, + Instruction::I64Store32(_, _, _) => MemoryStoreSize::Byte32, + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for BinOp { + fn from(value: &Instruction<'a>) -> Self { + match value { + Instruction::I32Add(_, _) => BinOp::Add, + Instruction::I32Sub(_, _) => BinOp::Sub, + Instruction::I32Mul(_, _) => BinOp::Mul, + Instruction::I32DivS(_, _) => BinOp::SignedDiv, + Instruction::I32DivU(_, _) => BinOp::UnsignedDiv, + Instruction::I32RemS(_, _) => BinOp::SignedRem, + Instruction::I32RemU(_, _) => BinOp::UnsignedRem, + + Instruction::I64Add(_, _) => BinOp::Add, + Instruction::I64Sub(_, _) => BinOp::Sub, + Instruction::I64Mul(_, _) => BinOp::Mul, + Instruction::I64DivS(_, _) => BinOp::SignedDiv, + Instruction::I64DivU(_, _) => BinOp::UnsignedDiv, + Instruction::I64RemS(_, _) => BinOp::SignedRem, + Instruction::I64RemU(_, _) => BinOp::UnsignedRem, + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for BitOp { + fn from(value: &Instruction<'a>) -> Self { + match value { + Instruction::I32And(_, _) | Instruction::I64And(_, _) => BitOp::And, + Instruction::I32Or(_, _) | Instruction::I64Or(_, _) => BitOp::Or, + Instruction::I32Xor(_, _) | Instruction::I64Xor(_, _) => BitOp::Xor, + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for ShiftOp { + fn from(value: &Instruction<'a>) -> Self { + match value { + Instruction::I32Shl(_, _) | Instruction::I64Shl(_, _) => ShiftOp::Shl, + Instruction::I32ShrS(_, _) | Instruction::I64ShrS(_, _) => ShiftOp::SignedShr, + Instruction::I32ShrU(_, _) | Instruction::I64ShrU(_, _) => ShiftOp::UnsignedShr, + Instruction::I32Rotl(_, _) | 
Instruction::I64Rotl(_, _) => ShiftOp::Rotl, + Instruction::I32Rotr(_, _) | Instruction::I64Rotr(_, _) => ShiftOp::Rotr, + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for RelOp { + fn from(value: &Instruction<'a>) -> Self { match value { - Instruction::I32Clz | Instruction::I64Clz => UnaryOp::Clz, - Instruction::I32Ctz | Instruction::I64Ctz => UnaryOp::Ctz, - Instruction::I32Popcnt | Instruction::I64Popcnt => UnaryOp::Popcnt, + Instruction::I32Eq(_, _) | Instruction::I64Eq(_, _) => RelOp::Eq, + Instruction::I32Ne(_, _) | Instruction::I64Ne(_, _) => RelOp::Ne, + Instruction::I32LtS(_, _) | Instruction::I64LtS(_, _) => RelOp::SignedLt, + Instruction::I32LtU(_, _) | Instruction::I64LtU(_, _) => RelOp::UnsignedLt, + Instruction::I32GtS(_, _) | Instruction::I64GtS(_, _) => RelOp::SignedGt, + Instruction::I32GtU(_, _) | Instruction::I64GtU(_, _) => RelOp::UnsignedGt, + Instruction::I32LeS(_, _) | Instruction::I64LeS(_, _) => RelOp::SignedLe, + Instruction::I32LeU(_, _) | Instruction::I64LeU(_, _) => RelOp::UnsignedLe, + Instruction::I32GeS(_, _) | Instruction::I64GeS(_, _) => RelOp::SignedGe, + Instruction::I32GeU(_, _) | Instruction::I64GeU(_, _) => RelOp::UnsignedGe, + + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for BinaryOp { + fn from(value: &Instruction<'a>) -> Self { + match value { + Instruction::I32Add(_, _) => BinOp::Add.into(), + Instruction::I32Sub(_, _) => BinOp::Sub.into(), + Instruction::I32Mul(_, _) => BinOp::Mul.into(), + Instruction::I32DivS(_, _) => BinOp::SignedDiv.into(), + Instruction::I32DivU(_, _) => BinOp::UnsignedDiv.into(), + Instruction::I32RemS(_, _) => BinOp::SignedRem.into(), + Instruction::I32RemU(_, _) => BinOp::UnsignedRem.into(), + Instruction::I64Add(_, _) => BinOp::Add.into(), + Instruction::I64Sub(_, _) => BinOp::Sub.into(), + Instruction::I64Mul(_, _) => BinOp::Mul.into(), + Instruction::I64DivS(_, _) => BinOp::SignedDiv.into(), + Instruction::I64DivU(_, _) => BinOp::UnsignedDiv.into(), + Instruction::I64RemS(_, _) => BinOp::SignedRem.into(), + Instruction::I64RemU(_, _) => BinOp::UnsignedRem.into(), + + Instruction::I32And(_, _) | Instruction::I64And(_, _) => BitOp::And.into(), + Instruction::I32Or(_, _) | Instruction::I64Or(_, _) => BitOp::Or.into(), + Instruction::I32Xor(_, _) | Instruction::I64Xor(_, _) => BitOp::Xor.into(), + + Instruction::I32Shl(_, _) | Instruction::I64Shl(_, _) => ShiftOp::Shl.into(), + Instruction::I32ShrS(_, _) | Instruction::I64ShrS(_, _) => ShiftOp::SignedShr.into(), + Instruction::I32ShrU(_, _) | Instruction::I64ShrU(_, _) => ShiftOp::UnsignedShr.into(), + Instruction::I32Rotl(_, _) | Instruction::I64Rotl(_, _) => ShiftOp::Rotl.into(), + Instruction::I32Rotr(_, _) | Instruction::I64Rotr(_, _) => ShiftOp::Rotr.into(), + + Instruction::I32Eq(_, _) | Instruction::I64Eq(_, _) => RelOp::Eq.into(), + Instruction::I32Ne(_, _) | Instruction::I64Ne(_, _) => RelOp::Ne.into(), + Instruction::I32LtS(_, _) | Instruction::I64LtS(_, _) => RelOp::SignedLt.into(), + Instruction::I32LtU(_, _) | Instruction::I64LtU(_, _) => RelOp::UnsignedLt.into(), + Instruction::I32GtS(_, _) | Instruction::I64GtS(_, _) => RelOp::SignedGt.into(), + Instruction::I32GtU(_, _) | Instruction::I64GtU(_, _) => RelOp::UnsignedGt.into(), + Instruction::I32LeS(_, _) | Instruction::I64LeS(_, _) => RelOp::SignedLe.into(), + Instruction::I32LeU(_, _) | Instruction::I64LeU(_, _) => RelOp::UnsignedLe.into(), + Instruction::I32GeS(_, _) | Instruction::I64GeS(_, _) => RelOp::SignedGe.into(), + Instruction::I32GeU(_, _) | 
Instruction::I64GeU(_, _) => RelOp::UnsignedGe.into(), + + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for UnaryOp { + fn from(value: &Instruction<'a>) -> Self { + match value { + Instruction::I32Clz(_) | Instruction::I64Clz(_) => UnaryOp::Clz, + Instruction::I32Ctz(_) | Instruction::I64Ctz(_) => UnaryOp::Ctz, + Instruction::I32Popcnt(_) | Instruction::I64Popcnt(_) => UnaryOp::Popcnt, + _ => unreachable!(), + } + } +} + +impl<'a> From<&Instruction<'a>> for ConversionOp { + fn from(value: &Instruction<'a>) -> Self { + match value { + Instruction::I32WrapI64(_) => ConversionOp::I32WrapI64, + Instruction::I64ExtendSI32(_) => ConversionOp::I64ExtendI32s, + Instruction::I64ExtendUI32(_) => ConversionOp::I64ExtendI32u, + Instruction::I32Extend8S(_) => ConversionOp::I32Extend8S, + Instruction::I32Extend16S(_) => ConversionOp::I32Extend16S, + Instruction::I64Extend8S(_) => ConversionOp::I64Extend8S, + Instruction::I64Extend16S(_) => ConversionOp::I64Extend16S, + Instruction::I64Extend32S(_) => ConversionOp::I64Extend32S, _ => unreachable!(), } } @@ -372,81 +536,81 @@ impl<'a> From<Instruction<'a>> for UnaryOp { #[allow(clippy::upper_case_acronyms)] pub enum InstructionInternal { GetLocal(u32, ValueType), - SetLocal(u32, ValueType), + SetLocal(u32, ValueType, UniArg), TeeLocal(u32, ValueType), Br(Target), - BrIfEqz(Target), - BrIfNez(Target), - BrTable { count: u32 }, + BrIfEqz(Target, UniArg), + BrIfNez(Target, UniArg), + BrTable { count: u32, arg: UniArg }, BrTableTarget(Target), Unreachable, Return(DropKeep), Call(u32), - CallIndirect(u32), + CallIndirect(u32, UniArg), Drop, - Select(ValueType), + Select(ValueType, UniArg, UniArg, UniArg), GetGlobal(u32), - SetGlobal(u32), + SetGlobal(u32, UniArg), - I32Load(u32), - I64Load(u32), + I32Load(u32, UniArg), + I64Load(u32, UniArg), F32Load(u32), F64Load(u32), - I32Load8S(u32), - I32Load8U(u32), - I32Load16S(u32), - I32Load16U(u32), - I64Load8S(u32), - I64Load8U(u32), - I64Load16S(u32), - I64Load16U(u32), - I64Load32S(u32), - I64Load32U(u32), - I32Store(u32), - I64Store(u32), + I32Load8S(u32, UniArg), + I32Load8U(u32, UniArg), + I32Load16S(u32, UniArg), + I32Load16U(u32, UniArg), + I64Load8S(u32, UniArg), + I64Load8U(u32, UniArg), + I64Load16S(u32, UniArg), + I64Load16U(u32, UniArg), + I64Load32S(u32, UniArg), + I64Load32U(u32, UniArg), + I32Store(u32, UniArg, UniArg), + I64Store(u32, UniArg, UniArg), F32Store(u32), F64Store(u32), - I32Store8(u32), - I32Store16(u32), - I64Store8(u32), - I64Store16(u32), - I64Store32(u32), + I32Store8(u32, UniArg, UniArg), + I32Store16(u32, UniArg, UniArg), + I64Store8(u32, UniArg, UniArg), + I64Store16(u32, UniArg, UniArg), + I64Store32(u32, UniArg, UniArg), CurrentMemory, - GrowMemory, + GrowMemory(UniArg), I32Const(i32), I64Const(i64), F32Const(u32), F64Const(u64), - I32Eqz, - I32Eq, - I32Ne, - I32LtS, - I32LtU, - I32GtS, - I32GtU, - I32LeS, - I32LeU, - I32GeS, - I32GeU, - - I64Eqz, - I64Eq, - I64Ne, - I64LtS, - I64LtU, - I64GtS, - I64GtU, - I64LeS, - I64LeU, - I64GeS, - I64GeU, + I32Eqz(UniArg), + I32Eq(UniArg, UniArg), + I32Ne(UniArg, UniArg), + I32LtS(UniArg, UniArg), + I32LtU(UniArg, UniArg), + I32GtS(UniArg, UniArg), + I32GtU(UniArg, UniArg), + I32LeS(UniArg, UniArg), + I32LeU(UniArg, UniArg), + I32GeS(UniArg, UniArg), + I32GeU(UniArg, UniArg), + + I64Eqz(UniArg), + I64Eq(UniArg, UniArg), + I64Ne(UniArg, UniArg), + I64LtS(UniArg, UniArg), + I64LtU(UniArg, UniArg), + I64GtS(UniArg, UniArg), + I64GtU(UniArg, UniArg), + I64LeS(UniArg, UniArg), + I64LeU(UniArg, UniArg), + I64GeS(UniArg, UniArg), + 
I64GeU(UniArg, UniArg), F32Eq, F32Ne, @@ -462,43 +626,43 @@ pub enum InstructionInternal { F64Le, F64Ge, - I32Clz, - I32Ctz, - I32Popcnt, - I32Add, - I32Sub, - I32Mul, - I32DivS, - I32DivU, - I32RemS, - I32RemU, - I32And, - I32Or, - I32Xor, - I32Shl, - I32ShrS, - I32ShrU, - I32Rotl, - I32Rotr, - - I64Clz, - I64Ctz, - I64Popcnt, - I64Add, - I64Sub, - I64Mul, - I64DivS, - I64DivU, - I64RemS, - I64RemU, - I64And, - I64Or, - I64Xor, - I64Shl, - I64ShrS, - I64ShrU, - I64Rotl, - I64Rotr, + I32Clz(UniArg), + I32Ctz(UniArg), + I32Popcnt(UniArg), + I32Add(UniArg, UniArg), + I32Sub(UniArg, UniArg), + I32Mul(UniArg, UniArg), + I32DivS(UniArg, UniArg), + I32DivU(UniArg, UniArg), + I32RemS(UniArg, UniArg), + I32RemU(UniArg, UniArg), + I32And(UniArg, UniArg), + I32Or(UniArg, UniArg), + I32Xor(UniArg, UniArg), + I32Shl(UniArg, UniArg), + I32ShrS(UniArg, UniArg), + I32ShrU(UniArg, UniArg), + I32Rotl(UniArg, UniArg), + I32Rotr(UniArg, UniArg), + + I64Clz(UniArg), + I64Ctz(UniArg), + I64Popcnt(UniArg), + I64Add(UniArg, UniArg), + I64Sub(UniArg, UniArg), + I64Mul(UniArg, UniArg), + I64DivS(UniArg, UniArg), + I64DivU(UniArg, UniArg), + I64RemS(UniArg, UniArg), + I64RemU(UniArg, UniArg), + I64And(UniArg, UniArg), + I64Or(UniArg, UniArg), + I64Xor(UniArg, UniArg), + I64Shl(UniArg, UniArg), + I64ShrS(UniArg, UniArg), + I64ShrU(UniArg, UniArg), + I64Rotl(UniArg, UniArg), + I64Rotr(UniArg, UniArg), F32Abs, F32Neg, F32Ceil, @@ -528,13 +692,13 @@ pub enum InstructionInternal { F64Max, F64Copysign, - I32WrapI64, + I32WrapI64(UniArg), I32TruncSF32, I32TruncUF32, I32TruncSF64, I32TruncUF64, - I64ExtendSI32, - I64ExtendUI32, + I64ExtendSI32(UniArg), + I64ExtendUI32(UniArg), I64TruncSF32, I64TruncUF32, I64TruncSF64, @@ -555,11 +719,237 @@ pub enum InstructionInternal { F32ReinterpretI32, F64ReinterpretI64, - I32Extend8S, - I32Extend16S, - I64Extend8S, - I64Extend16S, - I64Extend32S, + I32Extend8S(UniArg), + I32Extend16S(UniArg), + I64Extend8S(UniArg), + I64Extend16S(UniArg), + I64Extend32S(UniArg), +} + +impl InstructionInternal { + pub(crate) fn get_uniarg_skip_count(&self) -> usize { + match self { + _ => 0, + } + } + + pub(crate) fn get_uniarg_count(&self) -> usize { + match self { + InstructionInternal::Select(_, _, _, _) => 3, + InstructionInternal::I32Store(_, _, _) + | InstructionInternal::I64Store(_, _, _) + | InstructionInternal::I32Store8(_, _, _) + | InstructionInternal::I32Store16(_, _, _) + | InstructionInternal::I64Store8(_, _, _) + | InstructionInternal::I64Store16(_, _, _) + | InstructionInternal::I64Store32(_, _, _) + | InstructionInternal::I32Eq(_, _) + | InstructionInternal::I32Ne(_, _) + | InstructionInternal::I32LtS(_, _) + | InstructionInternal::I32LtU(_, _) + | InstructionInternal::I32GtS(_, _) + | InstructionInternal::I32GtU(_, _) + | InstructionInternal::I32LeS(_, _) + | InstructionInternal::I32LeU(_, _) + | InstructionInternal::I32GeS(_, _) + | InstructionInternal::I32GeU(_, _) + | InstructionInternal::I64Eq(_, _) + | InstructionInternal::I64Ne(_, _) + | InstructionInternal::I64LtS(_, _) + | InstructionInternal::I64LtU(_, _) + | InstructionInternal::I64GtS(_, _) + | InstructionInternal::I64GtU(_, _) + | InstructionInternal::I64LeS(_, _) + | InstructionInternal::I64LeU(_, _) + | InstructionInternal::I64GeS(_, _) + | InstructionInternal::I64GeU(_, _) + | InstructionInternal::I32Add(_, _) + | InstructionInternal::I32Sub(_, _) + | InstructionInternal::I32Mul(_, _) + | InstructionInternal::I32DivS(_, _) + | InstructionInternal::I32DivU(_, _) + | InstructionInternal::I32RemS(_, _) + | 
InstructionInternal::I32RemU(_, _) + | InstructionInternal::I32And(_, _) + | InstructionInternal::I32Or(_, _) + | InstructionInternal::I32Xor(_, _) + | InstructionInternal::I32Shl(_, _) + | InstructionInternal::I32ShrS(_, _) + | InstructionInternal::I32ShrU(_, _) + | InstructionInternal::I32Rotl(_, _) + | InstructionInternal::I32Rotr(_, _) + | InstructionInternal::I64Add(_, _) + | InstructionInternal::I64Sub(_, _) + | InstructionInternal::I64Mul(_, _) + | InstructionInternal::I64DivS(_, _) + | InstructionInternal::I64DivU(_, _) + | InstructionInternal::I64RemS(_, _) + | InstructionInternal::I64RemU(_, _) + | InstructionInternal::I64And(_, _) + | InstructionInternal::I64Or(_, _) + | InstructionInternal::I64Xor(_, _) + | InstructionInternal::I64Shl(_, _) + | InstructionInternal::I64ShrS(_, _) + | InstructionInternal::I64ShrU(_, _) + | InstructionInternal::I64Rotl(_, _) + | InstructionInternal::I64Rotr(_, _) => 2, + + InstructionInternal::BrIfEqz(_, _) + | InstructionInternal::BrIfNez(_, _) + | InstructionInternal::BrTable { .. } + | InstructionInternal::CallIndirect(_, _) => 1, + + InstructionInternal::SetLocal(_, _, _) + | InstructionInternal::SetGlobal(_, _) + | InstructionInternal::I32Load(_, _) + | InstructionInternal::I64Load(_, _) + | InstructionInternal::I32Load8S(_, _) + | InstructionInternal::I32Load8U(_, _) + | InstructionInternal::I32Load16S(_, _) + | InstructionInternal::I32Load16U(_, _) + | InstructionInternal::I64Load8S(_, _) + | InstructionInternal::I64Load8U(_, _) + | InstructionInternal::I64Load16S(_, _) + | InstructionInternal::I64Load16U(_, _) + | InstructionInternal::I64Load32S(_, _) + | InstructionInternal::I64Load32U(_, _) + | InstructionInternal::GrowMemory(_) + | InstructionInternal::I32Eqz(_) + | InstructionInternal::I64Eqz(_) + | InstructionInternal::I32Clz(_) + | InstructionInternal::I32Ctz(_) + | InstructionInternal::I32Popcnt(_) + | InstructionInternal::I64Clz(_) + | InstructionInternal::I64Ctz(_) + | InstructionInternal::I64Popcnt(_) + | InstructionInternal::I32WrapI64(_) + | InstructionInternal::I64ExtendSI32(_) + | InstructionInternal::I64ExtendUI32(_) + | InstructionInternal::I32Extend8S(_) + | InstructionInternal::I32Extend16S(_) + | InstructionInternal::I64Extend8S(_) + | InstructionInternal::I64Extend16S(_) + | InstructionInternal::I64Extend32S(_) => 1, + _ => 0, + } + } + + pub(crate) fn update_uniarg(&mut self, uniargs: [Option<UniArg>; 3]) { + if self.get_uniarg_count() == 0 { + return; + } + + match self { + InstructionInternal::Select(_, a, b, c) => { + *c = uniargs[0].unwrap(); + *b = uniargs[1].unwrap(); + *a = uniargs[2].unwrap(); + } + + InstructionInternal::I32Store(_, a, b) + | InstructionInternal::I64Store(_, a, b) + | InstructionInternal::I32Store8(_, a, b) + | InstructionInternal::I32Store16(_, a, b) + | InstructionInternal::I64Store8(_, a, b) + | InstructionInternal::I64Store16(_, a, b) + | InstructionInternal::I64Store32(_, a, b) + | InstructionInternal::I32Eq(a, b) + | InstructionInternal::I32Ne(a, b) + | InstructionInternal::I32LtS(a, b) + | InstructionInternal::I32LtU(a, b) + | InstructionInternal::I32GtS(a, b) + | InstructionInternal::I32GtU(a, b) + | InstructionInternal::I32LeS(a, b) + | InstructionInternal::I32LeU(a, b) + | InstructionInternal::I32GeS(a, b) + | InstructionInternal::I32GeU(a, b) + | InstructionInternal::I64Eq(a, b) + | InstructionInternal::I64Ne(a, b) + | InstructionInternal::I64LtS(a, b) + | InstructionInternal::I64LtU(a, b) + | InstructionInternal::I64GtS(a, b) + | InstructionInternal::I64GtU(a, b) + | 
InstructionInternal::I64LeS(a, b) + | InstructionInternal::I64LeU(a, b) + | InstructionInternal::I64GeS(a, b) + | InstructionInternal::I64GeU(a, b) + | InstructionInternal::I32Add(a, b) + | InstructionInternal::I32Sub(a, b) + | InstructionInternal::I32Mul(a, b) + | InstructionInternal::I32DivS(a, b) + | InstructionInternal::I32DivU(a, b) + | InstructionInternal::I32RemS(a, b) + | InstructionInternal::I32RemU(a, b) + | InstructionInternal::I32And(a, b) + | InstructionInternal::I32Or(a, b) + | InstructionInternal::I32Xor(a, b) + | InstructionInternal::I32Shl(a, b) + | InstructionInternal::I32ShrS(a, b) + | InstructionInternal::I32ShrU(a, b) + | InstructionInternal::I32Rotl(a, b) + | InstructionInternal::I32Rotr(a, b) + | InstructionInternal::I64Add(a, b) + | InstructionInternal::I64Sub(a, b) + | InstructionInternal::I64Mul(a, b) + | InstructionInternal::I64DivS(a, b) + | InstructionInternal::I64DivU(a, b) + | InstructionInternal::I64RemS(a, b) + | InstructionInternal::I64RemU(a, b) + | InstructionInternal::I64And(a, b) + | InstructionInternal::I64Or(a, b) + | InstructionInternal::I64Xor(a, b) + | InstructionInternal::I64Shl(a, b) + | InstructionInternal::I64ShrS(a, b) + | InstructionInternal::I64ShrU(a, b) + | InstructionInternal::I64Rotl(a, b) + | InstructionInternal::I64Rotr(a, b) => { + *b = uniargs[0].unwrap(); + *a = uniargs[1].unwrap(); + } + + InstructionInternal::BrIfEqz(_, arg) + | InstructionInternal::BrIfNez(_, arg) + | InstructionInternal::BrTable { arg, .. } + | InstructionInternal::CallIndirect(_, arg) => { + *arg = uniargs[0].unwrap(); + } + + InstructionInternal::SetLocal(_, _, arg) + | InstructionInternal::SetGlobal(_, arg) + | InstructionInternal::I32Load(_, arg) + | InstructionInternal::I64Load(_, arg) + | InstructionInternal::I32Load8S(_, arg) + | InstructionInternal::I32Load8U(_, arg) + | InstructionInternal::I32Load16S(_, arg) + | InstructionInternal::I32Load16U(_, arg) + | InstructionInternal::I64Load8S(_, arg) + | InstructionInternal::I64Load8U(_, arg) + | InstructionInternal::I64Load16S(_, arg) + | InstructionInternal::I64Load16U(_, arg) + | InstructionInternal::I64Load32S(_, arg) + | InstructionInternal::I64Load32U(_, arg) + | InstructionInternal::GrowMemory(arg) + | InstructionInternal::I32Eqz(arg) + | InstructionInternal::I64Eqz(arg) + | InstructionInternal::I32Clz(arg) + | InstructionInternal::I32Ctz(arg) + | InstructionInternal::I32Popcnt(arg) + | InstructionInternal::I64Clz(arg) + | InstructionInternal::I64Ctz(arg) + | InstructionInternal::I64Popcnt(arg) + | InstructionInternal::I32WrapI64(arg) + | InstructionInternal::I64ExtendSI32(arg) + | InstructionInternal::I64ExtendUI32(arg) + | InstructionInternal::I32Extend8S(arg) + | InstructionInternal::I32Extend16S(arg) + | InstructionInternal::I64Extend8S(arg) + | InstructionInternal::I64Extend16S(arg) + | InstructionInternal::I64Extend32S(arg) => { + *arg = uniargs[0].unwrap(); + } + _ => {} + } + } } #[derive(Debug, Clone)] @@ -586,8 +976,8 @@ impl Instructions { match reloc { Reloc::Br { pc } => match self.vec[pc as usize] { InstructionInternal::Br(ref mut target) - | InstructionInternal::BrIfEqz(ref mut target) - | InstructionInternal::BrIfNez(ref mut target) => target.dst_pc = dst_pc, + | InstructionInternal::BrIfEqz(ref mut target, _) + | InstructionInternal::BrIfNez(ref mut target, _) => target.dst_pc = dst_pc, _ => panic!("branch relocation points to a non-branch instruction"), }, Reloc::BrTable { pc, idx } => match &mut self.vec[pc as usize + idx + 1] { @@ -626,19 +1016,20 @@ impl<'a> Iterator for 
InstructionIter<'a> { let out = match *internal { InstructionInternal::GetLocal(x, typ) => Instruction::GetLocal(x, typ), - InstructionInternal::SetLocal(x, typ) => Instruction::SetLocal(x, typ), + InstructionInternal::SetLocal(x, typ, arg) => Instruction::SetLocal(x, typ, arg), InstructionInternal::TeeLocal(x, typ) => Instruction::TeeLocal(x, typ), InstructionInternal::Br(x) => Instruction::Br(x), - InstructionInternal::BrIfEqz(x) => Instruction::BrIfEqz(x), - InstructionInternal::BrIfNez(x) => Instruction::BrIfNez(x), - InstructionInternal::BrTable { count } => { + InstructionInternal::BrIfEqz(x, arg) => Instruction::BrIfEqz(x, arg), + InstructionInternal::BrIfNez(x, arg) => Instruction::BrIfNez(x, arg), + InstructionInternal::BrTable { count, arg } => { let start = self.position as usize + 1; self.position += count; - Instruction::BrTable(BrTargets::from_internal( - &self.instructions[start..start + count as usize], - )) + Instruction::BrTable( + BrTargets::from_internal(&self.instructions[start..start + count as usize]), + arg, + ) } InstructionInternal::BrTableTarget(_) => panic!("Executed BrTableTarget"), @@ -646,69 +1037,77 @@ impl<'a> Iterator for InstructionIter<'a> { InstructionInternal::Return(x) => Instruction::Return(x), InstructionInternal::Call(x) => Instruction::Call(x), - InstructionInternal::CallIndirect(x) => Instruction::CallIndirect(x), + InstructionInternal::CallIndirect(x, arg) => Instruction::CallIndirect(x, arg), InstructionInternal::Drop => Instruction::Drop, - InstructionInternal::Select(vtype) => Instruction::Select(vtype), + InstructionInternal::Select(vtype, lhs, rhs, cond) => { + Instruction::Select(vtype, lhs, rhs, cond) + } InstructionInternal::GetGlobal(x) => Instruction::GetGlobal(x), - InstructionInternal::SetGlobal(x) => Instruction::SetGlobal(x), + InstructionInternal::SetGlobal(x, arg) => Instruction::SetGlobal(x, arg), - InstructionInternal::I32Load(x) => Instruction::I32Load(x), - InstructionInternal::I64Load(x) => Instruction::I64Load(x), + InstructionInternal::I32Load(x, arg) => Instruction::I32Load(x, arg), + InstructionInternal::I64Load(x, arg) => Instruction::I64Load(x, arg), InstructionInternal::F32Load(x) => Instruction::F32Load(x), InstructionInternal::F64Load(x) => Instruction::F64Load(x), - InstructionInternal::I32Load8S(x) => Instruction::I32Load8S(x), - InstructionInternal::I32Load8U(x) => Instruction::I32Load8U(x), - InstructionInternal::I32Load16S(x) => Instruction::I32Load16S(x), - InstructionInternal::I32Load16U(x) => Instruction::I32Load16U(x), - InstructionInternal::I64Load8S(x) => Instruction::I64Load8S(x), - InstructionInternal::I64Load8U(x) => Instruction::I64Load8U(x), - InstructionInternal::I64Load16S(x) => Instruction::I64Load16S(x), - InstructionInternal::I64Load16U(x) => Instruction::I64Load16U(x), - InstructionInternal::I64Load32S(x) => Instruction::I64Load32S(x), - InstructionInternal::I64Load32U(x) => Instruction::I64Load32U(x), - InstructionInternal::I32Store(x) => Instruction::I32Store(x), - InstructionInternal::I64Store(x) => Instruction::I64Store(x), + InstructionInternal::I32Load8S(x, arg) => Instruction::I32Load8S(x, arg), + InstructionInternal::I32Load8U(x, arg) => Instruction::I32Load8U(x, arg), + InstructionInternal::I32Load16S(x, arg) => Instruction::I32Load16S(x, arg), + InstructionInternal::I32Load16U(x, arg) => Instruction::I32Load16U(x, arg), + InstructionInternal::I64Load8S(x, arg) => Instruction::I64Load8S(x, arg), + InstructionInternal::I64Load8U(x, arg) => Instruction::I64Load8U(x, arg), + 
InstructionInternal::I64Load16S(x, arg) => Instruction::I64Load16S(x, arg), + InstructionInternal::I64Load16U(x, arg) => Instruction::I64Load16U(x, arg), + InstructionInternal::I64Load32S(x, arg) => Instruction::I64Load32S(x, arg), + InstructionInternal::I64Load32U(x, arg) => Instruction::I64Load32U(x, arg), + InstructionInternal::I32Store(x, arg0, arg1) => Instruction::I32Store(x, arg0, arg1), + InstructionInternal::I64Store(x, arg0, arg1) => Instruction::I64Store(x, arg0, arg1), InstructionInternal::F32Store(x) => Instruction::F32Store(x), InstructionInternal::F64Store(x) => Instruction::F64Store(x), - InstructionInternal::I32Store8(x) => Instruction::I32Store8(x), - InstructionInternal::I32Store16(x) => Instruction::I32Store16(x), - InstructionInternal::I64Store8(x) => Instruction::I64Store8(x), - InstructionInternal::I64Store16(x) => Instruction::I64Store16(x), - InstructionInternal::I64Store32(x) => Instruction::I64Store32(x), + InstructionInternal::I32Store8(x, arg0, arg1) => Instruction::I32Store8(x, arg0, arg1), + InstructionInternal::I32Store16(x, arg0, arg1) => { + Instruction::I32Store16(x, arg0, arg1) + } + InstructionInternal::I64Store8(x, arg0, arg1) => Instruction::I64Store8(x, arg0, arg1), + InstructionInternal::I64Store16(x, arg0, arg1) => { + Instruction::I64Store16(x, arg0, arg1) + } + InstructionInternal::I64Store32(x, arg0, arg1) => { + Instruction::I64Store32(x, arg0, arg1) + } InstructionInternal::CurrentMemory => Instruction::CurrentMemory, - InstructionInternal::GrowMemory => Instruction::GrowMemory, + InstructionInternal::GrowMemory(arg) => Instruction::GrowMemory(arg), InstructionInternal::I32Const(x) => Instruction::I32Const(x), InstructionInternal::I64Const(x) => Instruction::I64Const(x), InstructionInternal::F32Const(x) => Instruction::F32Const(x), InstructionInternal::F64Const(x) => Instruction::F64Const(x), - InstructionInternal::I32Eqz => Instruction::I32Eqz, - InstructionInternal::I32Eq => Instruction::I32Eq, - InstructionInternal::I32Ne => Instruction::I32Ne, - InstructionInternal::I32LtS => Instruction::I32LtS, - InstructionInternal::I32LtU => Instruction::I32LtU, - InstructionInternal::I32GtS => Instruction::I32GtS, - InstructionInternal::I32GtU => Instruction::I32GtU, - InstructionInternal::I32LeS => Instruction::I32LeS, - InstructionInternal::I32LeU => Instruction::I32LeU, - InstructionInternal::I32GeS => Instruction::I32GeS, - InstructionInternal::I32GeU => Instruction::I32GeU, - - InstructionInternal::I64Eqz => Instruction::I64Eqz, - InstructionInternal::I64Eq => Instruction::I64Eq, - InstructionInternal::I64Ne => Instruction::I64Ne, - InstructionInternal::I64LtS => Instruction::I64LtS, - InstructionInternal::I64LtU => Instruction::I64LtU, - InstructionInternal::I64GtS => Instruction::I64GtS, - InstructionInternal::I64GtU => Instruction::I64GtU, - InstructionInternal::I64LeS => Instruction::I64LeS, - InstructionInternal::I64LeU => Instruction::I64LeU, - InstructionInternal::I64GeS => Instruction::I64GeS, - InstructionInternal::I64GeU => Instruction::I64GeU, + InstructionInternal::I32Eqz(arg) => Instruction::I32Eqz(arg), + InstructionInternal::I32Eq(arg0, arg1) => Instruction::I32Eq(arg0, arg1), + InstructionInternal::I32Ne(arg0, arg1) => Instruction::I32Ne(arg0, arg1), + InstructionInternal::I32LtS(arg0, arg1) => Instruction::I32LtS(arg0, arg1), + InstructionInternal::I32LtU(arg0, arg1) => Instruction::I32LtU(arg0, arg1), + InstructionInternal::I32GtS(arg0, arg1) => Instruction::I32GtS(arg0, arg1), + InstructionInternal::I32GtU(arg0, arg1) => 
Instruction::I32GtU(arg0, arg1), + InstructionInternal::I32LeS(arg0, arg1) => Instruction::I32LeS(arg0, arg1), + InstructionInternal::I32LeU(arg0, arg1) => Instruction::I32LeU(arg0, arg1), + InstructionInternal::I32GeS(arg0, arg1) => Instruction::I32GeS(arg0, arg1), + InstructionInternal::I32GeU(arg0, arg1) => Instruction::I32GeU(arg0, arg1), + + InstructionInternal::I64Eqz(arg) => Instruction::I64Eqz(arg), + InstructionInternal::I64Eq(arg0, arg1) => Instruction::I64Eq(arg0, arg1), + InstructionInternal::I64Ne(arg0, arg1) => Instruction::I64Ne(arg0, arg1), + InstructionInternal::I64LtS(arg0, arg1) => Instruction::I64LtS(arg0, arg1), + InstructionInternal::I64LtU(arg0, arg1) => Instruction::I64LtU(arg0, arg1), + InstructionInternal::I64GtS(arg0, arg1) => Instruction::I64GtS(arg0, arg1), + InstructionInternal::I64GtU(arg0, arg1) => Instruction::I64GtU(arg0, arg1), + InstructionInternal::I64LeS(arg0, arg1) => Instruction::I64LeS(arg0, arg1), + InstructionInternal::I64LeU(arg0, arg1) => Instruction::I64LeU(arg0, arg1), + InstructionInternal::I64GeS(arg0, arg1) => Instruction::I64GeS(arg0, arg1), + InstructionInternal::I64GeU(arg0, arg1) => Instruction::I64GeU(arg0, arg1), InstructionInternal::F32Eq => Instruction::F32Eq, InstructionInternal::F32Ne => Instruction::F32Ne, @@ -724,43 +1123,43 @@ impl<'a> Iterator for InstructionIter<'a> { InstructionInternal::F64Le => Instruction::F64Le, InstructionInternal::F64Ge => Instruction::F64Ge, - InstructionInternal::I32Clz => Instruction::I32Clz, - InstructionInternal::I32Ctz => Instruction::I32Ctz, - InstructionInternal::I32Popcnt => Instruction::I32Popcnt, - InstructionInternal::I32Add => Instruction::I32Add, - InstructionInternal::I32Sub => Instruction::I32Sub, - InstructionInternal::I32Mul => Instruction::I32Mul, - InstructionInternal::I32DivS => Instruction::I32DivS, - InstructionInternal::I32DivU => Instruction::I32DivU, - InstructionInternal::I32RemS => Instruction::I32RemS, - InstructionInternal::I32RemU => Instruction::I32RemU, - InstructionInternal::I32And => Instruction::I32And, - InstructionInternal::I32Or => Instruction::I32Or, - InstructionInternal::I32Xor => Instruction::I32Xor, - InstructionInternal::I32Shl => Instruction::I32Shl, - InstructionInternal::I32ShrS => Instruction::I32ShrS, - InstructionInternal::I32ShrU => Instruction::I32ShrU, - InstructionInternal::I32Rotl => Instruction::I32Rotl, - InstructionInternal::I32Rotr => Instruction::I32Rotr, - - InstructionInternal::I64Clz => Instruction::I64Clz, - InstructionInternal::I64Ctz => Instruction::I64Ctz, - InstructionInternal::I64Popcnt => Instruction::I64Popcnt, - InstructionInternal::I64Add => Instruction::I64Add, - InstructionInternal::I64Sub => Instruction::I64Sub, - InstructionInternal::I64Mul => Instruction::I64Mul, - InstructionInternal::I64DivS => Instruction::I64DivS, - InstructionInternal::I64DivU => Instruction::I64DivU, - InstructionInternal::I64RemS => Instruction::I64RemS, - InstructionInternal::I64RemU => Instruction::I64RemU, - InstructionInternal::I64And => Instruction::I64And, - InstructionInternal::I64Or => Instruction::I64Or, - InstructionInternal::I64Xor => Instruction::I64Xor, - InstructionInternal::I64Shl => Instruction::I64Shl, - InstructionInternal::I64ShrS => Instruction::I64ShrS, - InstructionInternal::I64ShrU => Instruction::I64ShrU, - InstructionInternal::I64Rotl => Instruction::I64Rotl, - InstructionInternal::I64Rotr => Instruction::I64Rotr, + InstructionInternal::I32Clz(arg) => Instruction::I32Clz(arg), + InstructionInternal::I32Ctz(arg) => 
Instruction::I32Ctz(arg), + InstructionInternal::I32Popcnt(arg) => Instruction::I32Popcnt(arg), + InstructionInternal::I32Add(arg0, arg1) => Instruction::I32Add(arg0, arg1), + InstructionInternal::I32Sub(arg0, arg1) => Instruction::I32Sub(arg0, arg1), + InstructionInternal::I32Mul(arg0, arg1) => Instruction::I32Mul(arg0, arg1), + InstructionInternal::I32DivS(arg0, arg1) => Instruction::I32DivS(arg0, arg1), + InstructionInternal::I32DivU(arg0, arg1) => Instruction::I32DivU(arg0, arg1), + InstructionInternal::I32RemS(arg0, arg1) => Instruction::I32RemS(arg0, arg1), + InstructionInternal::I32RemU(arg0, arg1) => Instruction::I32RemU(arg0, arg1), + InstructionInternal::I32And(arg0, arg1) => Instruction::I32And(arg0, arg1), + InstructionInternal::I32Or(arg0, arg1) => Instruction::I32Or(arg0, arg1), + InstructionInternal::I32Xor(arg0, arg1) => Instruction::I32Xor(arg0, arg1), + InstructionInternal::I32Shl(arg0, arg1) => Instruction::I32Shl(arg0, arg1), + InstructionInternal::I32ShrS(arg0, arg1) => Instruction::I32ShrS(arg0, arg1), + InstructionInternal::I32ShrU(arg0, arg1) => Instruction::I32ShrU(arg0, arg1), + InstructionInternal::I32Rotl(arg0, arg1) => Instruction::I32Rotl(arg0, arg1), + InstructionInternal::I32Rotr(arg0, arg1) => Instruction::I32Rotr(arg0, arg1), + + InstructionInternal::I64Clz(arg) => Instruction::I64Clz(arg), + InstructionInternal::I64Ctz(arg) => Instruction::I64Ctz(arg), + InstructionInternal::I64Popcnt(arg) => Instruction::I64Popcnt(arg), + InstructionInternal::I64Add(arg0, arg1) => Instruction::I64Add(arg0, arg1), + InstructionInternal::I64Sub(arg0, arg1) => Instruction::I64Sub(arg0, arg1), + InstructionInternal::I64Mul(arg0, arg1) => Instruction::I64Mul(arg0, arg1), + InstructionInternal::I64DivS(arg0, arg1) => Instruction::I64DivS(arg0, arg1), + InstructionInternal::I64DivU(arg0, arg1) => Instruction::I64DivU(arg0, arg1), + InstructionInternal::I64RemS(arg0, arg1) => Instruction::I64RemS(arg0, arg1), + InstructionInternal::I64RemU(arg0, arg1) => Instruction::I64RemU(arg0, arg1), + InstructionInternal::I64And(arg0, arg1) => Instruction::I64And(arg0, arg1), + InstructionInternal::I64Or(arg0, arg1) => Instruction::I64Or(arg0, arg1), + InstructionInternal::I64Xor(arg0, arg1) => Instruction::I64Xor(arg0, arg1), + InstructionInternal::I64Shl(arg0, arg1) => Instruction::I64Shl(arg0, arg1), + InstructionInternal::I64ShrS(arg0, arg1) => Instruction::I64ShrS(arg0, arg1), + InstructionInternal::I64ShrU(arg0, arg1) => Instruction::I64ShrU(arg0, arg1), + InstructionInternal::I64Rotl(arg0, arg1) => Instruction::I64Rotl(arg0, arg1), + InstructionInternal::I64Rotr(arg0, arg1) => Instruction::I64Rotr(arg0, arg1), InstructionInternal::F32Abs => Instruction::F32Abs, InstructionInternal::F32Neg => Instruction::F32Neg, InstructionInternal::F32Ceil => Instruction::F32Ceil, @@ -790,13 +1189,13 @@ impl<'a> Iterator for InstructionIter<'a> { InstructionInternal::F64Max => Instruction::F64Max, InstructionInternal::F64Copysign => Instruction::F64Copysign, - InstructionInternal::I32WrapI64 => Instruction::I32WrapI64, + InstructionInternal::I32WrapI64(arg) => Instruction::I32WrapI64(arg), InstructionInternal::I32TruncSF32 => Instruction::I32TruncSF32, InstructionInternal::I32TruncUF32 => Instruction::I32TruncUF32, InstructionInternal::I32TruncSF64 => Instruction::I32TruncSF64, InstructionInternal::I32TruncUF64 => Instruction::I32TruncUF64, - InstructionInternal::I64ExtendSI32 => Instruction::I64ExtendSI32, - InstructionInternal::I64ExtendUI32 => Instruction::I64ExtendUI32, + 
InstructionInternal::I64ExtendSI32(arg) => Instruction::I64ExtendSI32(arg), + InstructionInternal::I64ExtendUI32(arg) => Instruction::I64ExtendUI32(arg), InstructionInternal::I64TruncSF32 => Instruction::I64TruncSF32, InstructionInternal::I64TruncUF32 => Instruction::I64TruncUF32, InstructionInternal::I64TruncSF64 => Instruction::I64TruncSF64, @@ -817,11 +1216,11 @@ impl<'a> Iterator for InstructionIter<'a> { InstructionInternal::F32ReinterpretI32 => Instruction::F32ReinterpretI32, InstructionInternal::F64ReinterpretI64 => Instruction::F64ReinterpretI64, - InstructionInternal::I32Extend8S => Instruction::I32Extend8S, - InstructionInternal::I32Extend16S => Instruction::I32Extend16S, - InstructionInternal::I64Extend8S => Instruction::I64Extend8S, - InstructionInternal::I64Extend16S => Instruction::I64Extend16S, - InstructionInternal::I64Extend32S => Instruction::I64Extend32S, + InstructionInternal::I32Extend8S(arg) => Instruction::I32Extend8S(arg), + InstructionInternal::I32Extend16S(arg) => Instruction::I32Extend16S(arg), + InstructionInternal::I64Extend8S(arg) => Instruction::I64Extend8S(arg), + InstructionInternal::I64Extend16S(arg) => Instruction::I64Extend16S(arg), + InstructionInternal::I64Extend32S(arg) => Instruction::I64Extend32S(arg), }; self.position += 1; diff --git a/src/prepare/compile.rs b/src/prepare/compile.rs index ee34e51e60..03cd55acef 100644 --- a/src/prepare/compile.rs +++ b/src/prepare/compile.rs @@ -1,6 +1,7 @@ use alloc::{string::String, vec::Vec}; use parity_wasm::elements::{BlockType, FuncBody, Instruction, SignExtInstruction, ValueType}; +use specs::{itable::UniArg, types::Value}; use crate::isa::{self, InstructionInternal}; use validation::{ @@ -328,7 +329,7 @@ impl Compiler { CallIndirect(index, _reserved) => { context.step(instruction)?; self.sink - .emit(isa::InstructionInternal::CallIndirect(index)); + .emit(isa::InstructionInternal::CallIndirect(index, UniArg::Pop)); } Drop => { @@ -338,7 +339,12 @@ impl Compiler { Select => { context.step(instruction)?; if let StackValueType::Specific(t) = context.value_stack.top()? 
{ - self.sink.emit(isa::InstructionInternal::Select(*t)); + self.sink.emit(isa::InstructionInternal::Select( + *t, + UniArg::Pop, + UniArg::Pop, + UniArg::Pop, + )); } else { unreachable!() } @@ -350,15 +356,17 @@ impl Compiler { let (depth, typ) = relative_local_depth_type(index, &context.locals, &context.value_stack)?; context.step(instruction)?; - self.sink - .emit(isa::InstructionInternal::GetLocal(depth, typ)); + self.sink.emit_uniarg( + UniArg::Stack(depth as usize), + isa::InstructionInternal::GetLocal(depth, typ), + ); } SetLocal(index) => { context.step(instruction)?; let (depth, typ) = relative_local_depth_type(index, &context.locals, &context.value_stack)?; self.sink - .emit(isa::InstructionInternal::SetLocal(depth, typ)); + .emit(isa::InstructionInternal::SetLocal(depth, typ, UniArg::Pop)); } TeeLocal(index) => { context.step(instruction)?; @@ -373,16 +381,19 @@ impl Compiler { } SetGlobal(index) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::SetGlobal(index)); + self.sink + .emit(isa::InstructionInternal::SetGlobal(index, UniArg::Pop)); } I32Load(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Load(offset)); + self.sink + .emit(isa::InstructionInternal::I32Load(offset, UniArg::Pop)); } I64Load(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load(offset, UniArg::Pop)); } F32Load(_, offset) => { context.step(instruction)?; @@ -394,52 +405,70 @@ impl Compiler { } I32Load8S(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Load8S(offset)); + self.sink + .emit(isa::InstructionInternal::I32Load8S(offset, UniArg::Pop)); } I32Load8U(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Load8U(offset)); + self.sink + .emit(isa::InstructionInternal::I32Load8U(offset, UniArg::Pop)); } I32Load16S(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Load16S(offset)); + self.sink + .emit(isa::InstructionInternal::I32Load16S(offset, UniArg::Pop)); } I32Load16U(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Load16U(offset)); + self.sink + .emit(isa::InstructionInternal::I32Load16U(offset, UniArg::Pop)); } I64Load8S(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load8S(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load8S(offset, UniArg::Pop)); } I64Load8U(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load8U(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load8U(offset, UniArg::Pop)); } I64Load16S(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load16S(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load16S(offset, UniArg::Pop)); } I64Load16U(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load16U(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load16U(offset, UniArg::Pop)); } I64Load32S(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load32S(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load32S(offset, UniArg::Pop)); } I64Load32U(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Load32U(offset)); + self.sink + .emit(isa::InstructionInternal::I64Load32U(offset, 
UniArg::Pop)); } I32Store(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Store(offset)); + self.sink.emit(isa::InstructionInternal::I32Store( + offset, + UniArg::Pop, + UniArg::Pop, + )); } I64Store(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Store(offset)); + self.sink.emit(isa::InstructionInternal::I64Store( + offset, + UniArg::Pop, + UniArg::Pop, + )); } F32Store(_, offset) => { context.step(instruction)?; @@ -451,23 +480,43 @@ impl Compiler { } I32Store8(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Store8(offset)); + self.sink.emit(isa::InstructionInternal::I32Store8( + offset, + UniArg::Pop, + UniArg::Pop, + )); } I32Store16(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Store16(offset)); + self.sink.emit(isa::InstructionInternal::I32Store16( + offset, + UniArg::Pop, + UniArg::Pop, + )); } I64Store8(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Store8(offset)); + self.sink.emit(isa::InstructionInternal::I64Store8( + offset, + UniArg::Pop, + UniArg::Pop, + )); } I64Store16(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Store16(offset)); + self.sink.emit(isa::InstructionInternal::I64Store16( + offset, + UniArg::Pop, + UniArg::Pop, + )); } I64Store32(_, offset) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Store32(offset)); + self.sink.emit(isa::InstructionInternal::I64Store32( + offset, + UniArg::Pop, + UniArg::Pop, + )); } CurrentMemory(_) => { @@ -476,16 +525,23 @@ impl Compiler { } GrowMemory(_) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::GrowMemory); + self.sink + .emit(isa::InstructionInternal::GrowMemory(UniArg::Pop)); } I32Const(v) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Const(v)); + self.sink.emit_uniarg( + UniArg::IConst(Value::I32(v)), + isa::InstructionInternal::I32Const(v), + ); } I64Const(v) => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Const(v)); + self.sink.emit_uniarg( + UniArg::IConst(Value::I64(v)), + isa::InstructionInternal::I64Const(v), + ); } F32Const(v) => { context.step(instruction)?; @@ -498,92 +554,114 @@ impl Compiler { I32Eqz => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Eqz); + self.sink + .emit(isa::InstructionInternal::I32Eqz(UniArg::Pop)); } I32Eq => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Eq); + self.sink + .emit(isa::InstructionInternal::I32Eq(UniArg::Pop, UniArg::Pop)); } I32Ne => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Ne); + self.sink + .emit(isa::InstructionInternal::I32Ne(UniArg::Pop, UniArg::Pop)); } I32LtS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32LtS); + self.sink + .emit(isa::InstructionInternal::I32LtS(UniArg::Pop, UniArg::Pop)); } I32LtU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32LtU); + self.sink + .emit(isa::InstructionInternal::I32LtU(UniArg::Pop, UniArg::Pop)); } I32GtS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32GtS); + self.sink + .emit(isa::InstructionInternal::I32GtS(UniArg::Pop, UniArg::Pop)); } I32GtU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32GtU); + self.sink + 
.emit(isa::InstructionInternal::I32GtU(UniArg::Pop, UniArg::Pop)); } I32LeS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32LeS); + self.sink + .emit(isa::InstructionInternal::I32LeS(UniArg::Pop, UniArg::Pop)); } I32LeU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32LeU); + self.sink + .emit(isa::InstructionInternal::I32LeU(UniArg::Pop, UniArg::Pop)); } I32GeS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32GeS); + self.sink + .emit(isa::InstructionInternal::I32GeS(UniArg::Pop, UniArg::Pop)); } I32GeU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32GeU); + self.sink + .emit(isa::InstructionInternal::I32GeU(UniArg::Pop, UniArg::Pop)); } I64Eqz => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Eqz); + self.sink + .emit(isa::InstructionInternal::I64Eqz(UniArg::Pop)); } I64Eq => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Eq); + self.sink + .emit(isa::InstructionInternal::I64Eq(UniArg::Pop, UniArg::Pop)); } I64Ne => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Ne); + self.sink + .emit(isa::InstructionInternal::I64Ne(UniArg::Pop, UniArg::Pop)); } I64LtS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64LtS); + self.sink + .emit(isa::InstructionInternal::I64LtS(UniArg::Pop, UniArg::Pop)); } I64LtU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64LtU); + self.sink + .emit(isa::InstructionInternal::I64LtU(UniArg::Pop, UniArg::Pop)); } I64GtS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64GtS); + self.sink + .emit(isa::InstructionInternal::I64GtS(UniArg::Pop, UniArg::Pop)); } I64GtU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64GtU); + self.sink + .emit(isa::InstructionInternal::I64GtU(UniArg::Pop, UniArg::Pop)); } I64LeS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64LeS); + self.sink + .emit(isa::InstructionInternal::I64LeS(UniArg::Pop, UniArg::Pop)); } I64LeU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64LeU); + self.sink + .emit(isa::InstructionInternal::I64LeU(UniArg::Pop, UniArg::Pop)); } I64GeS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64GeS); + self.sink + .emit(isa::InstructionInternal::I64GeS(UniArg::Pop, UniArg::Pop)); } I64GeU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64GeU); + self.sink + .emit(isa::InstructionInternal::I64GeU(UniArg::Pop, UniArg::Pop)); } F32Eq => { @@ -638,148 +716,184 @@ impl Compiler { I32Clz => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Clz); + self.sink + .emit(isa::InstructionInternal::I32Clz(UniArg::Pop)); } I32Ctz => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Ctz); + self.sink + .emit(isa::InstructionInternal::I32Ctz(UniArg::Pop)); } I32Popcnt => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Popcnt); + self.sink + .emit(isa::InstructionInternal::I32Popcnt(UniArg::Pop)); } I32Add => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Add); + self.sink + .emit(isa::InstructionInternal::I32Add(UniArg::Pop, UniArg::Pop)); } I32Sub => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Sub); + self.sink + .emit(isa::InstructionInternal::I32Sub(UniArg::Pop, 
UniArg::Pop)); } I32Mul => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Mul); + self.sink + .emit(isa::InstructionInternal::I32Mul(UniArg::Pop, UniArg::Pop)); } I32DivS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32DivS); + self.sink + .emit(isa::InstructionInternal::I32DivS(UniArg::Pop, UniArg::Pop)); } I32DivU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32DivU); + self.sink + .emit(isa::InstructionInternal::I32DivU(UniArg::Pop, UniArg::Pop)); } I32RemS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32RemS); + self.sink + .emit(isa::InstructionInternal::I32RemS(UniArg::Pop, UniArg::Pop)); } I32RemU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32RemU); + self.sink + .emit(isa::InstructionInternal::I32RemU(UniArg::Pop, UniArg::Pop)); } I32And => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32And); + self.sink + .emit(isa::InstructionInternal::I32And(UniArg::Pop, UniArg::Pop)); } I32Or => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Or); + self.sink + .emit(isa::InstructionInternal::I32Or(UniArg::Pop, UniArg::Pop)); } I32Xor => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Xor); + self.sink + .emit(isa::InstructionInternal::I32Xor(UniArg::Pop, UniArg::Pop)); } I32Shl => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Shl); + self.sink + .emit(isa::InstructionInternal::I32Shl(UniArg::Pop, UniArg::Pop)); } I32ShrS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32ShrS); + self.sink + .emit(isa::InstructionInternal::I32ShrS(UniArg::Pop, UniArg::Pop)); } I32ShrU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32ShrU); + self.sink + .emit(isa::InstructionInternal::I32ShrU(UniArg::Pop, UniArg::Pop)); } I32Rotl => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Rotl); + self.sink + .emit(isa::InstructionInternal::I32Rotl(UniArg::Pop, UniArg::Pop)); } I32Rotr => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32Rotr); + self.sink + .emit(isa::InstructionInternal::I32Rotr(UniArg::Pop, UniArg::Pop)); } I64Clz => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Clz); + self.sink + .emit(isa::InstructionInternal::I64Clz(UniArg::Pop)); } I64Ctz => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Ctz); + self.sink + .emit(isa::InstructionInternal::I64Ctz(UniArg::Pop)); } I64Popcnt => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Popcnt); + self.sink + .emit(isa::InstructionInternal::I64Popcnt(UniArg::Pop)); } I64Add => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Add); + self.sink + .emit(isa::InstructionInternal::I64Add(UniArg::Pop, UniArg::Pop)); } I64Sub => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Sub); + self.sink + .emit(isa::InstructionInternal::I64Sub(UniArg::Pop, UniArg::Pop)); } I64Mul => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Mul); + self.sink + .emit(isa::InstructionInternal::I64Mul(UniArg::Pop, UniArg::Pop)); } I64DivS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64DivS); + self.sink + .emit(isa::InstructionInternal::I64DivS(UniArg::Pop, UniArg::Pop)); } I64DivU => { context.step(instruction)?; - 
self.sink.emit(isa::InstructionInternal::I64DivU); + self.sink + .emit(isa::InstructionInternal::I64DivU(UniArg::Pop, UniArg::Pop)); } I64RemS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64RemS); + self.sink + .emit(isa::InstructionInternal::I64RemS(UniArg::Pop, UniArg::Pop)); } I64RemU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64RemU); + self.sink + .emit(isa::InstructionInternal::I64RemU(UniArg::Pop, UniArg::Pop)); } I64And => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64And); + self.sink + .emit(isa::InstructionInternal::I64And(UniArg::Pop, UniArg::Pop)); } I64Or => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Or); + self.sink + .emit(isa::InstructionInternal::I64Or(UniArg::Pop, UniArg::Pop)); } I64Xor => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Xor); + self.sink + .emit(isa::InstructionInternal::I64Xor(UniArg::Pop, UniArg::Pop)); } I64Shl => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Shl); + self.sink + .emit(isa::InstructionInternal::I64Shl(UniArg::Pop, UniArg::Pop)); } I64ShrS => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64ShrS); + self.sink + .emit(isa::InstructionInternal::I64ShrS(UniArg::Pop, UniArg::Pop)); } I64ShrU => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64ShrU); + self.sink + .emit(isa::InstructionInternal::I64ShrU(UniArg::Pop, UniArg::Pop)); } I64Rotl => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Rotl); + self.sink + .emit(isa::InstructionInternal::I64Rotl(UniArg::Pop, UniArg::Pop)); } I64Rotr => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64Rotr); + self.sink + .emit(isa::InstructionInternal::I64Rotr(UniArg::Pop, UniArg::Pop)); } F32Abs => { @@ -898,7 +1012,8 @@ impl Compiler { I32WrapI64 => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I32WrapI64); + self.sink + .emit(isa::InstructionInternal::I32WrapI64(UniArg::Pop)); } I32TruncSF32 => { context.step(instruction)?; @@ -918,11 +1033,13 @@ impl Compiler { } I64ExtendSI32 => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64ExtendSI32); + self.sink + .emit(isa::InstructionInternal::I64ExtendSI32(UniArg::Pop)); } I64ExtendUI32 => { context.step(instruction)?; - self.sink.emit(isa::InstructionInternal::I64ExtendUI32); + self.sink + .emit(isa::InstructionInternal::I64ExtendUI32(UniArg::Pop)); } I64TruncSF32 => { context.step(instruction)?; @@ -999,19 +1116,24 @@ impl Compiler { } SignExt(ref ext) => match ext { SignExtInstruction::I32Extend8S => { - self.sink.emit(isa::InstructionInternal::I32Extend8S); + self.sink + .emit(isa::InstructionInternal::I32Extend8S(UniArg::Pop)); } SignExtInstruction::I32Extend16S => { - self.sink.emit(isa::InstructionInternal::I32Extend16S); + self.sink + .emit(isa::InstructionInternal::I32Extend16S(UniArg::Pop)); } SignExtInstruction::I64Extend8S => { - self.sink.emit(isa::InstructionInternal::I64Extend8S); + self.sink + .emit(isa::InstructionInternal::I64Extend8S(UniArg::Pop)); } SignExtInstruction::I64Extend16S => { - self.sink.emit(isa::InstructionInternal::I64Extend16S); + self.sink + .emit(isa::InstructionInternal::I64Extend16S(UniArg::Pop)); } SignExtInstruction::I64Extend32S => { - self.sink.emit(isa::InstructionInternal::I64Extend32S); + self.sink + .emit(isa::InstructionInternal::I64Extend32S(UniArg::Pop)); } }, _ 
=> {
@@ -1208,6 +1330,9 @@ enum Label {
 struct Sink {
     ins: isa::Instructions,
     labels: Vec<(Label, Vec<isa::Reloc>)>,
+
+    /// For UniArg optimizer
+    uncommitted_uniarg: Vec<(UniArg, isa::InstructionInternal)>,
 }
 
 impl Sink {
@@ -1215,6 +1340,7 @@ impl Sink {
         Sink {
             ins: isa::Instructions::with_capacity(capacity),
             labels: Vec::new(),
+            uncommitted_uniarg: vec![],
         }
     }
 
@@ -1236,11 +1362,57 @@ impl Sink {
         }
     }
 
-    fn emit(&mut self, instruction: isa::InstructionInternal) {
+    fn commit_uniarg(&mut self) {
+        let mut const_ins = vec![];
+        std::mem::swap(&mut const_ins, &mut self.uncommitted_uniarg);
+        for ins in const_ins {
+            self.ins.push(ins.1)
+        }
+    }
+
+    fn emit_uniarg(&mut self, uniarg: UniArg, instruction: isa::InstructionInternal) {
+        self.uncommitted_uniarg.push((uniarg, instruction));
+    }
+
+    fn emit(&mut self, mut instruction: isa::InstructionInternal) {
+        if !self.uncommitted_uniarg.is_empty() {
+            let mut args = [None; 3];
+
+            for _ in 0..instruction.get_uniarg_skip_count() {
+                self.ins.push(self.uncommitted_uniarg.pop().unwrap().1)
+            }
+
+            for i in 0..instruction.get_uniarg_count() {
+                args[i] = self.uncommitted_uniarg.pop().map(|x| x.0);
+                if args[i].is_some() {
+                    for j in 0..i {
+                        args[j]
+                            .iter_mut()
+                            .next()
+                            .map(|x| x.try_decrease_stack_depth(1));
+                    }
+                } else {
+                    args[i] = Some(UniArg::Pop)
+                }
+            }
+
+            instruction.update_uniarg(args);
+
+            /*
+             * pending instructions
+             * if !self.uncommitted_uniarg.is_empty() {
+             *     println!("this instruction is {:?}", instruction)
+             * }
+             */
+            self.commit_uniarg();
+        }
+
         self.ins.push(instruction);
     }
 
     fn emit_br(&mut self, target: Target) {
+        self.commit_uniarg();
+
         let Target { label, drop_keep } = target;
         let pc = self.cur_pc();
         let dst_pc = self.pc_or_placeholder(label, || isa::Reloc::Br { pc });
@@ -1251,34 +1423,39 @@ impl Sink {
     }
 
     fn emit_br_eqz(&mut self, target: Target) {
+        self.commit_uniarg();
+
         let Target { label, drop_keep } = target;
         let pc = self.cur_pc();
         let dst_pc = self.pc_or_placeholder(label, || isa::Reloc::Br { pc });
-        self.ins
-            .push(isa::InstructionInternal::BrIfEqz(isa::Target {
-                dst_pc,
-                drop_keep,
-            }));
+        self.ins.push(isa::InstructionInternal::BrIfEqz(
+            isa::Target { dst_pc, drop_keep },
+            UniArg::Pop,
+        ));
     }
 
     fn emit_br_nez(&mut self, target: Target) {
+        self.commit_uniarg();
+
         let Target { label, drop_keep } = target;
         let pc = self.cur_pc();
        let dst_pc = self.pc_or_placeholder(label, || isa::Reloc::Br { pc });
-        self.ins
-            .push(isa::InstructionInternal::BrIfNez(isa::Target {
-                dst_pc,
-                drop_keep,
-            }));
+        self.ins.push(isa::InstructionInternal::BrIfNez(
+            isa::Target { dst_pc, drop_keep },
+            UniArg::Pop,
+        ));
     }
 
     fn emit_br_table(&mut self, targets: &[Target], default: Target) {
+        self.commit_uniarg();
+
         use core::iter;
 
         let pc = self.cur_pc();
 
         self.ins.push(isa::InstructionInternal::BrTable {
             count: targets.len() as u32 + 1,
+            arg: UniArg::Pop,
         });
 
         for (idx, &Target { label, drop_keep }) in
@@ -1295,6 +1472,8 @@ impl Sink {
 
     /// Create a new unresolved label.
     fn new_label(&mut self) -> LabelId {
+        self.commit_uniarg();
+
         let label_idx = self.labels.len();
         self.labels.push((Label::NotResolved, Vec::new()));
         LabelId(label_idx)
@@ -1304,6 +1483,8 @@ impl Sink {
     ///
     /// Panics if the label is already resolved.
fn resolve_label(&mut self, label: LabelId) { + self.commit_uniarg(); + use core::mem; if let (Label::Resolved(_), _) = self.labels[label.0] { diff --git a/src/runner.rs b/src/runner.rs index a888b1d90b..57d836f324 100644 --- a/src/runner.rs +++ b/src/runner.rs @@ -28,7 +28,7 @@ use crate::{ use alloc::{boxed::Box, vec::Vec}; use core::{fmt, ops, u32, usize}; use parity_wasm::elements::Local; -use specs::mtable::VarType; +use specs::{itable::UniArg, mtable::VarType, types::Value}; use validation::{DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX}; /// Maximum number of bytes on the value stack. @@ -65,6 +65,15 @@ impl ValueInternal { } } +impl From<&Value> for ValueInternal { + fn from(value: &Value) -> Self { + match value { + Value::I32(val) => (*val).into(), + Value::I64(val) => (*val).into(), + } + } +} + pub trait FromValueInternal where Self: Sized, @@ -450,220 +459,236 @@ impl<'m> Interpreter<'m> { isa::Instruction::Unreachable => self.run_unreachable(context), isa::Instruction::Br(target) => self.run_br(context, *target), - isa::Instruction::BrIfEqz(target) => self.run_br_eqz(*target), - isa::Instruction::BrIfNez(target) => self.run_br_nez(*target), - isa::Instruction::BrTable(targets) => self.run_br_table(*targets), + isa::Instruction::BrIfEqz(target, a) => self.run_br_eqz(*target, a), + isa::Instruction::BrIfNez(target, a) => self.run_br_nez(*target, a), + isa::Instruction::BrTable(targets, a) => self.run_br_table(*targets, a), isa::Instruction::Return(drop_keep, ..) => self.run_return(*drop_keep), isa::Instruction::Call(index) => self.run_call(context, *index), - isa::Instruction::CallIndirect(index) => self.run_call_indirect(context, *index), + isa::Instruction::CallIndirect(index, a) => self.run_call_indirect(context, *index, a), isa::Instruction::Drop => self.run_drop(), - isa::Instruction::Select(_) => self.run_select(), + isa::Instruction::Select(_, lhs, rhs, cond) => self.run_select(lhs, rhs, cond), isa::Instruction::GetLocal(depth, ..) => self.run_get_local(*depth), - isa::Instruction::SetLocal(depth, ..) => self.run_set_local(*depth), + isa::Instruction::SetLocal(depth, _, arg0) => self.run_set_local(*depth, arg0), isa::Instruction::TeeLocal(depth, ..) 
=> self.run_tee_local(*depth), isa::Instruction::GetGlobal(index) => self.run_get_global(context, *index), - isa::Instruction::SetGlobal(index) => self.run_set_global(context, *index), - - isa::Instruction::I32Load(offset) => self.run_load::(context, *offset), - isa::Instruction::I64Load(offset) => self.run_load::(context, *offset), - isa::Instruction::F32Load(offset) => self.run_load::(context, *offset), - isa::Instruction::F64Load(offset) => self.run_load::(context, *offset), - isa::Instruction::I32Load8S(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::SetGlobal(index, a) => self.run_set_global(context, *index, a), + + isa::Instruction::I32Load(offset, a) => self.run_load::(context, *offset, a), + isa::Instruction::I64Load(offset, a) => self.run_load::(context, *offset, a), + isa::Instruction::F32Load(offset) => { + self.run_load::(context, *offset, &UniArg::Pop) } - isa::Instruction::I32Load8U(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::F64Load(offset) => { + self.run_load::(context, *offset, &UniArg::Pop) } - isa::Instruction::I32Load16S(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I32Load8S(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I32Load16U(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I32Load8U(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I64Load8S(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I32Load16S(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I64Load8U(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I32Load16U(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I64Load16S(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I64Load8S(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I64Load16U(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I64Load8U(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I64Load32S(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I64Load16S(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I64Load32U(offset) => { - self.run_load_extend::(context, *offset) + isa::Instruction::I64Load16U(offset, a) => { + self.run_load_extend::(context, *offset, a) + } + isa::Instruction::I64Load32S(offset, a) => { + self.run_load_extend::(context, *offset, a) + } + isa::Instruction::I64Load32U(offset, a) => { + self.run_load_extend::(context, *offset, a) } - isa::Instruction::I32Store(offset) => self.run_store::(context, *offset), - isa::Instruction::I64Store(offset) => self.run_store::(context, *offset), - isa::Instruction::F32Store(offset) => self.run_store::(context, *offset), - isa::Instruction::F64Store(offset) => self.run_store::(context, *offset), - isa::Instruction::I32Store8(offset) => self.run_store_wrap::(context, *offset), - isa::Instruction::I32Store16(offset) => { - self.run_store_wrap::(context, *offset) + isa::Instruction::I32Store(offset, a0, a1) => { + self.run_store::(context, *offset, a0, a1) + } + isa::Instruction::I64Store(offset, a0, a1) => { + self.run_store::(context, *offset, a0, a1) + } + isa::Instruction::F32Store(offset) => { + self.run_store::(context, *offset, &UniArg::Pop, &UniArg::Pop) + } + isa::Instruction::F64Store(offset) => { 
+ self.run_store::(context, *offset, &UniArg::Pop, &UniArg::Pop) } - isa::Instruction::I64Store8(offset) => self.run_store_wrap::(context, *offset), - isa::Instruction::I64Store16(offset) => { - self.run_store_wrap::(context, *offset) + isa::Instruction::I32Store8(offset, a0, a1) => { + self.run_store_wrap::(context, *offset, a0, a1) } - isa::Instruction::I64Store32(offset) => { - self.run_store_wrap::(context, *offset) + isa::Instruction::I32Store16(offset, a0, a1) => { + self.run_store_wrap::(context, *offset, a0, a1) + } + isa::Instruction::I64Store8(offset, a0, a1) => { + self.run_store_wrap::(context, *offset, a0, a1) + } + isa::Instruction::I64Store16(offset, a0, a1) => { + self.run_store_wrap::(context, *offset, a0, a1) + } + isa::Instruction::I64Store32(offset, a0, a1) => { + self.run_store_wrap::(context, *offset, a0, a1) } isa::Instruction::CurrentMemory => self.run_current_memory(context), - isa::Instruction::GrowMemory => self.run_grow_memory(context), + isa::Instruction::GrowMemory(a) => self.run_grow_memory(context, a), isa::Instruction::I32Const(val) => self.run_const((*val).into()), isa::Instruction::I64Const(val) => self.run_const((*val).into()), isa::Instruction::F32Const(val) => self.run_const((*val).into()), isa::Instruction::F64Const(val) => self.run_const((*val).into()), - isa::Instruction::I32Eqz => self.run_eqz::(), - isa::Instruction::I32Eq => self.run_eq::(), - isa::Instruction::I32Ne => self.run_ne::(), - isa::Instruction::I32LtS => self.run_lt::(), - isa::Instruction::I32LtU => self.run_lt::(), - isa::Instruction::I32GtS => self.run_gt::(), - isa::Instruction::I32GtU => self.run_gt::(), - isa::Instruction::I32LeS => self.run_lte::(), - isa::Instruction::I32LeU => self.run_lte::(), - isa::Instruction::I32GeS => self.run_gte::(), - isa::Instruction::I32GeU => self.run_gte::(), - - isa::Instruction::I64Eqz => self.run_eqz::(), - isa::Instruction::I64Eq => self.run_eq::(), - isa::Instruction::I64Ne => self.run_ne::(), - isa::Instruction::I64LtS => self.run_lt::(), - isa::Instruction::I64LtU => self.run_lt::(), - isa::Instruction::I64GtS => self.run_gt::(), - isa::Instruction::I64GtU => self.run_gt::(), - isa::Instruction::I64LeS => self.run_lte::(), - isa::Instruction::I64LeU => self.run_lte::(), - isa::Instruction::I64GeS => self.run_gte::(), - isa::Instruction::I64GeU => self.run_gte::(), - - isa::Instruction::F32Eq => self.run_eq::(), - isa::Instruction::F32Ne => self.run_ne::(), - isa::Instruction::F32Lt => self.run_lt::(), - isa::Instruction::F32Gt => self.run_gt::(), - isa::Instruction::F32Le => self.run_lte::(), - isa::Instruction::F32Ge => self.run_gte::(), - - isa::Instruction::F64Eq => self.run_eq::(), - isa::Instruction::F64Ne => self.run_ne::(), - isa::Instruction::F64Lt => self.run_lt::(), - isa::Instruction::F64Gt => self.run_gt::(), - isa::Instruction::F64Le => self.run_lte::(), - isa::Instruction::F64Ge => self.run_gte::(), - - isa::Instruction::I32Clz => self.run_clz::(), - isa::Instruction::I32Ctz => self.run_ctz::(), - isa::Instruction::I32Popcnt => self.run_popcnt::(), - isa::Instruction::I32Add => self.run_add::(), - isa::Instruction::I32Sub => self.run_sub::(), - isa::Instruction::I32Mul => self.run_mul::(), - isa::Instruction::I32DivS => self.run_div::(), - isa::Instruction::I32DivU => self.run_div::(), - isa::Instruction::I32RemS => self.run_rem::(), - isa::Instruction::I32RemU => self.run_rem::(), - isa::Instruction::I32And => self.run_and::(), - isa::Instruction::I32Or => self.run_or::(), - isa::Instruction::I32Xor => self.run_xor::(), - 
isa::Instruction::I32Shl => self.run_shl::(0x1F), - isa::Instruction::I32ShrS => self.run_shr::(0x1F), - isa::Instruction::I32ShrU => self.run_shr::(0x1F), - isa::Instruction::I32Rotl => self.run_rotl::(), - isa::Instruction::I32Rotr => self.run_rotr::(), - - isa::Instruction::I64Clz => self.run_clz::(), - isa::Instruction::I64Ctz => self.run_ctz::(), - isa::Instruction::I64Popcnt => self.run_popcnt::(), - isa::Instruction::I64Add => self.run_add::(), - isa::Instruction::I64Sub => self.run_sub::(), - isa::Instruction::I64Mul => self.run_mul::(), - isa::Instruction::I64DivS => self.run_div::(), - isa::Instruction::I64DivU => self.run_div::(), - isa::Instruction::I64RemS => self.run_rem::(), - isa::Instruction::I64RemU => self.run_rem::(), - isa::Instruction::I64And => self.run_and::(), - isa::Instruction::I64Or => self.run_or::(), - isa::Instruction::I64Xor => self.run_xor::(), - isa::Instruction::I64Shl => self.run_shl::(0x3F), - isa::Instruction::I64ShrS => self.run_shr::(0x3F), - isa::Instruction::I64ShrU => self.run_shr::(0x3F), - isa::Instruction::I64Rotl => self.run_rotl::(), - isa::Instruction::I64Rotr => self.run_rotr::(), - - isa::Instruction::F32Abs => self.run_abs::(), - isa::Instruction::F32Neg => self.run_neg::(), - isa::Instruction::F32Ceil => self.run_ceil::(), - isa::Instruction::F32Floor => self.run_floor::(), - isa::Instruction::F32Trunc => self.run_trunc::(), - isa::Instruction::F32Nearest => self.run_nearest::(), - isa::Instruction::F32Sqrt => self.run_sqrt::(), - isa::Instruction::F32Add => self.run_add::(), - isa::Instruction::F32Sub => self.run_sub::(), - isa::Instruction::F32Mul => self.run_mul::(), - isa::Instruction::F32Div => self.run_div::(), - isa::Instruction::F32Min => self.run_min::(), - isa::Instruction::F32Max => self.run_max::(), - isa::Instruction::F32Copysign => self.run_copysign::(), - - isa::Instruction::F64Abs => self.run_abs::(), - isa::Instruction::F64Neg => self.run_neg::(), - isa::Instruction::F64Ceil => self.run_ceil::(), - isa::Instruction::F64Floor => self.run_floor::(), - isa::Instruction::F64Trunc => self.run_trunc::(), - isa::Instruction::F64Nearest => self.run_nearest::(), - isa::Instruction::F64Sqrt => self.run_sqrt::(), - isa::Instruction::F64Add => self.run_add::(), - isa::Instruction::F64Sub => self.run_sub::(), - isa::Instruction::F64Mul => self.run_mul::(), - isa::Instruction::F64Div => self.run_div::(), - isa::Instruction::F64Min => self.run_min::(), - isa::Instruction::F64Max => self.run_max::(), - isa::Instruction::F64Copysign => self.run_copysign::(), - - isa::Instruction::I32WrapI64 => self.run_wrap::(), - isa::Instruction::I32TruncSF32 => self.run_trunc_to_int::(), - isa::Instruction::I32TruncUF32 => self.run_trunc_to_int::(), - isa::Instruction::I32TruncSF64 => self.run_trunc_to_int::(), - isa::Instruction::I32TruncUF64 => self.run_trunc_to_int::(), - isa::Instruction::I64ExtendSI32 => self.run_extend::(), - isa::Instruction::I64ExtendUI32 => self.run_extend::(), - isa::Instruction::I64TruncSF32 => self.run_trunc_to_int::(), - isa::Instruction::I64TruncUF32 => self.run_trunc_to_int::(), - isa::Instruction::I64TruncSF64 => self.run_trunc_to_int::(), - isa::Instruction::I64TruncUF64 => self.run_trunc_to_int::(), - isa::Instruction::F32ConvertSI32 => self.run_extend::(), - isa::Instruction::F32ConvertUI32 => self.run_extend::(), - isa::Instruction::F32ConvertSI64 => self.run_wrap::(), - isa::Instruction::F32ConvertUI64 => self.run_wrap::(), - isa::Instruction::F32DemoteF64 => self.run_wrap::(), - isa::Instruction::F64ConvertSI32 
=> self.run_extend::(), - isa::Instruction::F64ConvertUI32 => self.run_extend::(), - isa::Instruction::F64ConvertSI64 => self.run_extend::(), - isa::Instruction::F64ConvertUI64 => self.run_extend::(), - isa::Instruction::F64PromoteF32 => self.run_extend::(), - - isa::Instruction::I32ReinterpretF32 => self.run_reinterpret::(), - isa::Instruction::I64ReinterpretF64 => self.run_reinterpret::(), - isa::Instruction::F32ReinterpretI32 => self.run_reinterpret::(), - isa::Instruction::F64ReinterpretI64 => self.run_reinterpret::(), - - isa::Instruction::I32Extend8S => self.run_extend::(), - isa::Instruction::I32Extend16S => self.run_extend::(), - isa::Instruction::I64Extend8S => self.run_extend::(), - isa::Instruction::I64Extend16S => self.run_extend::(), - isa::Instruction::I64Extend32S => self.run_extend::(), + isa::Instruction::I32Eqz(a) => self.run_eqz::(a), + isa::Instruction::I32Eq(a0, a1) => self.run_eq::(a0, a1), + isa::Instruction::I32Ne(a0, a1) => self.run_ne::(a0, a1), + isa::Instruction::I32LtS(a0, a1) => self.run_lt::(a0, a1), + isa::Instruction::I32LtU(a0, a1) => self.run_lt::(a0, a1), + isa::Instruction::I32GtS(a0, a1) => self.run_gt::(a0, a1), + isa::Instruction::I32GtU(a0, a1) => self.run_gt::(a0, a1), + isa::Instruction::I32LeS(a0, a1) => self.run_lte::(a0, a1), + isa::Instruction::I32LeU(a0, a1) => self.run_lte::(a0, a1), + isa::Instruction::I32GeS(a0, a1) => self.run_gte::(a0, a1), + isa::Instruction::I32GeU(a0, a1) => self.run_gte::(a0, a1), + + isa::Instruction::I64Eqz(a) => self.run_eqz::(a), + isa::Instruction::I64Eq(a0, a1) => self.run_eq::(a0, a1), + isa::Instruction::I64Ne(a0, a1) => self.run_ne::(a0, a1), + isa::Instruction::I64LtS(a0, a1) => self.run_lt::(a0, a1), + isa::Instruction::I64LtU(a0, a1) => self.run_lt::(a0, a1), + isa::Instruction::I64GtS(a0, a1) => self.run_gt::(a0, a1), + isa::Instruction::I64GtU(a0, a1) => self.run_gt::(a0, a1), + isa::Instruction::I64LeS(a0, a1) => self.run_lte::(a0, a1), + isa::Instruction::I64LeU(a0, a1) => self.run_lte::(a0, a1), + isa::Instruction::I64GeS(a0, a1) => self.run_gte::(a0, a1), + isa::Instruction::I64GeU(a0, a1) => self.run_gte::(a0, a1), + + isa::Instruction::F32Eq => self.run_eq::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Ne => self.run_ne::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Lt => self.run_lt::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Gt => self.run_gt::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Le => self.run_lte::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Ge => self.run_gte::(&UniArg::Pop, &UniArg::Pop), + + isa::Instruction::F64Eq => self.run_eq::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Ne => self.run_ne::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Lt => self.run_lt::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Gt => self.run_gt::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Le => self.run_lte::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Ge => self.run_gte::(&UniArg::Pop, &UniArg::Pop), + + isa::Instruction::I32Clz(a) => self.run_clz::(a), + isa::Instruction::I32Ctz(a) => self.run_ctz::(a), + isa::Instruction::I32Popcnt(a) => self.run_popcnt::(a), + isa::Instruction::I32Add(a0, a1) => self.run_add::(a0, a1), + isa::Instruction::I32Sub(a0, a1) => self.run_sub::(a0, a1), + isa::Instruction::I32Mul(a0, a1) => self.run_mul::(a0, a1), + isa::Instruction::I32DivS(a0, a1) => self.run_div::(a0, a1), + isa::Instruction::I32DivU(a0, a1) => self.run_div::(a0, a1), + isa::Instruction::I32RemS(a0, a1) => self.run_rem::(a0, a1), 
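// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.]
// Every i32/i64 ALU arm in this match follows the same pattern: the UniArg
// operands recorded at compile time are forwarded to the run_* helper, which
// resolves each operand against the value stack instead of unconditionally
// popping it (the float arms keep hard-coded &UniArg::Pop and are unchanged
// semantically). The self-contained model below uses invented stand-ins
// (`Arg`, `resolve`, `run_i64_add`) for UniArg, ValueStack::handle_uniarg and
// run_add; it is a sketch of the idea, not this crate's API.

#[derive(Clone, Copy)]
enum Arg {
    Pop,          // consume the top of the stack (classic Wasm operand)
    Stack(usize), // read the value `n` slots from the top, leaving it in place
    Const(i64),   // use an immediate constant carried by the instruction
}

fn resolve(stack: &mut Vec<i64>, arg: Arg) -> i64 {
    match arg {
        Arg::Pop => stack.pop().expect("validated by the compiler"),
        Arg::Stack(n) => stack[stack.len() - n],
        Arg::Const(c) => c,
    }
}

// Runtime effect of an `I64Add(a0, a1)`-style instruction: the right operand
// (a1) is resolved first, then the left one (a0), mirroring the run_add
// change later in this diff.
fn run_i64_add(stack: &mut Vec<i64>, a0: Arg, a1: Arg) {
    let right = resolve(stack, a1);
    let left = resolve(stack, a0);
    stack.push(left.wrapping_add(right));
}

fn main() {
    let mut stack = vec![40];
    // Operands folded into the instruction: nothing extra is popped for them.
    run_i64_add(&mut stack, Arg::Const(2), Arg::Stack(1));
    assert_eq!(stack, vec![40, 42]);
    // Plain Wasm behaviour is the Pop/Pop case.
    run_i64_add(&mut stack, Arg::Pop, Arg::Pop);
    assert_eq!(stack, vec![82]);
}
// ---------------------------------------------------------------------------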
+ isa::Instruction::I32RemU(a0, a1) => self.run_rem::(a0, a1), + isa::Instruction::I32And(a0, a1) => self.run_and::(a0, a1), + isa::Instruction::I32Or(a0, a1) => self.run_or::(a0, a1), + isa::Instruction::I32Xor(a0, a1) => self.run_xor::(a0, a1), + isa::Instruction::I32Shl(a0, a1) => self.run_shl::(0x1F, a0, a1), + isa::Instruction::I32ShrS(a0, a1) => self.run_shr::(0x1F, a0, a1), + isa::Instruction::I32ShrU(a0, a1) => self.run_shr::(0x1F, a0, a1), + isa::Instruction::I32Rotl(a0, a1) => self.run_rotl::(a0, a1), + isa::Instruction::I32Rotr(a0, a1) => self.run_rotr::(a0, a1), + + isa::Instruction::I64Clz(a) => self.run_clz::(a), + isa::Instruction::I64Ctz(a) => self.run_ctz::(a), + isa::Instruction::I64Popcnt(a) => self.run_popcnt::(a), + isa::Instruction::I64Add(a0, a1) => self.run_add::(a0, a1), + isa::Instruction::I64Sub(a0, a1) => self.run_sub::(a0, a1), + isa::Instruction::I64Mul(a0, a1) => self.run_mul::(a0, a1), + isa::Instruction::I64DivS(a0, a1) => self.run_div::(a0, a1), + isa::Instruction::I64DivU(a0, a1) => self.run_div::(a0, a1), + isa::Instruction::I64RemS(a0, a1) => self.run_rem::(a0, a1), + isa::Instruction::I64RemU(a0, a1) => self.run_rem::(a0, a1), + isa::Instruction::I64And(a0, a1) => self.run_and::(a0, a1), + isa::Instruction::I64Or(a0, a1) => self.run_or::(a0, a1), + isa::Instruction::I64Xor(a0, a1) => self.run_xor::(a0, a1), + isa::Instruction::I64Shl(a0, a1) => self.run_shl::(0x3F, a0, a1), + isa::Instruction::I64ShrS(a0, a1) => self.run_shr::(0x3F, a0, a1), + isa::Instruction::I64ShrU(a0, a1) => self.run_shr::(0x3F, a0, a1), + isa::Instruction::I64Rotl(a0, a1) => self.run_rotl::(a0, a1), + isa::Instruction::I64Rotr(a0, a1) => self.run_rotr::(a0, a1), + + isa::Instruction::F32Abs => self.run_abs::(&UniArg::Pop), + isa::Instruction::F32Neg => self.run_neg::(&UniArg::Pop), + isa::Instruction::F32Ceil => self.run_ceil::(&UniArg::Pop), + isa::Instruction::F32Floor => self.run_floor::(&UniArg::Pop), + isa::Instruction::F32Trunc => self.run_trunc::(&UniArg::Pop), + isa::Instruction::F32Nearest => self.run_nearest::(&UniArg::Pop), + isa::Instruction::F32Sqrt => self.run_sqrt::(&UniArg::Pop), + isa::Instruction::F32Add => self.run_add::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Sub => self.run_sub::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Mul => self.run_mul::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Div => self.run_div::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Min => self.run_min::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Max => self.run_max::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F32Copysign => self.run_copysign::(&UniArg::Pop, &UniArg::Pop), + + isa::Instruction::F64Abs => self.run_abs::(&UniArg::Pop), + isa::Instruction::F64Neg => self.run_neg::(&UniArg::Pop), + isa::Instruction::F64Ceil => self.run_ceil::(&UniArg::Pop), + isa::Instruction::F64Floor => self.run_floor::(&UniArg::Pop), + isa::Instruction::F64Trunc => self.run_trunc::(&UniArg::Pop), + isa::Instruction::F64Nearest => self.run_nearest::(&UniArg::Pop), + isa::Instruction::F64Sqrt => self.run_sqrt::(&UniArg::Pop), + isa::Instruction::F64Add => self.run_add::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Sub => self.run_sub::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Mul => self.run_mul::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Div => self.run_div::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Min => self.run_min::(&UniArg::Pop, &UniArg::Pop), + isa::Instruction::F64Max => self.run_max::(&UniArg::Pop, 
&UniArg::Pop), + isa::Instruction::F64Copysign => self.run_copysign::(&UniArg::Pop, &UniArg::Pop), + + isa::Instruction::I32WrapI64(a) => self.run_wrap::(a), + isa::Instruction::I32TruncSF32 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I32TruncUF32 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I32TruncSF64 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I32TruncUF64 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I64ExtendSI32(a) => self.run_extend::(a), + isa::Instruction::I64ExtendUI32(a) => self.run_extend::(a), + isa::Instruction::I64TruncSF32 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I64TruncUF32 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I64TruncSF64 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::I64TruncUF64 => self.run_trunc_to_int::(&UniArg::Pop), + isa::Instruction::F32ConvertSI32 => self.run_extend::(&UniArg::Pop), + isa::Instruction::F32ConvertUI32 => self.run_extend::(&UniArg::Pop), + isa::Instruction::F32ConvertSI64 => self.run_wrap::(&UniArg::Pop), + isa::Instruction::F32ConvertUI64 => self.run_wrap::(&UniArg::Pop), + isa::Instruction::F32DemoteF64 => self.run_wrap::(&UniArg::Pop), + isa::Instruction::F64ConvertSI32 => self.run_extend::(&UniArg::Pop), + isa::Instruction::F64ConvertUI32 => self.run_extend::(&UniArg::Pop), + isa::Instruction::F64ConvertSI64 => self.run_extend::(&UniArg::Pop), + isa::Instruction::F64ConvertUI64 => self.run_extend::(&UniArg::Pop), + isa::Instruction::F64PromoteF32 => self.run_extend::(&UniArg::Pop), + + isa::Instruction::I32ReinterpretF32 => self.run_reinterpret::(&UniArg::Pop), + isa::Instruction::I64ReinterpretF64 => self.run_reinterpret::(&UniArg::Pop), + isa::Instruction::F32ReinterpretI32 => self.run_reinterpret::(&UniArg::Pop), + isa::Instruction::F64ReinterpretI64 => self.run_reinterpret::(&UniArg::Pop), + + isa::Instruction::I32Extend8S(a) => self.run_extend::(a), + isa::Instruction::I32Extend16S(a) => self.run_extend::(a), + isa::Instruction::I64Extend8S(a) => self.run_extend::(a), + isa::Instruction::I64Extend16S(a) => self.run_extend::(a), + isa::Instruction::I64Extend32S(a) => self.run_extend::(a), } } @@ -682,8 +707,12 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::Branch(target)) } - fn run_br_nez(&mut self, target: isa::Target) -> Result { - let condition = self.value_stack.pop_as(); + fn run_br_nez( + &mut self, + target: isa::Target, + arg: &UniArg, + ) -> Result { + let condition = self.value_stack.handle_uniarg(arg); if condition { Ok(InstructionOutcome::Branch(target)) } else { @@ -691,8 +720,12 @@ impl<'m> Interpreter<'m> { } } - fn run_br_eqz(&mut self, target: isa::Target) -> Result { - let condition = self.value_stack.pop_as(); + fn run_br_eqz( + &mut self, + target: isa::Target, + arg: &UniArg, + ) -> Result { + let condition = self.value_stack.handle_uniarg(arg); if condition { Ok(InstructionOutcome::RunNextInstruction) } else { @@ -700,8 +733,12 @@ impl<'m> Interpreter<'m> { } } - fn run_br_table(&mut self, targets: isa::BrTargets) -> Result { - let index: u32 = self.value_stack.pop_as(); + fn run_br_table( + &mut self, + targets: isa::BrTargets, + arg: &UniArg, + ) -> Result { + let index: u32 = self.value_stack.handle_uniarg(arg); let dst = targets.get(index); @@ -728,8 +765,9 @@ impl<'m> Interpreter<'m> { &mut self, context: &mut FunctionContext, signature_idx: u32, + arg: &UniArg, ) -> Result { - let table_func_idx: u32 = self.value_stack.pop_as(); + let table_func_idx: u32 = 
self.value_stack.handle_uniarg(arg); let table = context .module() .table_by_index(DEFAULT_TABLE_INDEX) @@ -759,11 +797,17 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_select(&mut self) -> Result { - let (left, mid, right) = self.value_stack.pop_triple(); + fn run_select( + &mut self, + lhs: &UniArg, + rhs: &UniArg, + cond: &UniArg, + ) -> Result { + let condition = self.value_stack.handle_uniarg(cond); + let right = self.value_stack.handle_uniarg_raw(rhs); + let left = self.value_stack.handle_uniarg_raw(lhs); - let condition = <_>::from_value_internal(right); - let val = if condition { left } else { mid }; + let val = if condition { left } else { right }; self.value_stack.push(val)?; Ok(InstructionOutcome::RunNextInstruction) } @@ -774,8 +818,8 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_set_local(&mut self, index: u32) -> Result { - let val = self.value_stack.pop(); + fn run_set_local(&mut self, index: u32, arg0: &UniArg) -> Result { + let val = self.value_stack.handle_uniarg_raw(arg0); *self.value_stack.pick_mut(index as usize) = val; Ok(InstructionOutcome::RunNextInstruction) } @@ -804,8 +848,9 @@ impl<'m> Interpreter<'m> { &mut self, context: &mut FunctionContext, index: u32, + arg: &UniArg, ) -> Result { - let val = self.value_stack.pop(); + let val: ValueInternal = self.value_stack.handle_uniarg_raw(arg); let global = context .module() .global_by_index(index) @@ -820,12 +865,13 @@ impl<'m> Interpreter<'m> { &mut self, context: &mut FunctionContext, offset: u32, + arg: &UniArg, ) -> Result where ValueInternal: From, T: LittleEndianConvert, { - let raw_address = self.value_stack.pop_as(); + let raw_address = self.value_stack.handle_uniarg(arg); let address = effective_address(offset, raw_address)?; let m = context .memory() @@ -841,13 +887,14 @@ impl<'m> Interpreter<'m> { &mut self, context: &mut FunctionContext, offset: u32, + arg: &UniArg, ) -> Result where T: ExtendInto, ValueInternal: From, T: LittleEndianConvert, { - let raw_address = self.value_stack.pop_as(); + let raw_address = self.value_stack.handle_uniarg(arg); let address = effective_address(offset, raw_address)?; let m = context .memory() @@ -866,13 +913,15 @@ impl<'m> Interpreter<'m> { &mut self, context: &mut FunctionContext, offset: u32, + arg0: &UniArg, + arg1: &UniArg, ) -> Result where T: FromValueInternal, T: LittleEndianConvert, { - let stack_value = self.value_stack.pop_as::(); - let raw_address = self.value_stack.pop_as::(); + let stack_value = self.value_stack.handle_uniarg::(arg1); + let raw_address = self.value_stack.handle_uniarg::(arg0); let address = effective_address(offset, raw_address)?; let m = context @@ -887,15 +936,18 @@ impl<'m> Interpreter<'m> { &mut self, context: &mut FunctionContext, offset: u32, + arg0: &UniArg, + arg1: &UniArg, ) -> Result where T: FromValueInternal, T: WrapInto, U: LittleEndianConvert, { - let stack_value: T = <_>::from_value_internal(self.value_stack.pop()); + let stack_value: T = self.value_stack.handle_uniarg(arg1); let stack_value = stack_value.wrap_into(); - let raw_address = self.value_stack.pop_as::(); + let raw_address: u32 = self.value_stack.handle_uniarg(arg0); + let address = effective_address(offset, raw_address)?; let m = context .memory() @@ -920,8 +972,9 @@ impl<'m> Interpreter<'m> { fn run_grow_memory( &mut self, context: &mut FunctionContext, + arg0: &UniArg, ) -> Result { - let pages: u32 = self.value_stack.pop_as(); + let pages: u32 = self.value_stack.handle_uniarg(arg0); let m 
= context .memory() .expect("Due to validation memory should exists"); @@ -940,12 +993,18 @@ impl<'m> Interpreter<'m> { .map(|_| InstructionOutcome::RunNextInstruction) } - fn run_relop(&mut self, f: F) -> Result + fn run_relop( + &mut self, + f: F, + arg0: &UniArg, + arg1: &UniArg, + ) -> Result where T: FromValueInternal, F: FnOnce(T, T) -> bool, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right = self.value_stack.handle_uniarg::(arg1); + let left = self.value_stack.handle_uniarg::(arg0); let v = if f(left, right) { ValueInternal(1) } else { @@ -955,135 +1014,147 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_eqz(&mut self) -> Result + fn run_eqz(&mut self, arg0: &UniArg) -> Result where T: FromValueInternal, T: PartialEq + Default, { - let v = self.value_stack.pop_as::(); + let v = self.value_stack.handle_uniarg::(arg0); let v = ValueInternal(if v == Default::default() { 1 } else { 0 }); self.value_stack.push(v)?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_eq(&mut self) -> Result + fn run_eq(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where T: FromValueInternal + PartialEq, { - self.run_relop(|left: T, right: T| left == right) + self.run_relop(|left: T, right: T| left == right, arg0, arg1) } - fn run_ne(&mut self) -> Result + fn run_ne(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where T: FromValueInternal + PartialEq, { - self.run_relop(|left: T, right: T| left != right) + self.run_relop(|left: T, right: T| left != right, arg0, arg1) } - fn run_lt(&mut self) -> Result + fn run_lt(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where T: FromValueInternal + PartialOrd, { - self.run_relop(|left: T, right: T| left < right) + self.run_relop(|left: T, right: T| left < right, arg0, arg1) } - fn run_gt(&mut self) -> Result + fn run_gt(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where T: FromValueInternal + PartialOrd, { - self.run_relop(|left: T, right: T| left > right) + self.run_relop(|left: T, right: T| left > right, arg0, arg1) } - fn run_lte(&mut self) -> Result + fn run_lte(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where T: FromValueInternal + PartialOrd, { - self.run_relop(|left: T, right: T| left <= right) + self.run_relop(|left: T, right: T| left <= right, arg0, arg1) } - fn run_gte(&mut self) -> Result + fn run_gte(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where T: FromValueInternal + PartialOrd, { - self.run_relop(|left: T, right: T| left >= right) + self.run_relop(|left: T, right: T| left >= right, arg0, arg1) } - fn run_unop(&mut self, f: F) -> Result + fn run_unop(&mut self, f: F, arg0: &UniArg) -> Result where F: FnOnce(T) -> U, T: FromValueInternal, ValueInternal: From, { - let v = self.value_stack.pop_as::(); + let v = self.value_stack.handle_uniarg::(arg0); let v = f(v); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_clz(&mut self) -> Result + fn run_clz(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Integer + FromValueInternal, { - self.run_unop(|v: T| v.leading_zeros()) + self.run_unop(|v: T| v.leading_zeros(), arg0) } - fn run_ctz(&mut self) -> Result + fn run_ctz(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Integer + FromValueInternal, { - self.run_unop(|v: T| v.trailing_zeros()) + self.run_unop(|v: T| v.trailing_zeros(), arg0) } - fn run_popcnt(&mut self) -> Result + fn run_popcnt(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Integer + FromValueInternal, { - 
self.run_unop(|v: T| v.count_ones()) + self.run_unop(|v: T| v.count_ones(), arg0) } - fn run_add(&mut self) -> Result + fn run_add(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: ArithmeticOps + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.add(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_sub(&mut self) -> Result + fn run_sub(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: ArithmeticOps + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.sub(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_mul(&mut self) -> Result + fn run_mul(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: ArithmeticOps + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.mul(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_div(&mut self) -> Result + fn run_div( + &mut self, + arg0: &UniArg, + arg1: &UniArg, + ) -> Result where ValueInternal: From, T: TransmuteInto + FromValueInternal, U: ArithmeticOps + TransmuteInto, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let (left, right) = (left.transmute_into(), right.transmute_into()); let v = left.div(right)?; let v = v.transmute_into(); @@ -1091,13 +1162,19 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_rem(&mut self) -> Result + fn run_rem( + &mut self, + arg0: &UniArg, + arg1: &UniArg, + ) -> Result where ValueInternal: From, T: TransmuteInto + FromValueInternal, U: Integer + TransmuteInto, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let (left, right) = (left.transmute_into(), right.transmute_into()); let v = left.rem(right)?; let v = v.transmute_into(); @@ -1105,58 +1182,78 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_and(&mut self) -> Result + fn run_and(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From<::Output>, T: ops::BitAnd + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.bitand(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_or(&mut self) -> Result + fn run_or(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From<::Output>, T: ops::BitOr + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.bitor(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_xor(&mut self) -> Result + fn run_xor(&mut self, arg0: &UniArg, arg1: 
&UniArg) -> Result where ValueInternal: From<::Output>, T: ops::BitXor + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.bitxor(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_shl(&mut self, mask: T) -> Result + fn run_shl( + &mut self, + mask: T, + arg0: &UniArg, + arg1: &UniArg, + ) -> Result where ValueInternal: From<>::Output>, T: ops::Shl + ops::BitAnd + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.shl(right & mask); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_shr(&mut self, mask: U) -> Result + fn run_shr( + &mut self, + mask: U, + arg0: &UniArg, + arg1: &UniArg, + ) -> Result where ValueInternal: From, T: TransmuteInto + FromValueInternal, U: ops::Shr + ops::BitAnd, >::Output: TransmuteInto, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let (left, right) = (left.transmute_into(), right.transmute_into()); let v = left.shr(right & mask); let v = v.transmute_into(); @@ -1164,132 +1261,146 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_rotl(&mut self) -> Result + fn run_rotl(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: Integer + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.rotl(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_rotr(&mut self) -> Result + fn run_rotr(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: Integer + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.rotr(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_abs(&mut self) -> Result + fn run_abs(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - self.run_unop(|v: T| v.abs()) + self.run_unop(|v: T| v.abs(), arg0) } - fn run_neg(&mut self) -> Result + fn run_neg(&mut self, arg0: &UniArg) -> Result where ValueInternal: From<::Output>, T: ops::Neg + FromValueInternal, { - self.run_unop(|v: T| v.neg()) + self.run_unop(|v: T| v.neg(), arg0) } - fn run_ceil(&mut self) -> Result + fn run_ceil(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - self.run_unop(|v: T| v.ceil()) + self.run_unop(|v: T| v.ceil(), arg0) } - fn run_floor(&mut self) -> Result + fn run_floor(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - self.run_unop(|v: T| v.floor()) + self.run_unop(|v: T| v.floor(), arg0) } - fn run_trunc(&mut self) -> Result + fn run_trunc(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - self.run_unop(|v: T| v.trunc()) + self.run_unop(|v: T| v.trunc(), arg0) } - fn run_nearest(&mut self) -> Result + fn run_nearest(&mut self, arg0: &UniArg) 
-> Result where ValueInternal: From, T: Float + FromValueInternal, { - self.run_unop(|v: T| v.nearest()) + self.run_unop(|v: T| v.nearest(), arg0) } - fn run_sqrt(&mut self) -> Result + fn run_sqrt(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - self.run_unop(|v: T| v.sqrt()) + self.run_unop(|v: T| v.sqrt(), arg0) } - fn run_min(&mut self) -> Result + fn run_min(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.min(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_max(&mut self) -> Result + fn run_max(&mut self, arg0: &UniArg, arg1: &UniArg) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.max(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_copysign(&mut self) -> Result + fn run_copysign( + &mut self, + arg0: &UniArg, + arg1: &UniArg, + ) -> Result where ValueInternal: From, T: Float + FromValueInternal, { - let (left, right) = self.value_stack.pop_pair_as::(); + let right: T = self.value_stack.handle_uniarg(arg1); + let left: T = self.value_stack.handle_uniarg(arg0); + let v = left.copysign(right); self.value_stack.push(v.into())?; Ok(InstructionOutcome::RunNextInstruction) } - fn run_wrap(&mut self) -> Result + fn run_wrap(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: WrapInto + FromValueInternal, { - self.run_unop(|v: T| v.wrap_into()) + self.run_unop(|v: T| v.wrap_into(), arg0) } - fn run_trunc_to_int(&mut self) -> Result + fn run_trunc_to_int(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: TryTruncateInto + FromValueInternal, U: TransmuteInto, { - let v = self.value_stack.pop_as::(); + let v: T = self.value_stack.handle_uniarg(arg0); v.try_truncate_into() .map(|v| v.transmute_into()) @@ -1297,13 +1408,13 @@ impl<'m> Interpreter<'m> { .map(|_| InstructionOutcome::RunNextInstruction) } - fn run_extend(&mut self) -> Result + fn run_extend(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: ExtendInto + FromValueInternal, U: TransmuteInto, { - let v = self.value_stack.pop_as::(); + let v: T = self.value_stack.handle_uniarg(arg0); let v = v.extend_into().transmute_into(); self.value_stack.push(v.into())?; @@ -1311,13 +1422,13 @@ impl<'m> Interpreter<'m> { Ok(InstructionOutcome::RunNextInstruction) } - fn run_reinterpret(&mut self) -> Result + fn run_reinterpret(&mut self, arg0: &UniArg) -> Result where ValueInternal: From, T: FromValueInternal, T: TransmuteInto, { - let v = self.value_stack.pop_as::(); + let v: T = self.value_stack.handle_uniarg(arg0); let v = v.transmute_into(); self.value_stack.push(v.into())?; @@ -1461,34 +1572,6 @@ impl ValueStack { self.sp = cur_stack_len - drop_keep.drop as usize; } - #[inline] - fn pop_as(&mut self) -> T - where - T: FromValueInternal, - { - let value = self.pop(); - - T::from_value_internal(value) - } - - #[inline] - fn pop_pair_as(&mut self) -> (T, T) - where - T: FromValueInternal, - { - let right = self.pop_as(); - let left = self.pop_as(); - (left, right) - } - - #[inline] - fn pop_triple(&mut self) -> (ValueInternal, 
ValueInternal, ValueInternal) {
-        let right = self.pop();
-        let mid = self.pop();
-        let left = self.pop();
-        (left, mid, right)
-    }
-
     #[inline]
     pub fn top(&self) -> &ValueInternal {
         self.pick(1)
@@ -1509,6 +1592,23 @@ impl ValueStack {
         self.buf[self.sp]
     }
 
+    #[inline]
+    fn handle_uniarg_raw(&mut self, arg: &UniArg) -> ValueInternal {
+        match arg {
+            UniArg::Pop => self.pop(),
+            UniArg::Stack(i) => *self.pick(*i),
+            UniArg::IConst(c) => c.into(),
+        }
+    }
+
+    #[inline]
+    fn handle_uniarg<T>(&mut self, arg: &UniArg) -> T
+    where
+        T: FromValueInternal,
+    {
+        T::from_value_internal(self.handle_uniarg_raw(arg))
+    }
+
     #[inline]
     fn push(&mut self, value: ValueInternal) -> Result<(), TrapCode> {
         let cell = self.buf.get_mut(self.sp).ok_or(TrapCode::StackOverflow)?;
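// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.]
// The compiler side of this change (the Sink changes earlier in this diff)
// buffers operand-producing instructions via emit_uniarg() and, when the
// consumer is emitted, either folds them into its UniArg slots or flushes
// them unchanged; commit_uniarg() is also called at every branch, label and
// br_table so the folding never crosses a control-flow boundary. The toy
// model below captures only that fold-or-flush idea: `PendingSink`, `Op` and
// `Arg` are invented names, and it omits the skip handling and the
// Stack-depth re-adjustment (try_decrease_stack_depth) the real emit()
// performs.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Arg {
    Pop,        // operand comes from the top of the value stack
    Const(i64), // operand folded from a preceding const producer
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Op {
    I64Const(i64),
    I64Add(Arg, Arg),
}

#[derive(Default)]
struct PendingSink {
    ins: Vec<Op>,
    // (operand this producer could become, instruction to flush otherwise)
    pending: Vec<(Arg, Op)>,
}

impl PendingSink {
    /// Buffer a producer that may later be folded into an operand slot.
    fn emit_uniarg(&mut self, arg: Arg, op: Op) {
        self.pending.push((arg, op));
    }

    /// Flush every buffered producer that was not folded.
    fn commit(&mut self) {
        for (_, op) in self.pending.drain(..) {
            self.ins.push(op);
        }
    }

    /// Emit a two-operand consumer, folding up to two pending producers.
    fn emit_binop(&mut self, make: impl FnOnce(Arg, Arg) -> Op) {
        // Producers are consumed right to left, like values on the stack.
        let rhs = self.pending.pop().map(|p| p.0).unwrap_or(Arg::Pop);
        let lhs = self.pending.pop().map(|p| p.0).unwrap_or(Arg::Pop);
        self.commit(); // anything older must still be materialised
        self.ins.push(make(lhs, rhs));
    }
}

fn main() {
    let mut sink = PendingSink::default();
    // i64.const 2; i64.const 3; i64.add  ==>  one I64Add(Const(2), Const(3))
    sink.emit_uniarg(Arg::Const(2), Op::I64Const(2));
    sink.emit_uniarg(Arg::Const(3), Op::I64Const(3));
    sink.emit_binop(Op::I64Add);
    assert_eq!(sink.ins, vec![Op::I64Add(Arg::Const(2), Arg::Const(3))]);

    // A producer that is never folded (e.g. followed by a label or branch)
    // is flushed to the instruction stream unchanged.
    sink.emit_uniarg(Arg::Const(7), Op::I64Const(7));
    sink.commit();
    assert_eq!(sink.ins.last(), Some(&Op::I64Const(7)));
}
// ---------------------------------------------------------------------------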