diff --git a/.gitignore b/.gitignore index 7a55dead..50d2c52a 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,4 @@ cmake-build-*/ prefix/ CMakeLists.txt.user CMakeUserPresets.json -maat_state_* +maat_state_* \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index c7be0169..a578f59e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,6 +18,7 @@ include(cmake/variables.cmake) # ---- Declare library ---- add_library(maat_maat + src/arch/arch_ARM64.cpp src/arch/arch_EVM.cpp src/arch/arch_X86.cpp src/arch/lifter.cpp @@ -180,15 +181,31 @@ macro(maat_sleigh_compile ARCH_DIR ARCH) configure_file("${spec_dir}/${ARCH_DIR}/data/languages/${ARCH}.pspec" "${spec_out_dir}/${ARCH}.pspec") endmacro() +macro(maat_sleigh_compile_files ARCH_DIR ARCH SLASPEC PSPEC) + # ARCH_DIR is the directory that appears in Ghidra's source code hierarchy + # ARCH names the resulting target; SLASPEC and PSPEC name the '.slaspec' and '.pspec' files, which may differ from ARCH + # Creates a target maat_sleigh_spec_${ARCH} + sleigh_compile( + TARGET maat_sleigh_spec_${ARCH} + COMPILER "${maat_SLEIGH_COMPILER}" + SLASPEC "${spec_dir}/${ARCH_DIR}/data/languages/${SLASPEC}.slaspec" + LOG_FILE "${PROJECT_BINARY_DIR}/sleigh-log/${ARCH}.log" + OUT_FILE "${spec_out_dir}/${SLASPEC}.sla" + ) + configure_file("${spec_dir}/${ARCH_DIR}/data/languages/${PSPEC}.pspec" "${spec_out_dir}/${PSPEC}.pspec") +endmacro() + maat_sleigh_compile(x86 x86-64) maat_sleigh_compile(x86 x86) maat_sleigh_compile(EVM EVM) +maat_sleigh_compile(AARCH64 AARCH64) # All of the sla spec targets are combined into this one add_custom_target(maat_all_sla_specs DEPENDS maat_sleigh_spec_x86-64 maat_sleigh_spec_x86 maat_sleigh_spec_EVM + maat_sleigh_spec_AARCH64 ) # Add sla specs as dependencies to our targets diff --git a/bindings/python/py_arch.cpp b/bindings/python/py_arch.cpp index 6961fe14..280120f4 100644 --- a/bindings/python/py_arch.cpp +++ b/bindings/python/py_arch.cpp @@ -11,6 +11,7 @@ void init_arch(PyObject* module) PyDict_SetItemString(arch_enum, "X86", PyLong_FromLong((int)Arch::Type::X86)); PyDict_SetItemString(arch_enum, "X64", PyLong_FromLong((int)Arch::Type::X64)); PyDict_SetItemString(arch_enum, "EVM", PyLong_FromLong((int)Arch::Type::EVM)); + PyDict_SetItemString(arch_enum, "ARM64", PyLong_FromLong((int)Arch::Type::ARM64)); PyObject* arch_class = create_class(PyUnicode_FromString("ARCH"), PyTuple_New(0), arch_enum); PyModule_AddObject(module, "ARCH", arch_class); diff --git a/src/arch/arch_ARM64.cpp b/src/arch/arch_ARM64.cpp new file mode 100644 index 00000000..6376aabd --- /dev/null +++ b/src/arch/arch_ARM64.cpp @@ -0,0 +1,197 @@ +/* +Commonwealth of Australia represented by the Department of Defence + +Produced by Nathan Do, Student Intern at DSTG (Defence Science and Technology Group) +*/ + +#include "maat/arch.hpp" +#include "maat/exception.hpp" +#include "maat/cpu.hpp" + +namespace maat +{ +namespace ARM64 +{ + ArchARM64::ArchARM64(): Arch(Arch::Type::ARM64, 64, ARM64::NB_REGS) + { + available_modes = {CPUMode::A64}; + reg_map = + { + {"r0", R0}, + {"r1", R1}, + {"r2", R2}, + {"r3", R3}, + {"r4", R4}, + {"r5", R5}, + {"r6", R6}, + {"r7", R7}, + {"r8", R8}, + {"r9", R9}, + {"r10", R10}, + {"r11", R11}, + {"r12", R12}, + {"r13", R13}, + {"r14", R14}, + {"r15", R15}, + {"r16", R16}, + {"r17", R17}, + {"r18", R18}, + {"r19", R19}, + {"r20", R20}, + {"r21", R21}, + {"r22", R22}, + {"r23", R23}, + {"r24", R24}, + {"r25", R25}, + {"r26", R26}, + {"r27", R27}, + {"r28", R28}, + {"r29", R29}, + {"r30", R30}, + {"lr", LR}, + {"v0", V0}, + {"v1", V1}, + 
{"v2", V2}, + {"v3", V3}, + {"v4", V4}, + {"v5", V5}, + {"v6", V6}, + {"v7", V7}, + {"v8", V8}, + {"v9", V9}, + {"v10", V10}, + {"v11", V11}, + {"v12", V12}, + {"v13", V13}, + {"v14", V14}, + {"v15", V15}, + {"v16", V16}, + {"v17", V17}, + {"v18", V18}, + {"v19", V19}, + {"v20", V20}, + {"v21", V21}, + {"v22", V22}, + {"v23", V23}, + {"v24", V24}, + {"v25", V25}, + {"v26", V26}, + {"v27", V27}, + {"v28", V28}, + {"v29", V29}, + {"v30", V30}, + {"v31", V31}, + {"zr", ZR}, + {"pc", PC}, + {"sp", SP}, + {"pstate",PSTATE}, + {"zf", ZF}, + {"nf", NF}, + {"cf", CF}, + {"vf", VF}, + {"cntpct_el0", CNTPCT_EL0} + }; + } + + size_t ArchARM64::reg_size(reg_t reg_num) const + { + switch (reg_num) { + case R0: + case R1: + case R2: + case R3: + case R4: + case R5: + case R6: + case R7: + case R8: + case R9: + case R10: + case R11: + case R12: + case R13: + case R14: + case R15: + case R16: + case R17: + case R18: + case R19: + case R20: + case R21: + case R22: + case R23: + case R24: + case R25: + case R26: + case R27: + case R28: + case R29: + case R30: + case ZR: + case PC: + case SP: + case PSTATE: + case SPSR: + case ELR: + case CNTPCT_EL0: + return 64; + case V0: + case V1: + case V2: + case V3: + case V4: + case V5: + case V6: + case V7: + case V8: + case V9: + case V10: + case V11: + case V12: + case V13: + case V14: + case V15: + case V16: + case V17: + case V18: + case V19: + case V20: + case V21: + case V22: + case V23: + case V24: + case V25: + case V26: + case V27: + case V28: + case V29: + case V30: + case V31: + return 128; + case ZF: + case NF: + case CF: + case VF: + return 8; + default: + throw runtime_exception("ArchARM64::reg_size(): got unsupported reg num"); + } + } + + reg_t ArchARM64::sp() const + { + return ARM64::SP; + } + + reg_t ArchARM64::pc() const + { + return ARM64::PC; + } + + reg_t ArchARM64::tsc() const + { + throw runtime_exception("ArchARM64::tsc(): method not available"); + } +} // namespace ARM64 + +} // namespace maat diff --git a/src/arch/lifter.cpp b/src/arch/lifter.cpp index 2cb4cebe..5387c1d0 100644 --- a/src/arch/lifter.cpp +++ b/src/arch/lifter.cpp @@ -38,6 +38,12 @@ Lifter::Lifter(CPUMode m): mode(m) pspecfile = config.find_sleigh_file("EVM.pspec"); arch = Arch::Type::EVM; } + else if (mode == CPUMode::A64) + { + slafile = config.find_sleigh_file("AARCH64.sla"); + pspecfile = config.find_sleigh_file("AARCH64.pspec"); + arch = Arch::Type::ARM64; + } else { throw lifter_exception("Lifter: this CPU mode is not supported"); diff --git a/src/engine/callother.cpp b/src/engine/callother.cpp index f12b7b49..c78e9b2e 100644 --- a/src/engine/callother.cpp +++ b/src/engine/callother.cpp @@ -25,6 +25,10 @@ Id mnemonic_to_id(const std::string& mnemonic, Arch::Type arch) if (mnemonic == "STACK_PUSH") return Id::EVM_STACK_PUSH; if (mnemonic == "STACK_POP") return Id::EVM_STACK_POP; break; + case Arch::Type::ARM64: + if (mnemonic == "udf") return Id::AARCH64_UDF; + if (mnemonic == "svc") return Id::AARCH64_SVC; + break; default: break; } @@ -1041,6 +1045,62 @@ void EVM_LOG_handler(MaatEngine& engine, const ir::Inst& inst, ir::ProcessedInst } } +/* +System call for AARCH. 
+The syscall emulation is untested and may not work correctly. +*/ +void AARCH64_SVC_handler(MaatEngine& engine, const ir::Inst& inst, ir::ProcessedInst& pinst) +{ + engine.log.warning("AArch64 syscall emulation is untested and might not work"); + // Get syscall number + const Value& sys_num = engine.cpu.ctx().get(ARM64::R8); + if (sys_num.is_symbolic(*engine.vars)) + { + throw callother_exception("SVC #0: syscall number is symbolic!"); + } + // Get function to emulate syscall + try + { + const env::Function& func = engine.env->get_syscall_func_by_num( + sys_num.as_uint(*engine.vars) + ); + + // Execute function callback + switch (func.callback().execute(engine, env::abi::AARCH64_SVC::instance())) + { + case env::Action::CONTINUE: + break; + case env::Action::ERROR: + throw callother_exception( + "SVC #0: Emulation callback signaled an error (SVC emulation is untested and might not work)" + ); + } + } + catch(const env_exception& e) + { + throw callother_exception( + Fmt() << "SVC #0: " << e.what() >> Fmt::to_str + ); + } +} +/* +Permanently Undefined generates an Undefined Instruction exception. +The encodings for UDF used in this section are defined as permanently undefined in the ARMv8-A architecture. +x86 has a similar instruction, UD2, which likewise indicates that the processor encountered an invalid instruction. +*/ +void AARCH64_UDF_handler(MaatEngine& engine, const ir::Inst& inst, ir::ProcessedInst& pinst) +{ + // UDF always traps: log it, then raise an error to emulate the undefined-instruction exception + engine.log.warning( + Fmt() << "LOG" + << ": UDF (Permanently Undefined) instruction executed" + >> Fmt::to_str + ); + throw callother_exception( + "UDF instruction: permanently undefined instruction" + ); +} + /// Return the default handler map for CALLOTHER occurences HandlerMap default_handler_map() { @@ -1083,6 +1143,8 @@ HandlerMap default_handler_map() h.set_handler(Id::EVM_SELFDESTRUCT, EVM_SELFDESTRUCT_handler); h.set_handler(Id::EVM_LOG, EVM_LOG_handler); + h.set_handler(Id::AARCH64_UDF, AARCH64_UDF_handler); + h.set_handler(Id::AARCH64_SVC, AARCH64_SVC_handler); return h; } diff --git a/src/engine/engine.cpp b/src/engine/engine.cpp index 36965036..d85475f3 100644 --- a/src/engine/engine.cpp +++ b/src/engine/engine.cpp @@ -32,6 +32,11 @@ MaatEngine::MaatEngine(Arch::Type _arch, env::OS os): env(nullptr), _uid(++_uid_ env = std::make_shared(); endianness = Endian::BIG; break; + case Arch::Type::ARM64: + arch = std::make_shared<ARM64::ArchARM64>(); + lifters[CPUMode::A64] = std::make_shared<Lifter>(CPUMode::A64); + _current_cpu_mode = CPUMode::A64; + break; case Arch::Type::NONE: arch = std::make_shared<ArchNone>(); _current_cpu_mode = CPUMode::NONE; diff --git a/src/env/abi.cpp b/src/env/abi.cpp index 142660d8..1498e348 100644 --- a/src/env/abi.cpp +++ b/src/env/abi.cpp @@ -402,6 +402,124 @@ void X86_LINUX_INT80::ret(MaatEngine& engine) const // Do nothing } +// ========== AArch64 standard ABI (AAPCS64) ============ +AARCH64_ABI::AARCH64_ABI(): ABI(Type::AARCH64_ABI) +{} + +ABI& AARCH64_ABI::instance() +{ + static AARCH64_ABI abi; + return abi; +} + +void AARCH64_ABI::get_args( + MaatEngine& engine, + const args_spec_t& args_spec, + std::vector<Value>& args +) const +{ + int i = 0; + for (auto arg : args_spec) + args.push_back(get_arg(engine, i++, arg)); +} + +Value AARCH64_ABI::get_arg(MaatEngine& engine, int n, size_t arg_size) const +{ + std::vector<reg_t> arg_regs{ARM64::R0, ARM64::R1, ARM64::R2, ARM64::R3, ARM64::R4, ARM64::R5, ARM64::R6, ARM64::R7, ARM64::R8}; + Value res; + arg_size = ABI::real_arg_size(engine, arg_size); + if (n < arg_regs.size()) + { + res = engine.cpu.ctx().get(arg_regs[n]); + } + else + { 
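+ // Stack arguments: this ABI stores each argument in a 16-byte stack slot + // (matching prepare_ret_address below), so skip the 16-byte return-address + // slot and index in 16-byte units. Note that AAPCS64 proper uses 8-byte slots.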
+ addr_t stack = engine.cpu.ctx().get(ARM64::SP).as_uint() + 16; + res = engine.mem->read(stack+(16*(n-arg_regs.size())), arg_size); + } + // Note: reading arguments from the stack assumes little-endian byte order + return _adjust_value_to_size(res, arg_size, engine); +} + +void AARCH64_ABI::prepare_ret_address(MaatEngine& engine, addr_t ret_addr) const +{ + // Push the 8-byte return address in a 16-byte slot (keeps SP 16-byte aligned) + engine.cpu.ctx().set(ARM64::SP, engine.cpu.ctx().get(ARM64::SP).as_uint() - 16); + engine.mem->write(engine.cpu.ctx().get(ARM64::SP).as_uint(), ret_addr, 8); +} + +void AARCH64_ABI::set_ret_value( + MaatEngine& engine, + const FunctionCallback::return_t& ret_val +) const +{ + // Return value in R0 + std::visit(maat::util::overloaded{ + [](std::monostate arg){return;}, // no return value + [&engine](auto arg){engine.cpu.ctx().set(ARM64::R0, arg);} + }, ret_val); +} + +void AARCH64_ABI::ret(MaatEngine& engine) const +{ + // Caller clean-up, we just simulate a 'ret' instruction: pop the 8-byte + // return address and free the whole 16-byte slot + engine.cpu.ctx().set(ARM64::PC, engine.mem->read(engine.cpu.ctx().get(ARM64::SP).as_uint(), 8)); + engine.cpu.ctx().set(ARM64::SP, engine.cpu.ctx().get(ARM64::SP).as_uint() + 16); +} + +/* ============== AARCH64 LINUX SYSCALL ==============*/ +AARCH64_SVC::AARCH64_SVC(): ABI(Type::AARCH64_SVC) +{} + +ABI& AARCH64_SVC::instance() +{ + static AARCH64_SVC abi; + return abi; +} + +void AARCH64_SVC::get_args( + MaatEngine& engine, + const args_spec_t& args_spec, + std::vector<Value>& args +) const +{ + int i = 0; + for (auto arg : args_spec) + args.push_back(get_arg(engine, i++, arg)); +} + +Value AARCH64_SVC::get_arg(MaatEngine& engine, int n, size_t arg_size) const +{ + std::vector<reg_t> arg_regs{ARM64::R0, ARM64::R1, ARM64::R2, ARM64::R3, ARM64::R4, ARM64::R5, ARM64::R6, ARM64::R7}; + Value res; + arg_size = ABI::real_arg_size(engine, arg_size); + if (n >= arg_regs.size()) + { + throw env_exception("get_arg(): Linux ARM64 syscall ABI supports only up to 8 arguments"); + } + else + { + res = engine.cpu.ctx().get(arg_regs[n]); + } + return _adjust_value_to_size(res, arg_size, engine); +} + +void AARCH64_SVC::set_ret_value( + MaatEngine& engine, + const FunctionCallback::return_t& ret_val +) const +{ + // Return value in R0 + std::visit(maat::util::overloaded{ + [](std::monostate arg){return;}, // no return value + [&engine](auto arg){engine.cpu.ctx().set(ARM64::R0, arg);} + }, ret_val); +} + +void AARCH64_SVC::ret(MaatEngine& engine) const +{ + // Do nothing +} } // namespace abi } // namespace env } // namespace maat diff --git a/src/env/emulated_libs/libc.cpp b/src/env/emulated_libs/libc.cpp index 1c38fc1b..aa0b105e 100644 --- a/src/env/emulated_libs/libc.cpp +++ b/src/env/emulated_libs/libc.cpp @@ -659,6 +659,19 @@ Library linux_x64_libc() return lib; } +// For Linux AArch64 +Library linux_arm64_libc() +{ + Library lib("libc", libc_common_functions, libc_common_data); + // Arch specific functions... 
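+ // Note: the x64 __libc_start_main callbacks are reused below; this should be + // portable since callbacks fetch their arguments through the engine's ABI layer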
+ lib.add_function(Function("__libc_start_main", + FunctionCallback({8,8,8,8,8,8,8}, linux_x64_libc_start_main_callback_part1) + )); + lib.add_function(Function("__libc_start_main_part2", + FunctionCallback({}, linux_x64_libc_start_main_callback_part2) + )); + return lib; +} } // namespace emulated } // namespace env } // namespace maat diff --git a/src/env/emulated_syscalls/linux_syscalls.cpp b/src/env/emulated_syscalls/linux_syscalls.cpp index 23f912db..327c51c5 100644 --- a/src/env/emulated_syscalls/linux_syscalls.cpp +++ b/src/env/emulated_syscalls/linux_syscalls.cpp @@ -833,6 +833,32 @@ syscall_func_map_t linux_x64_syscall_map() return res; } +syscall_func_map_t linux_arm64_syscall_map() +{ + syscall_func_map_t res + { + {43, Function("sys_statfs", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_stat))}, + {44, Function("sys_fstatfs", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_stat))}, + {48, Function("sys_accessat", FunctionCallback({env::abi::auto_argsize, 4}, sys_linux_access))}, + {56, Function("sys_openat", FunctionCallback({4, env::abi::auto_argsize, 4, 4}, sys_linux_openat))}, + {57, Function("sys_close", FunctionCallback({4}, sys_linux_close))}, + {63, Function("sys_read", FunctionCallback({4, env::abi::auto_argsize, 4}, sys_linux_read))}, + {64, Function("sys_write", FunctionCallback({4, env::abi::auto_argsize, 4}, sys_linux_write))}, + {66, Function("sys_writev", FunctionCallback({4, env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_writev))}, + {67, Function("sys_pread64", FunctionCallback({4, env::abi::auto_argsize, 4, 4}, sys_linux_pread))}, + {78, Function("sys_readlinkat", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_readlink))}, + {79, Function("sys_newfstatat", FunctionCallback({4, env::abi::auto_argsize, env::abi::auto_argsize, 4}, sys_linux_fstatat))}, + {93, Function("sys_exit", FunctionCallback({4}, sys_linux_exit))}, + {94, Function("sys_exit_group", FunctionCallback({4}, sys_linux_exit))}, + {160, Function("sys_uname", FunctionCallback({env::abi::auto_argsize}, sys_linux_newuname))}, + {214, Function("sys_brk", FunctionCallback({env::abi::auto_argsize}, sys_linux_brk))}, + {215, Function("sys_munmap", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_munmap))}, + {222, Function("sys_mmap", FunctionCallback({env::abi::auto_argsize, 4, 4, 4, 4, 4}, sys_linux_mmap))}, + {226, Function("sys_mprotect", FunctionCallback({env::abi::auto_argsize, 4, 4}, sys_linux_mprotect))} + }; + return res; +} + } // namespace emulated } // namespace env } // namespace maat diff --git a/src/env/env.cpp b/src/env/env.cpp index 0e12cf7e..3f774cb4 100644 --- a/src/env/env.cpp +++ b/src/env/env.cpp @@ -15,6 +15,11 @@ abi::ABI* _get_default_abi(Arch::Type arch, OS os) if (os == OS::LINUX) return &abi::X64_SYSTEM_V::instance(); } + else if (arch == Arch::Type::ARM64) + { + if (os == OS::LINUX) + return &abi::AARCH64_ABI::instance(); + } return &abi::ABI_NONE::instance(); } @@ -25,6 +30,11 @@ abi::ABI* _get_syscall_abi(Arch::Type arch, OS os) if (os == OS::LINUX) return &abi::X64_LINUX_SYSCALL::instance(); } + else if (arch == Arch::Type::ARM64) + { + if (os == OS::LINUX) + return &abi::AARCH64_SVC::instance(); + } return &abi::ABI_NONE::instance(); } diff --git a/src/env/env_linux.cpp b/src/env/env_linux.cpp index a1f0d585..f0f63acc 100644 --- a/src/env/env_linux.cpp +++ b/src/env/env_linux.cpp @@ -23,6 +23,9 @@ void 
LinuxEmulator::_init(Arch::Type arch) _libraries.push_back(env::emulated::linux_x64_libc()); _syscall_func_map = env::emulated::linux_x64_syscall_map(); break; + case Arch::Type::ARM64: + _libraries.push_back(env::emulated::linux_arm64_libc()); + _syscall_func_map = env::emulated::linux_arm64_syscall_map(); + break; case Arch::Type::NONE: default: break; diff --git a/src/include/maat/arch.hpp b/src/include/maat/arch.hpp index ddb96ef3..f1e7ab99 100644 --- a/src/include/maat/arch.hpp +++ b/src/include/maat/arch.hpp @@ -361,48 +361,85 @@ namespace EVM // Namespace for ARMv8 (64-bits) specific definitions and classes namespace ARM64 { - static constexpr reg_t R0 = 0; - static constexpr reg_t R1 = 1; - static constexpr reg_t R2 = 2; - static constexpr reg_t R3 = 3; - static constexpr reg_t R4 = 4; - static constexpr reg_t R5 = 5; - static constexpr reg_t R6 = 6; - static constexpr reg_t R7 = 7; - static constexpr reg_t R8 = 8; - static constexpr reg_t R9 = 9; - static constexpr reg_t R10 = 10; - static constexpr reg_t R11 = 11; - static constexpr reg_t R12 = 12; - static constexpr reg_t R13 = 13; - static constexpr reg_t R14 = 14; - static constexpr reg_t R15 = 15; - static constexpr reg_t R16 = 16; - static constexpr reg_t R17 = 17; - static constexpr reg_t R18 = 18; - static constexpr reg_t R19 = 19; - static constexpr reg_t R20 = 20; - static constexpr reg_t R21 = 21; - static constexpr reg_t R22 = 22; - static constexpr reg_t R23 = 23; - static constexpr reg_t R24 = 24; - static constexpr reg_t R25 = 25; - static constexpr reg_t R26 = 26; - static constexpr reg_t R27 = 27; - static constexpr reg_t R28 = 28; - static constexpr reg_t R29 = 29; - static constexpr reg_t R30 = 30; - static constexpr reg_t LR = 30; // Same as R30 - static constexpr reg_t R31 = 31; - static constexpr reg_t PC = 32; - static constexpr reg_t SP = 33; - static constexpr reg_t ZR = 34; - static constexpr reg_t ZF = 35; - static constexpr reg_t NF = 36; - static constexpr reg_t CF = 37; - static constexpr reg_t VF = 38; - static constexpr reg_t CNTPCT_EL0 = 39; // Physical cycle counter - static constexpr reg_t NB_REGS = 40; + /* General Purpose Registers */ + static constexpr reg_t R0 = 0; // X0 + static constexpr reg_t R1 = 1; // X1 + static constexpr reg_t R2 = 2; // X2 + static constexpr reg_t R3 = 3; // X3 + static constexpr reg_t R4 = 4; // X4 + static constexpr reg_t R5 = 5; // X5 + static constexpr reg_t R6 = 6; // X6 + static constexpr reg_t R7 = 7; // X7 + static constexpr reg_t R8 = 8; // X8 + static constexpr reg_t R9 = 9; // X9 + static constexpr reg_t R10 = 10; // X10 + static constexpr reg_t R11 = 11; // X11 + static constexpr reg_t R12 = 12; // X12 + static constexpr reg_t R13 = 13; // X13 + static constexpr reg_t R14 = 14; // X14 + static constexpr reg_t R15 = 15; // X15 + static constexpr reg_t R16 = 16; // X16 + static constexpr reg_t R17 = 17; // X17 + static constexpr reg_t R18 = 18; // X18 + static constexpr reg_t R19 = 19; // X19 + static constexpr reg_t R20 = 20; // X20 + static constexpr reg_t R21 = 21; // X21 + static constexpr reg_t R22 = 22; // X22 + static constexpr reg_t R23 = 23; // X23 + static constexpr reg_t R24 = 24; // X24 + static constexpr reg_t R25 = 25; // X25 + static constexpr reg_t R26 = 26; // X26 + static constexpr reg_t R27 = 27; // X27 + static constexpr reg_t R28 = 28; // X28 + static constexpr reg_t R29 = 29; // X29 (frame pointer) + static constexpr reg_t R30 = 30; // X30 + static constexpr reg_t LR = 30; // Same as R30 + static constexpr reg_t ZR = 31; // Zero Register + /* 
Floating Point Registers */ + static constexpr reg_t V0 = 32; // V0 + static constexpr reg_t V1 = 33; // V1 + static constexpr reg_t V2 = 34; // V2 + static constexpr reg_t V3 = 35; // V3 + static constexpr reg_t V4 = 36; // V4 + static constexpr reg_t V5 = 37; // V5 + static constexpr reg_t V6 = 38; // V6 + static constexpr reg_t V7 = 39; // V7 + static constexpr reg_t V8 = 40; // V8 + static constexpr reg_t V9 = 41; // V9 + static constexpr reg_t V10 = 42; // V10 + static constexpr reg_t V11 = 43; // V11 + static constexpr reg_t V12 = 44; // V12 + static constexpr reg_t V13 = 45; // V13 + static constexpr reg_t V14 = 46; // V14 + static constexpr reg_t V15 = 47; // V15 + static constexpr reg_t V16 = 48; // V16 + static constexpr reg_t V17 = 49; // V17 + static constexpr reg_t V18 = 50; // V18 + static constexpr reg_t V19 = 51; // V19 + static constexpr reg_t V20 = 52; // V20 + static constexpr reg_t V21 = 53; // V21 + static constexpr reg_t V22 = 54; // V22 + static constexpr reg_t V23 = 55; // V23 + static constexpr reg_t V24 = 56; // V24 + static constexpr reg_t V25 = 57; // V25 + static constexpr reg_t V26 = 58; // V26 + static constexpr reg_t V27 = 59; // V27 + static constexpr reg_t V28 = 60; // V28 + static constexpr reg_t V29 = 61; // V29 + static constexpr reg_t V30 = 62; // V30 + static constexpr reg_t V31 = 63; // V31 + static constexpr reg_t PC = 64; // Program Counter + static constexpr reg_t SP = 65; // Stack Pointer + static constexpr reg_t PSTATE = 66; // Processor State Register + static constexpr reg_t SPSR = 67; // Saved Program Status Register + static constexpr reg_t ELR = 68; // Exception Link Register + // Conditional Flags + static constexpr reg_t ZF = 69; // Zero flag + static constexpr reg_t NF = 70; // Negative flag + static constexpr reg_t CF = 71; // Carry flag + static constexpr reg_t VF = 72; // Overflow flag + static constexpr reg_t CNTPCT_EL0 = 73; // Physical cycle counter + static constexpr reg_t NB_REGS = 74; /** \addtogroup arch * \{ */ diff --git a/src/include/maat/callother.hpp b/src/include/maat/callother.hpp index 0e3e34e3..e077b212 100644 --- a/src/include/maat/callother.hpp +++ b/src/include/maat/callother.hpp @@ -65,6 +65,9 @@ enum class Id EVM_CREATE, EVM_SELFDESTRUCT, EVM_LOG, + // AARCH64 + AARCH64_UDF, + AARCH64_SVC, UNSUPPORTED }; diff --git a/src/include/maat/env/library.hpp b/src/include/maat/env/library.hpp index 914d747b..c04f26d2 100644 --- a/src/include/maat/env/library.hpp +++ b/src/include/maat/env/library.hpp @@ -103,6 +103,8 @@ enum class Type X64_LINUX_SYSCALL, /* ARM64 */ ARM64, + AARCH64_ABI, + AARCH64_SVC, /* Custom */ X86_LINUX_CUSTOM_SYSCALL, ///< Used internally X64_LINUX_CUSTOM_SYSCALL, ///< Used internally @@ -308,10 +310,62 @@ class X64_LINUX_SYSCALL : public ABI virtual void ret(MaatEngine& engine) const; }; +/// AArch64 default ABI +class AARCH64_ABI : public ABI +{ +protected: + AARCH64_ABI(); +public: + /// ABI instance (singleton pattern) + static ABI& instance(); +public: + /// Get function arguments + virtual void get_args( + MaatEngine& engine, + const args_spec_t& args_spec, + std::vector<Value>& args + ) const; + /// Get function argument number 'n' (starting at 0) + virtual Value get_arg(MaatEngine& engine, int n, size_t arg_size) const; + /// Set a function's return value before it returns + virtual void set_ret_value( + MaatEngine& engine, + const FunctionCallback::return_t& ret_val + ) const; + /// Set the return address prior to calling a function + virtual void prepare_ret_address(MaatEngine& engine, addr_t ret_addr) 
const; + /// Return from a function + virtual void ret(MaatEngine& engine) const; +}; + +/// AArch64 system call ABI +class AARCH64_SVC : public ABI +{ +protected: + AARCH64_SVC(); +public: + /// ABI instance (singleton pattern) + static ABI& instance(); +public: + /// Get function arguments + virtual void get_args( + MaatEngine& engine, + const args_spec_t& args_spec, + std::vector<Value>& args + ) const; + /// Get function argument number 'n' (starting at 0) + virtual Value get_arg(MaatEngine& engine, int n, size_t arg_size) const; + /// Set a function's return value before it returns + virtual void set_ret_value( + MaatEngine& engine, + const FunctionCallback::return_t& ret_val + ) const; + /// Return from the syscall + virtual void ret(MaatEngine& engine) const; +}; /** \} */ // doxygen group env } // namespace ABI - /// Emulated function class Function { @@ -417,6 +471,8 @@ namespace emulated Library linux_x86_libc(); /// Return the emulated libc.so for Linux on X64 Library linux_x64_libc(); +/// Return the emulated libc.so for Linux on AARCH64 +Library linux_arm64_libc(); } diff --git a/src/include/maat/env/syscall.hpp b/src/include/maat/env/syscall.hpp index b6fd7bf1..7995fc7c 100644 --- a/src/include/maat/env/syscall.hpp +++ b/src/include/maat/env/syscall.hpp @@ -21,6 +21,8 @@ namespace emulated syscall_func_map_t linux_x86_syscall_map(); /// Return the emulated syscalls for Linux on X64 syscall_func_map_t linux_x64_syscall_map(); +/// Return the emulated syscalls for Linux on AARCH64 +syscall_func_map_t linux_arm64_syscall_map(); } diff --git a/src/include/maat/loader.hpp b/src/include/maat/loader.hpp index 2f9fe3d8..ebadd180 100644 --- a/src/include/maat/loader.hpp +++ b/src/include/maat/loader.hpp @@ -144,7 +144,7 @@ class LoaderLIEF : public Loader private: void parse_binary(const std::string& binary, loader::Format type); void get_arch_special_registers( - const Arch& arch, reg_t& pc, reg_t& sp, reg_t& bp, reg_t& gs, reg_t& fs + const Arch& arch, std::optional<reg_t>& pc, std::optional<reg_t>& sp, std::optional<reg_t>& bp, std::optional<reg_t>& gs, std::optional<reg_t>& fs ); void map_elf_segments(MaatEngine*engine, addr_t base_address); void load_elf_dependencies( diff --git a/src/include/maat/sleigh_interface.hpp b/src/include/maat/sleigh_interface.hpp index d2884524..7e3294bb 100644 --- a/src/include/maat/sleigh_interface.hpp +++ b/src/include/maat/sleigh_interface.hpp @@ -40,6 +40,7 @@ namespace maat inline maat::ir::Param sleigh_reg_translate_X86(const std::string& reg_name); inline maat::ir::Param sleigh_reg_translate_X64(const std::string& reg_name); inline maat::ir::Param sleigh_reg_translate_EVM(const std::string& reg_name); + inline maat::ir::Param sleigh_reg_translate_ARM64(const std::string& reg_name); } diff --git a/src/loader/loader_lief.cpp b/src/loader/loader_lief.cpp index 012eac5b..3904ab85 100644 --- a/src/loader/loader_lief.cpp +++ b/src/loader/loader_lief.cpp @@ -85,7 +85,7 @@ void LoaderLIEF::parse_binary(const std::string& binary, Format type) } void LoaderLIEF::get_arch_special_registers( - const Arch& arch, reg_t& pc, reg_t& sp, reg_t& bp, reg_t& gs, reg_t& fs + const Arch& arch, std::optional<reg_t>& pc, std::optional<reg_t>& sp, std::optional<reg_t>& bp, std::optional<reg_t>& gs, std::optional<reg_t>& fs ) { pc = arch.pc(); @@ -102,6 +102,8 @@ void LoaderLIEF::get_arch_special_registers( gs = X64::GS; fs = X64::FS; break; + case Arch::Type::ARM64: + break; default: throw loader_exception( Fmt() << "LoaderLIEF::get_arch_special_registers(): Unsupported architecture!" 
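For reference, a minimal sketch of how the new ARM64 target can be driven. The MaatEngine constructor, the cpu.ctx() accessors, and the ARM64:: register constants are all taken from this diff; the "maat/engine.hpp" include path is an assumption for illustration.

#include "maat/engine.hpp" // assumed engine header

using namespace maat;

int main()
{
    // Arch::Type::ARM64 selects ArchARM64 and the A64 lifter (see the engine.cpp hunk above)
    MaatEngine engine(Arch::Type::ARM64, env::OS::LINUX);
    // Registers are addressed through the new ARM64:: constants
    engine.cpu.ctx().set(ARM64::PC, 0x400000);
    engine.cpu.ctx().set(ARM64::R0, 42);
    // Reads return a (possibly symbolic) Value, as in AARCH64_SVC_handler
    const Value& r0 = engine.cpu.ctx().get(ARM64::R0);
    return (int)r0.as_uint();
}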
diff --git a/src/loader/loader_lief_elf.cpp index 97d94e3e..486e0b4c 100644 --- a/src/loader/loader_lief_elf.cpp +++ b/src/loader/loader_lief_elf.cpp @@ -212,11 +212,11 @@ void LoaderLIEF::load_elf_using_interpreter( const std::string& interp_path ) { - reg_t reg_sp = -1; - reg_t reg_bp = -1; - reg_t reg_gs = -1; - reg_t reg_fs = -1; - reg_t reg_pc = -1; + std::optional<reg_t> reg_sp = std::nullopt; + std::optional<reg_t> reg_bp = std::nullopt; + std::optional<reg_t> reg_gs = std::nullopt; + std::optional<reg_t> reg_fs = std::nullopt; + std::optional<reg_t> reg_pc = std::nullopt; addr_t stack_base, stack_size, stack_top; // Get particular registers @@ -242,8 +242,10 @@ void LoaderLIEF::load_elf_using_interpreter( stack_size = 0x00200000; stack_top = engine->arch->bits() == 32 ? 0x0c000000 : 0x80000000000; stack_base = alloc_segment(engine, stack_top-stack_size, stack_size, maat::mem_flag_rw, "Stack"); - engine->cpu.ctx().set(reg_sp, stack_base+stack_size-0x400); // - 0x400 to leave some space in memory - engine->cpu.ctx().set(reg_bp, stack_base+stack_size-0x400); + engine->cpu.ctx().set(reg_sp.value(), stack_base+stack_size-0x400); // - 0x400 to leave some space in memory + // The base pointer is an x86-specific register; it may not exist on this arch + if (reg_bp) + engine->cpu.ctx().set(*reg_bp, stack_base+stack_size-0x400); // Load interpreter load_elf_interpreter(engine, interp_path, *this); @@ -255,7 +257,7 @@ void LoaderLIEF::load_elf_using_interpreter( add_elf_dependencies_to_emulated_fs(engine, libdirs, ignore_libs, virtual_fs); // Point PC to interpreter entrypoint - engine->cpu.ctx().set(reg_pc, interpreter_entry.value()); + engine->cpu.ctx().set(reg_pc.value(), interpreter_entry.value()); } void LoaderLIEF::load_elf_binary( @@ -272,11 +274,11 @@ void LoaderLIEF::load_elf_binary( addr_t stack_base, stack_top, stack_size, heap_base, heap_size; addr_t gs, fs; std::list<std::string> loaded_libs; - reg_t reg_sp = -1; - reg_t reg_bp = -1; - reg_t reg_gs = -1; - reg_t reg_fs = -1; - reg_t reg_pc = -1; + std::optional<reg_t> reg_sp = std::nullopt; + std::optional<reg_t> reg_bp = std::nullopt; + std::optional<reg_t> reg_gs = std::nullopt; + std::optional<reg_t> reg_fs = std::nullopt; + std::optional<reg_t> reg_pc = std::nullopt; int arch_bytes = engine->arch->octets(); // Get particular registers @@ -292,8 +294,11 @@ void LoaderLIEF::load_elf_binary( stack_size = 0x00200000; stack_top = engine->arch->bits() == 32 ? 0x0c000000 : 0x80000000000; stack_base = alloc_segment(engine, stack_top-stack_size, stack_size, maat::mem_flag_rw, "Stack"); - engine->cpu.ctx().set(reg_sp, stack_base+stack_size-0x400); // - 0x400 to leave some space in memory - engine->cpu.ctx().set(reg_bp, stack_base+stack_size-0x400); + engine->cpu.ctx().set(reg_sp.value(), stack_base+stack_size-0x400); // - 0x400 to leave some space in memory + // The base pointer is an x86-specific register; it may not exist on this arch + if (reg_bp) + engine->cpu.ctx().set(*reg_bp, stack_base+stack_size-0x400); + // Setup heap heap_base = end_of_segment(*engine->mem, binary_name); @@ -306,12 +311,12 @@ void LoaderLIEF::load_elf_binary( ); // Allocate some segments for GS and FS segment selectors (stack canary stuff) - if (reg_gs != -1) + if (reg_gs && reg_fs) { gs = alloc_segment(engine, 0x00aa0000, 0x1000, maat::mem_flag_rw, "Fake GS: segment"); fs = alloc_segment(engine, 0x00aa0000, 0x1000, maat::mem_flag_rw, "Fake FS: segment"); - engine->cpu.ctx().set(reg_gs, gs); - engine->cpu.ctx().set(reg_fs, fs); + engine->cpu.ctx().set(*reg_gs, gs); + engine->cpu.ctx().set(*reg_fs, fs); } // Preload emulated libraries. 
We do it before loading dependencies @@ -333,7 +338,7 @@ void LoaderLIEF::load_elf_binary( elf_setup_stack(engine, base, args, envp); // Point PC to program entrypoint - engine->cpu.ctx().set(reg_pc, _elf->entrypoint() + base); + engine->cpu.ctx().set(reg_pc.value(), _elf->entrypoint() + base); } void LoaderLIEF::force_relocation(MaatEngine* engine, addr_t base, const std::string& rel_name, addr_t value) diff --git a/src/third-party/sleigh/native/reg_translator.cpp b/src/third-party/sleigh/native/reg_translator.cpp index bc39c700..126d028b 100644 --- a/src/third-party/sleigh/native/reg_translator.cpp +++ b/src/third-party/sleigh/native/reg_translator.cpp @@ -1320,4 +1320,191 @@ maat::ir::Param sleigh_reg_translate_EVM(const std::string& reg_name) ); } +maat::ir::Param sleigh_reg_translate_ARM64(const std::string& reg_name) +{ + /* General Purpose Registers */ + if (reg_name == "x0") return maat::ir::Reg(maat::ARM64::R0, 63, 0); + if (reg_name == "x1") return maat::ir::Reg(maat::ARM64::R1, 63, 0); + if (reg_name == "x2") return maat::ir::Reg(maat::ARM64::R2, 63, 0); + if (reg_name == "x3") return maat::ir::Reg(maat::ARM64::R3, 63, 0); + if (reg_name == "x4") return maat::ir::Reg(maat::ARM64::R4, 63, 0); + if (reg_name == "x5") return maat::ir::Reg(maat::ARM64::R5, 63, 0); + if (reg_name == "x6") return maat::ir::Reg(maat::ARM64::R6, 63, 0); + if (reg_name == "x7") return maat::ir::Reg(maat::ARM64::R7, 63, 0); + if (reg_name == "x8") return maat::ir::Reg(maat::ARM64::R8, 63, 0); + if (reg_name == "x9") return maat::ir::Reg(maat::ARM64::R9, 63, 0); + if (reg_name == "x10") return maat::ir::Reg(maat::ARM64::R10, 63, 0); + if (reg_name == "x11") return maat::ir::Reg(maat::ARM64::R11, 63, 0); + if (reg_name == "x12") return maat::ir::Reg(maat::ARM64::R12, 63, 0); + if (reg_name == "x13") return maat::ir::Reg(maat::ARM64::R13, 63, 0); + if (reg_name == "x14") return maat::ir::Reg(maat::ARM64::R14, 63, 0); + if (reg_name == "x15") return maat::ir::Reg(maat::ARM64::R15, 63, 0); + if (reg_name == "x16") return maat::ir::Reg(maat::ARM64::R16, 63, 0); + if (reg_name == "x17") return maat::ir::Reg(maat::ARM64::R17, 63, 0); + if (reg_name == "x18") return maat::ir::Reg(maat::ARM64::R18, 63, 0); + if (reg_name == "x19") return maat::ir::Reg(maat::ARM64::R19, 63, 0); + if (reg_name == "x20") return maat::ir::Reg(maat::ARM64::R20, 63, 0); + if (reg_name == "x21") return maat::ir::Reg(maat::ARM64::R21, 63, 0); + if (reg_name == "x22") return maat::ir::Reg(maat::ARM64::R22, 63, 0); + if (reg_name == "x23") return maat::ir::Reg(maat::ARM64::R23, 63, 0); + if (reg_name == "x24") return maat::ir::Reg(maat::ARM64::R24, 63, 0); + if (reg_name == "x25") return maat::ir::Reg(maat::ARM64::R25, 63, 0); + if (reg_name == "x26") return maat::ir::Reg(maat::ARM64::R26, 63, 0); + if (reg_name == "x27") return maat::ir::Reg(maat::ARM64::R27, 63, 0); + if (reg_name == "x28") return maat::ir::Reg(maat::ARM64::R28, 63, 0); + if (reg_name == "x29") return maat::ir::Reg(maat::ARM64::R29, 63, 0); + if (reg_name == "x30") return maat::ir::Reg(maat::ARM64::R30, 63, 0); + /* 32-bit masked GP Registers */ + if (reg_name == "w0") return maat::ir::Reg(maat::ARM64::R0, 31, 0); + if (reg_name == "w1") return maat::ir::Reg(maat::ARM64::R1, 31, 0); + if (reg_name == "w2") return maat::ir::Reg(maat::ARM64::R2, 31, 0); + if (reg_name == "w3") return maat::ir::Reg(maat::ARM64::R3, 31, 0); + if (reg_name == "w4") return maat::ir::Reg(maat::ARM64::R4, 31, 0); + if (reg_name == "w5") return maat::ir::Reg(maat::ARM64::R5, 31, 0); + if (reg_name 
== "w6") return maat::ir::Reg(maat::ARM64::R6, 31, 0); + if (reg_name == "w7") return maat::ir::Reg(maat::ARM64::R7, 31, 0); + if (reg_name == "w8") return maat::ir::Reg(maat::ARM64::R8, 31, 0); + if (reg_name == "w9") return maat::ir::Reg(maat::ARM64::R9, 31, 0); + if (reg_name == "w10") return maat::ir::Reg(maat::ARM64::R10, 31, 0); + if (reg_name == "w11") return maat::ir::Reg(maat::ARM64::R11, 31, 0); + if (reg_name == "w12") return maat::ir::Reg(maat::ARM64::R12, 31, 0); + if (reg_name == "w13") return maat::ir::Reg(maat::ARM64::R13, 31, 0); + if (reg_name == "w14") return maat::ir::Reg(maat::ARM64::R14, 31, 0); + if (reg_name == "w15") return maat::ir::Reg(maat::ARM64::R15, 31, 0); + if (reg_name == "w16") return maat::ir::Reg(maat::ARM64::R16, 31, 0); + if (reg_name == "w17") return maat::ir::Reg(maat::ARM64::R17, 31, 0); + if (reg_name == "w18") return maat::ir::Reg(maat::ARM64::R18, 31, 0); + if (reg_name == "w19") return maat::ir::Reg(maat::ARM64::R19, 31, 0); + if (reg_name == "w20") return maat::ir::Reg(maat::ARM64::R20, 31, 0); + if (reg_name == "w21") return maat::ir::Reg(maat::ARM64::R21, 31, 0); + if (reg_name == "w22") return maat::ir::Reg(maat::ARM64::R22, 31, 0); + if (reg_name == "w23") return maat::ir::Reg(maat::ARM64::R23, 31, 0); + if (reg_name == "w24") return maat::ir::Reg(maat::ARM64::R24, 31, 0); + if (reg_name == "w25") return maat::ir::Reg(maat::ARM64::R25, 31, 0); + if (reg_name == "w26") return maat::ir::Reg(maat::ARM64::R26, 31, 0); + if (reg_name == "w27") return maat::ir::Reg(maat::ARM64::R27, 31, 0); + if (reg_name == "w28") return maat::ir::Reg(maat::ARM64::R28, 31, 0); + if (reg_name == "w29") return maat::ir::Reg(maat::ARM64::R29, 31, 0); + if (reg_name == "w30") return maat::ir::Reg(maat::ARM64::R30, 31, 0); + /* Floating Point Registers */ + if (reg_name == "q0") return maat::ir::Reg(maat::ARM64::V0, 127, 0); + if (reg_name == "q1") return maat::ir::Reg(maat::ARM64::V1, 127, 0); + if (reg_name == "q2") return maat::ir::Reg(maat::ARM64::V2, 127, 0); + if (reg_name == "q3") return maat::ir::Reg(maat::ARM64::V3, 127, 0); + if (reg_name == "q4") return maat::ir::Reg(maat::ARM64::V4, 127, 0); + if (reg_name == "q5") return maat::ir::Reg(maat::ARM64::V5, 127, 0); + if (reg_name == "q6") return maat::ir::Reg(maat::ARM64::V6, 127, 0); + if (reg_name == "q7") return maat::ir::Reg(maat::ARM64::V7, 127, 0); + if (reg_name == "q8") return maat::ir::Reg(maat::ARM64::V8, 127, 0); + if (reg_name == "q9") return maat::ir::Reg(maat::ARM64::V9, 127, 0); + if (reg_name == "q10") return maat::ir::Reg(maat::ARM64::V10, 127, 0); + if (reg_name == "q11") return maat::ir::Reg(maat::ARM64::V11, 127, 0); + if (reg_name == "q12") return maat::ir::Reg(maat::ARM64::V12, 127, 0); + if (reg_name == "q13") return maat::ir::Reg(maat::ARM64::V13, 127, 0); + if (reg_name == "q14") return maat::ir::Reg(maat::ARM64::V14, 127, 0); + if (reg_name == "q15") return maat::ir::Reg(maat::ARM64::V15, 127, 0); + if (reg_name == "q16") return maat::ir::Reg(maat::ARM64::V16, 127, 0); + if (reg_name == "q17") return maat::ir::Reg(maat::ARM64::V17, 127, 0); + if (reg_name == "q18") return maat::ir::Reg(maat::ARM64::V18, 127, 0); + if (reg_name == "q19") return maat::ir::Reg(maat::ARM64::V19, 127, 0); + if (reg_name == "q20") return maat::ir::Reg(maat::ARM64::V20, 127, 0); + if (reg_name == "q21") return maat::ir::Reg(maat::ARM64::V21, 127, 0); + if (reg_name == "q22") return maat::ir::Reg(maat::ARM64::V22, 127, 0); + if (reg_name == "q23") return maat::ir::Reg(maat::ARM64::V23, 127, 0); + if 
(reg_name == "q24") return maat::ir::Reg(maat::ARM64::V24, 127, 0); + if (reg_name == "q25") return maat::ir::Reg(maat::ARM64::V25, 127, 0); + if (reg_name == "q26") return maat::ir::Reg(maat::ARM64::V26, 127, 0); + if (reg_name == "q27") return maat::ir::Reg(maat::ARM64::V27, 127, 0); + if (reg_name == "q28") return maat::ir::Reg(maat::ARM64::V28, 127, 0); + if (reg_name == "q29") return maat::ir::Reg(maat::ARM64::V29, 127, 0); + if (reg_name == "q30") return maat::ir::Reg(maat::ARM64::V30, 127, 0); + if (reg_name == "q31") return maat::ir::Reg(maat::ARM64::V31, 127, 0); + /* 64-bit masked FP Registers*/ + if (reg_name == "d0") return maat::ir::Reg(maat::ARM64::V0, 63, 0); + if (reg_name == "d1") return maat::ir::Reg(maat::ARM64::V1, 63, 0); + if (reg_name == "d2") return maat::ir::Reg(maat::ARM64::V2, 63, 0); + if (reg_name == "d3") return maat::ir::Reg(maat::ARM64::V3, 63, 0); + if (reg_name == "d4") return maat::ir::Reg(maat::ARM64::V4, 63, 0); + if (reg_name == "d5") return maat::ir::Reg(maat::ARM64::V5, 63, 0); + if (reg_name == "d6") return maat::ir::Reg(maat::ARM64::V6, 63, 0); + if (reg_name == "d7") return maat::ir::Reg(maat::ARM64::V7, 63, 0); + if (reg_name == "d8") return maat::ir::Reg(maat::ARM64::V8, 63, 0); + if (reg_name == "d9") return maat::ir::Reg(maat::ARM64::V9, 63, 0); + if (reg_name == "d10") return maat::ir::Reg(maat::ARM64::V10, 63, 0); + if (reg_name == "d11") return maat::ir::Reg(maat::ARM64::V11, 63, 0); + if (reg_name == "d12") return maat::ir::Reg(maat::ARM64::V12, 63, 0); + if (reg_name == "d13") return maat::ir::Reg(maat::ARM64::V13, 63, 0); + if (reg_name == "d14") return maat::ir::Reg(maat::ARM64::V14, 63, 0); + if (reg_name == "d15") return maat::ir::Reg(maat::ARM64::V15, 63, 0); + if (reg_name == "d16") return maat::ir::Reg(maat::ARM64::V16, 63, 0); + if (reg_name == "d17") return maat::ir::Reg(maat::ARM64::V17, 63, 0); + if (reg_name == "d18") return maat::ir::Reg(maat::ARM64::V18, 63, 0); + if (reg_name == "d19") return maat::ir::Reg(maat::ARM64::V19, 63, 0); + if (reg_name == "d20") return maat::ir::Reg(maat::ARM64::V20, 63, 0); + if (reg_name == "d21") return maat::ir::Reg(maat::ARM64::V21, 63, 0); + if (reg_name == "d22") return maat::ir::Reg(maat::ARM64::V22, 63, 0); + if (reg_name == "d23") return maat::ir::Reg(maat::ARM64::V23, 63, 0); + if (reg_name == "d24") return maat::ir::Reg(maat::ARM64::V24, 63, 0); + if (reg_name == "d25") return maat::ir::Reg(maat::ARM64::V25, 63, 0); + if (reg_name == "d26") return maat::ir::Reg(maat::ARM64::V26, 63, 0); + if (reg_name == "d27") return maat::ir::Reg(maat::ARM64::V27, 63, 0); + if (reg_name == "d28") return maat::ir::Reg(maat::ARM64::V28, 63, 0); + if (reg_name == "d29") return maat::ir::Reg(maat::ARM64::V29, 63, 0); + if (reg_name == "d30") return maat::ir::Reg(maat::ARM64::V30, 63, 0); + if (reg_name == "d31") return maat::ir::Reg(maat::ARM64::V31, 63, 0); + /* Scalable Vector Extension Registers */ + if (reg_name == "z0") return maat::ir::Reg(maat::ARM64::V0, 127, 0); + if (reg_name == "z1") return maat::ir::Reg(maat::ARM64::V1, 127, 0); + if (reg_name == "z2") return maat::ir::Reg(maat::ARM64::V2, 127, 0); + if (reg_name == "z3") return maat::ir::Reg(maat::ARM64::V3, 127, 0); + if (reg_name == "z4") return maat::ir::Reg(maat::ARM64::V4, 127, 0); + if (reg_name == "z5") return maat::ir::Reg(maat::ARM64::V5, 127, 0); + if (reg_name == "z6") return maat::ir::Reg(maat::ARM64::V6, 127, 0); + if (reg_name == "z7") return maat::ir::Reg(maat::ARM64::V7, 127, 0); + if (reg_name == "z8") return 
maat::ir::Reg(maat::ARM64::V8, 127, 0); + if (reg_name == "z9") return maat::ir::Reg(maat::ARM64::V9, 127, 0); + if (reg_name == "z10") return maat::ir::Reg(maat::ARM64::V10, 127, 0); + if (reg_name == "z11") return maat::ir::Reg(maat::ARM64::V11, 127, 0); + if (reg_name == "z12") return maat::ir::Reg(maat::ARM64::V12, 127, 0); + if (reg_name == "z13") return maat::ir::Reg(maat::ARM64::V13, 127, 0); + if (reg_name == "z14") return maat::ir::Reg(maat::ARM64::V14, 127, 0); + if (reg_name == "z15") return maat::ir::Reg(maat::ARM64::V15, 127, 0); + if (reg_name == "z16") return maat::ir::Reg(maat::ARM64::V16, 127, 0); + if (reg_name == "z17") return maat::ir::Reg(maat::ARM64::V17, 127, 0); + if (reg_name == "z18") return maat::ir::Reg(maat::ARM64::V18, 127, 0); + if (reg_name == "z19") return maat::ir::Reg(maat::ARM64::V19, 127, 0); + if (reg_name == "z20") return maat::ir::Reg(maat::ARM64::V20, 127, 0); + if (reg_name == "z21") return maat::ir::Reg(maat::ARM64::V21, 127, 0); + if (reg_name == "z22") return maat::ir::Reg(maat::ARM64::V22, 127, 0); + if (reg_name == "z23") return maat::ir::Reg(maat::ARM64::V23, 127, 0); + if (reg_name == "z24") return maat::ir::Reg(maat::ARM64::V24, 127, 0); + if (reg_name == "z25") return maat::ir::Reg(maat::ARM64::V25, 127, 0); + if (reg_name == "z26") return maat::ir::Reg(maat::ARM64::V26, 127, 0); + if (reg_name == "z27") return maat::ir::Reg(maat::ARM64::V27, 127, 0); + if (reg_name == "z28") return maat::ir::Reg(maat::ARM64::V28, 127, 0); + if (reg_name == "z29") return maat::ir::Reg(maat::ARM64::V29, 127, 0); + if (reg_name == "z30") return maat::ir::Reg(maat::ARM64::V30, 127, 0); + if (reg_name == "z31") return maat::ir::Reg(maat::ARM64::V31, 127, 0); + if (reg_name == "pc") return maat::ir::Reg(maat::ARM64::PC, 63, 0); + if (reg_name == "sp") return maat::ir::Reg(maat::ARM64::SP, 63, 0); + if (reg_name == "wsp") return maat::ir::Reg(maat::ARM64::SP, 31, 0); + /* Conditional Flags */ + if (reg_name == "NG") return maat::ir::Reg(maat::ARM64::NF, 8); + if (reg_name == "ZR") return maat::ir::Reg(maat::ARM64::ZF, 8); + if (reg_name == "CY") return maat::ir::Reg(maat::ARM64::CF, 8); + if (reg_name == "OV") return maat::ir::Reg(maat::ARM64::VF, 8); + /* Temp Conditional Flags */ + if (reg_name == "tmpNG") return maat::ir::Reg(maat::ARM64::NF, 8); + if (reg_name == "tmpZR") return maat::ir::Reg(maat::ARM64::ZF, 8); + if (reg_name == "tmpCY") return maat::ir::Reg(maat::ARM64::CF, 8); + if (reg_name == "tmpOV") return maat::ir::Reg(maat::ARM64::VF, 8); + + + throw maat::runtime_exception(maat::Fmt() + << "ARM64: Register translation from SLEIGH to MAAT missing for register " + << reg_name + >> maat::Fmt::to_str + ); +} + } // namespace maat \ No newline at end of file diff --git a/src/third-party/sleigh/native/sleigh_interface.cpp b/src/third-party/sleigh/native/sleigh_interface.cpp index e31b5702..71474e8c 100644 --- a/src/third-party/sleigh/native/sleigh_interface.cpp +++ b/src/third-party/sleigh/native/sleigh_interface.cpp @@ -630,6 +630,8 @@ maat::ir::Param reg_name_to_maat_reg(maat::Arch::Type arch, const std::string& r return sleigh_reg_translate_X64(reg_name); else if (arch == Arch::Type::EVM) return sleigh_reg_translate_EVM(reg_name); + else if (arch == Arch::Type::ARM64) + return sleigh_reg_translate_ARM64(reg_name); else throw maat::runtime_exception("Register translation from SLEIGH to MAAT not implemented for this architecture!"); } diff --git a/src/third-party/sleigh/processors/AARCH64/data/aarch64-pltThunks.xml 
b/src/third-party/sleigh/processors/AARCH64/data/aarch64-pltThunks.xml new file mode 100644 index 00000000..0525c65a --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/aarch64-pltThunks.xml @@ -0,0 +1,15 @@ + ...10000 0x.. 0x.. 1..10000 # adrp x16, PLTGOT + n * 8 + 0x11 ......10 01...... 0xf9 # ldr x17, [x16, PLTGOT + n * 8] + 0x10 ......10 00...... 0x91 # add x16, x16, :lo12:PLTGOT + n * 8 + 0x20 0x02 0x1f 0xd6 # br x17 diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.cspec b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.cspec new file mode 100644 index 00000000..06aed80b --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.cspec @@ -0,0 +1,200 @@ diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.dwarf b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.dwarf new file mode 100644 index 00000000..c3ba26e5 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.dwarf @@ -0,0 +1,8 @@ diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.ldefs b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.ldefs new file mode 100644 index 00000000..d3e6477d --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.ldefs @@ -0,0 +1,35 @@ + Generic ARM v8.5-A LE instructions, LE data, missing some 8.5 vector + Generic ARM v8.5-A LE instructions, BE data, missing some 8.5 vector diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.opinion b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.opinion new file mode 100644 index 00000000..40fd8d6a --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.opinion @@ -0,0 +1,14 @@ diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.pspec b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.pspec new file mode 100644 index 00000000..47aeecdb --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.pspec @@ -0,0 +1,101 @@ diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.slaspec b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.slaspec new file mode 100644 index 00000000..12cedc1b --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64.slaspec @@ -0,0 +1,5 @@ + +@define DATA_ENDIAN "little" + +@include "AARCH64instructions.sinc" + diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64BE.slaspec b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64BE.slaspec new file mode 100644 index 00000000..e1f3970d --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64BE.slaspec @@ -0,0 +1,5 @@ + +@define 
DATA_ENDIAN "big" + +@include "AARCH64instructions.sinc" + diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_AMXext.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_AMXext.sinc new file mode 100644 index 00000000..2082b6fa --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_AMXext.sinc @@ -0,0 +1,171 @@ +# +# Apple AARCH64 extended matrix instructions +# Contents based on evolving information published on Web +# +# + +define pcodeop __amx_ldx; +define pcodeop __amx_ldy; +define pcodeop __amx_stx; +define pcodeop __amx_sty; +define pcodeop __amx_ldz; +define pcodeop __amx_stz; +define pcodeop __amx_ldzi; +define pcodeop __amx_stzi; +define pcodeop __amx_extrx; +define pcodeop __amx_extry; +define pcodeop __amx_fma64; +define pcodeop __amx_fms64; +define pcodeop __amx_fma32; +define pcodeop __amx_fms32; +define pcodeop __amx_mac16; +define pcodeop __amx_fma16; +define pcodeop __amx_fms16; +define pcodeop __amx_enable; +define pcodeop __amx_disable; +define pcodeop __amx_vecint; +define pcodeop __amx_vecfp; +define pcodeop __amx_matint; +define pcodeop __amx_matfp; +define pcodeop __amx_genlut; + + +with : ImmS_ImmR_TestSet=1 { + +AMXAddr: is Rd_GPR64 { + addr:8 = Rd_GPR64 & 0x00FFFFFFFFFFFFFF; + export addr; +} + +AMXRegOff: is Rd_GPR64 { + registerOff:8 = (Rd_GPR64 >> 56) & 0x1F; + export registerOff; +} + +AMXSize: is Rd_GPR64 { + local size = ((Rd_GPR64 >> 62) & 1); + size = zext(size == 0) * 0x40 | zext(size ==1 ) * 0x80; + export size; +} + +:__amx_ldx Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=0 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_ldx(Rd_GPR64); +} + +:__amx_ldy Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=1 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_ldy(Rd_GPR64); +} + +:__amx_stx Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=2 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_stx(Rd_GPR64); +} + +:__amx_sty Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=3 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_sty(Rd_GPR64); +} + +:__amx_ldz Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=4 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_ldz(Rd_GPR64); +} + +:__amx_stz Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=5 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_stz(Rd_GPR64); +} + +:__amx_ldzi Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=6 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_ldzi(Rd_GPR64); +} + +:__amx_stzi Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=7 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 +{ + __amx_stzi(Rd_GPR64); +} + +:__amx_extrx Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=8 & Rd_GPR64 +{ + __amx_extrx(Rd_GPR64); +} + +:__amx_extry Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=9 & Rd_GPR64 +{ + __amx_extry(Rd_GPR64); +} + +:__amx_fma64 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=10 & Rd_GPR64 +{ + __amx_fma64(Rd_GPR64); +} + +:__amx_fms64 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=11 & Rd_GPR64 +{ + __amx_fms64(Rd_GPR64); +} + +:__amx_fma32 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=12 & Rd_GPR64 +{ + __amx_fma32(Rd_GPR64); +} + +:__amx_fms32 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=13 & 
Rd_GPR64 +{ + __amx_fms32(Rd_GPR64); +} + +:__amx_mac16 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=14 & Rd_GPR64 +{ + __amx_mac16(Rd_GPR64); +} + +:__amx_fma16 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=15 & Rd_GPR64 +{ + __amx_fma16(Rd_GPR64); +} + +:__amx_fms16 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=16 & Rd_GPR64 +{ + __amx_fms16(Rd_GPR64); +} + +:__amxdisable is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=17 & b_0004=1 +{ + __amx_disable(); +} + +:__amxenable is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=17 & b_0004=0 +{ + __amx_enable(); +} + +:__amx_vecint Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=18 & Rd_GPR64 +{ + __amx_vecint(Rd_GPR64); +} + +:__amx_vecfp Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=19 & Rd_GPR64 +{ + __amx_vecfp(Rd_GPR64); +} + +:__amx_matint Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=20 & Rd_GPR64 +{ + __amx_matint(Rd_GPR64); +} + +:__amx_matfp Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=21 & Rd_GPR64 +{ + __amx_matfp(Rd_GPR64); +} + +:__amx_genlut Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=22 & Rd_GPR64 +{ + __amx_genlut(Rd_GPR64); +} + +} \ No newline at end of file diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_AppleSilicon.slaspec b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_AppleSilicon.slaspec new file mode 100644 index 00000000..f1290bbd --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_AppleSilicon.slaspec @@ -0,0 +1,6 @@ + +@define DATA_ENDIAN "little" + +@include "AARCH64instructions.sinc" +@include "AARCH64_AMXext.sinc" + diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_base_PACoptions.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_base_PACoptions.sinc new file mode 100644 index 00000000..fcefee05 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_base_PACoptions.sinc @@ -0,0 +1,164 @@ +autda__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthDA(Rd_GPR64, Rn_GPR64xsp); } +autda__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthDA(Rd_GPR64, Rn_GPR64xsp); } +autda__PACpart: "hide" is ShowPAC=0 { } + +autdza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthDA(Rd_GPR64, xzr); } +autdza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthDA(Rd_GPR64, xzr); } +autdza__PACpart: "hide" is ShowPAC=0 { } + +autdb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthDB(Rd_GPR64, Rn_GPR64xsp); } +autdb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthDB(Rd_GPR64, Rn_GPR64xsp); } +autdb__PACpart: "hide" is ShowPAC=0 { } + +autdzb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthDB(Rd_GPR64, xzr); } +autdzb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthDB(Rd_GPR64, xzr); } +autdzb__PACpart: "hide" is ShowPAC=0 { } + +autia__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthIA(Rd_GPR64, Rn_GPR64xsp); } +autia__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthIA(Rd_GPR64, Rn_GPR64xsp); } 
+autia__PACpart: "hide" is ShowPAC=0 { } + +autiza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthIA(Rd_GPR64, xzr); } +autiza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthIA(Rd_GPR64, xzr); } +autiza__PACpart: "hide" is ShowPAC=0 { } + +autia1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = AuthIA(x17, x16); } +autia1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x17, x16); } +autia1716__PACpart: "hide" is ShowPAC=0 { } + +autiasp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIA(x30, sp); } +autiasp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x30, sp); } +autiasp__PACpart: "hide" is ShowPAC=0 { } + +autiaz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIA(x30, xzr); } +autiaz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x30, xzr); } +autiaz__PACpart: "hide" is ShowPAC=0 { } + +autib__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthIB(Rd_GPR64, Rn_GPR64xsp); } +autib__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthIB(Rd_GPR64, Rn_GPR64xsp); } +autib__PACpart: "hide" is ShowPAC=0 { } + +autizb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthIB(Rd_GPR64, xzr); } +autizb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthIB(Rd_GPR64, xzr); } +autizb__PACpart: "hide" is ShowPAC=0 { } + +autib1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = AuthIB(x17, x16); } +autib1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x17, x16); } +autib1716__PACpart: "hide" is ShowPAC=0 { } + +autibsp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIB(x30, sp); } +autibsp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x30, sp); } +autibsp__PACpart: "hide" is ShowPAC=0 { } + +autibz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIB(x30, xzr); } +autibz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x30, xzr); } +autibz__PACpart: "hide" is ShowPAC=0 { } + +b_blinkop__raaz___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64 { AuthIA(Rn_GPR64, xzr); } +b_blinkop__raaz___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64 { AuthIA(Rn_GPR64, xzr); } +b_blinkop__raaz___PACpart: "hide" is ShowPAC=0 { } + +b_blinkop__raa___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64xsp & Rn_GPR64 { AuthIA(Rn_GPR64, Rd_GPR64xsp); } +b_blinkop__raa___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64xsp & Rn_GPR64 { AuthIA(Rn_GPR64, Rd_GPR64xsp); } +b_blinkop__raa___PACpart: "hide" is ShowPAC=0 { } + +b_blinkop__rabz___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64 { AuthIB(Rn_GPR64, xzr); } +b_blinkop__rabz___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64 { AuthIB(Rn_GPR64, xzr); } +b_blinkop__rabz___PACpart: "hide" is ShowPAC=0 { } + +b_blinkop__rab___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64xsp & Rn_GPR64 { AuthIB(Rn_GPR64, Rd_GPR64xsp); } +b_blinkop__rab___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64xsp & Rn_GPR64 { AuthIB(Rn_GPR64, Rd_GPR64xsp); } +b_blinkop__rab___PACpart: "hide" is ShowPAC=0 { } + +eretaa__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIA(pc, sp); } +eretaa__PACpart: "show_noclobber" 
is ShowPAC=1 & PAC_clobber=0 { AuthIA(pc, sp); } +eretaa__PACpart: "hide" is ShowPAC=0 { } + +eretab__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIB(pc, sp); } +eretab__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(pc, sp); } +eretab__PACpart: "hide" is ShowPAC=0 { } + +ldraa__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp { AuthDA(Rn_GPR64xsp, xzr); } +ldraa__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp { AuthDA(Rn_GPR64xsp, xzr); } +ldraa__PACpart: "hide" is ShowPAC=0 { } + +ldrab__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp { AuthDB(Rn_GPR64xsp, xzr); } +ldrab__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp { AuthDB(Rn_GPR64xsp, xzr); } +ldrab__PACpart: "hide" is ShowPAC=0 { } + +pacda__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacda(Rd_GPR64, Rn_GPR64xsp); } +pacda__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacda(Rd_GPR64, Rn_GPR64xsp); } +pacda__PACpart: "hide" is ShowPAC=0 { } + +pacdza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = pacdza(Rd_GPR64); } +pacdza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { pacdza(Rd_GPR64); } +pacdza__PACpart: "hide" is ShowPAC=0 { } + +pacdb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacdb(Rd_GPR64, Rn_GPR64xsp); } +pacdb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacdb(Rd_GPR64, Rn_GPR64xsp); } +pacdb__PACpart: "hide" is ShowPAC=0 { } + +pacdzb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = pacdzb(Rd_GPR64); } +pacdzb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { pacdzb(Rd_GPR64); } +pacdzb__PACpart: "hide" is ShowPAC=0 { } + +pacia__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacia(Rd_GPR64, Rn_GPR64xsp); } +pacia__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacia(Rd_GPR64, Rn_GPR64xsp); } +pacia__PACpart: "hide" is ShowPAC=0 { } + +paciza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = paciza(Rd_GPR64); } +paciza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { paciza(Rd_GPR64); } +paciza__PACpart: "hide" is ShowPAC=0 { } + +pacia1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = pacia(x17, x16); } +pacia1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacia(x17, x16); } +pacia1716__PACpart: "hide" is ShowPAC=0 { } + +paciasp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = pacia(x30, sp); } +paciasp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacia(x30, sp); } +paciasp__PACpart: "hide" is ShowPAC=0 { } + +paciaz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = paciza(x30); } +paciaz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { paciza(x30); } +paciaz__PACpart: "hide" is ShowPAC=0 { } + +pacib__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacib(Rd_GPR64, Rn_GPR64xsp); } +pacib__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacib(Rd_GPR64, Rn_GPR64xsp); } +pacib__PACpart: "hide" is ShowPAC=0 { } + +pacizb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = pacizb(Rd_GPR64); 
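+ # ShowPAC / PAC_clobber are disassembly context options used by the
+ # __PACpart tables: "hide" omits the PAC semantics entirely,
+ # "show_noclobber" emits the pac*/Auth* pcode op without writing the
+ # result back, and "show_and_clobber" also commits it to the register.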
}
+pacizb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { pacizb(Rd_GPR64); }
+pacizb__PACpart: "hide" is ShowPAC=0 { }
+
+pacib1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = pacib(x17, x16); }
+pacib1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacib(x17, x16); }
+pacib1716__PACpart: "hide" is ShowPAC=0 { }
+
+pacibsp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = pacib(x30, sp); }
+pacibsp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacib(x30, sp); }
+pacibsp__PACpart: "hide" is ShowPAC=0 { }
+
+pacibz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = pacizb(x30); }
+pacibz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacizb(x30); }
+pacibz__PACpart: "hide" is ShowPAC=0 { }
+
+retaa__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIA(x30, sp); }
+retaa__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x30, sp); }
+retaa__PACpart: "hide" is ShowPAC=0 { }
+
+retab__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIB(x30, sp); }
+retab__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x30, sp); }
+retab__PACpart: "hide" is ShowPAC=0 { }
+
+xpacd__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = xpac(Rd_GPR64, 1:1); }
+xpacd__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { xpac(Rd_GPR64, 1:1); }
+xpacd__PACpart: "hide" is ShowPAC=0 { }
+
+xpaci__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = xpac(Rd_GPR64, 0:1); }
+xpaci__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { xpac(Rd_GPR64, 0:1); }
+xpaci__PACpart: "hide" is ShowPAC=0 { }
+
+xpaclri__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = xpac(x30, 0:1); }
+xpaclri__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { xpac(x30, 0:1); }
+xpaclri__PACpart: "hide" is ShowPAC=0 { }
+
diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_win.cspec b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_win.cspec
new file mode 100644
index 00000000..156fc99d
--- /dev/null
+++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64_win.cspec
@@ -0,0 +1,196 @@
+[196 lines of compiler-spec XML; the markup did not survive extraction]
diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64base.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64base.sinc
new file mode 100644
index 00000000..ca764510
--- /dev/null
+++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64base.sinc
@@ -0,0 +1,8134 @@
+# C6.2.1 ADC page C6-772 line 43573 MATCH x1a000000/mask=x7fe0fc00
+# C6.2.2 ADCS page C6-774 line 43659 MATCH x3a000000/mask=x7fe0fc00
+# CONSTRUCT x1a000000/mask=xdfe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x1a000000/mask=xdfe0fc00 --status pass --comment "flags"
+
+:adc^SBIT_CZNO Rd_GPR32, Rn_GPR32, Rm_GPR32
+is sf=0 & b_30=0 & S & SBIT_CZNO & b_2428=0x1a & b_2123=0 & Rm_GPR32 & b_1015=0 & Rd_GPR32 & Rd_GPR64 & Rn_GPR32
+{
+ add_with_carry_flags(Rn_GPR32, Rm_GPR32);
+ tmp:4 = Rm_GPR32 + Rn_GPR32 + zext(CY);
+ Rd_GPR64 = zext(tmp);
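+ # add_with_carry_flags() computes the temporary carry/overflow flags;
+ # resultflags() below fills in the temporary negative/zero flags, and
+ # "build SBIT_CZNO" only commits them to the architectural flags when
+ # the S bit selects the flag-setting adcs form.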
resultflags(tmp); + build SBIT_CZNO; +} + +# C6.2.1 ADC page C6-772 line 43573 MATCH x1a000000/mask=x7fe0fc00 +# C6.2.2 ADCS page C6-774 line 43659 MATCH x3a000000/mask=x7fe0fc00 +# CONSTRUCT x9a000000/mask=xdfe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9a000000/mask=xdfe0fc00 --status pass --comment "flags" + +:adc^SBIT_CZNO Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & b_30=0 & S & SBIT_CZNO & b_2428=0x1a & b_2123=0 & Rm_GPR64 & b_1015=0 & Rd_GPR64 & Rn_GPR64 +{ + add_with_carry_flags(Rn_GPR64, Rm_GPR64); + Rd_GPR64 = Rn_GPR64 + Rm_GPR64 + zext(CY); + resultflags(Rd_GPR64); + build SBIT_CZNO; +} + +# C6.2.3 ADD (extended register) page C6-776 line 43748 MATCH x0b200000/mask=x7fe00000 +# C6.2.7 ADDS (extended register) page C6-784 line 44172 MATCH x2b200000/mask=x7fe00000 +# C6.2.57 CMN (extended register) page C6-869 line 48602 MATCH x2b20001f/mask=x7fe0001f +# CONSTRUCT x0b200000/mask=xdfe00000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x0b200000/mask=xdfe00000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR32wsp, Rn_GPR32wsp, ExtendRegShift32 +is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_2:4 = ExtendRegShift32; + addflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp + tmp_2; + resultflags(tmp_1); + Rd_GPR64xsp = zext(tmp_1); + build SBIT_CZNO; +} + +# C6.2.3 ADD (extended register) page C6-776 line 43748 MATCH x0b200000/mask=x7fe00000 +# C6.2.7 ADDS (extended register) page C6-784 line 44172 MATCH x2b200000/mask=x7fe00000 +# C6.2.57 CMN (extended register) page C6-869 line 48602 MATCH x2b20001f/mask=x7fe0001f +# CONSTRUCT x8b200000/mask=xdfe00000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x8b200000/mask=xdfe00000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, ExtendRegShift64 +is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift64 & Rn_GPR64xsp & Rd_GPR64xsp +{ + tmp_2:8 = ExtendRegShift64; + addflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp + tmp_2; + resultflags(tmp_1); + Rd_GPR64xsp = tmp_1; + build SBIT_CZNO; +} + +# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000 +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.185 MOV (to/from SP) page C6-1110 line 62111 MATCH x11000000/mask=x7ffffc00 +# CONSTRUCT x11000000/mask=xdf000000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x11000000/mask=xdf000000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR32xsp, Rn_GPR32xsp, ImmShift32 +is sf=0 & b_30=0 & S & SBIT_CZNO & b_2428=0x011 & ImmShift32 & Rn_GPR32xsp & Rd_GPR32xsp & Rd_GPR64xsp +{ + addflags(Rn_GPR32xsp, ImmShift32); + tmp:4 = Rn_GPR32xsp + ImmShift32; + resultflags(tmp); + build SBIT_CZNO; + Rd_GPR64xsp = zext(tmp); +} + +# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000 +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.185 MOV (to/from SP) page C6-1110 line 62111 MATCH x11000000/mask=x7ffffc00 +# CONSTRUCT x91000000/mask=xdf000000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x91000000/mask=xdf000000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, ImmShift64 +is sf=1 & b_30=0 & S & SBIT_CZNO & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp & Rd_GPR64xsp +{ + addflags(Rn_GPR64xsp, ImmShift64); + 
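+ # The 64-bit form assigns the sum directly to the X register, while the
+ # 32-bit variants compute into a 4-byte temporary and zero-extend it,
+ # since a W-register write clears the upper 32 bits of the X register.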
Rd_GPR64xsp = Rn_GPR64xsp + ImmShift64; + resultflags(Rd_GPR64xsp); + build SBIT_CZNO; +} + +# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000 +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.185 MOV (to/from SP) page C6-1110 line 62111 MATCH x11000000/mask=x7ffffc00 +# CONSTRUCT x11000000/mask=xdfc00000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x11000000/mask=xdfc00000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl0 +is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_posimm_lsl0 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_posimm_lsl0; + addflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp + tmp_2; + resultflags(tmp_1); + Rd_GPR64xsp = zext(tmp_1); + build SBIT_CZNO; +} + +# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000 +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# CONSTRUCT x11400000/mask=xdfc00000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x11400000/mask=xdfc00000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl12 +is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_posimm_lsl12 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_posimm_lsl12; + addflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp + tmp_2; + resultflags(tmp_1); + Rd_GPR64xsp = zext(tmp_1); + build SBIT_CZNO; +} + +# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000 +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.185 MOV (to/from SP) page C6-1110 line 62111 MATCH x11000000/mask=x7ffffc00 +# CONSTRUCT x91000000/mask=xdfc00000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x91000000/mask=xdfc00000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl0 +is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_posimm_lsl0 & Rn_GPR64xsp & Rd_GPR64xsp +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_posimm_lsl0; + addflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp + tmp_2; + resultflags(tmp_1); + Rd_GPR64xsp = tmp_1; + build SBIT_CZNO; +} + +# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000 +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# CONSTRUCT x91400000/mask=xdfc00000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x91400000/mask=xdfc00000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl12 +is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_posimm_lsl12 & Rn_GPR64xsp & Rd_GPR64xsp +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_posimm_lsl12; + addflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp + tmp_2; + resultflags(tmp_1); + Rd_GPR64xsp = tmp_1; + build SBIT_CZNO; +} + +# C6.2.5 ADD (shifted register) page C6-781 line 44002 MATCH x0b000000/mask=x7f200000 +# C6.2.9 ADDS (shifted register) 
page C6-789 line 44428 MATCH x2b000000/mask=x7f200000 +# C6.2.59 CMN (shifted register) page C6-873 line 48819 MATCH x2b00001f/mask=x7f20001f +# CONSTRUCT x0b000000/mask=xdf208000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x0b000000/mask=xdf208000 --status pass --comment "flags" +# if shift == '11' then ReservedValue(); + +:add^SBIT_CZNO Rd_GPR32, Rn_GPR32, RegShift32 +is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0xb & b_2121=0 & b_15=0 & RegShift32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32; + addflags(Rn_GPR32, tmp_2); + tmp_1:4 = Rn_GPR32 + tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + build SBIT_CZNO; +} + +# C6.2.5 ADD (shifted register) page C6-781 line 44002 MATCH x0b000000/mask=x7f200000 +# C6.2.9 ADDS (shifted register) page C6-789 line 44428 MATCH x2b000000/mask=x7f200000 +# C6.2.59 CMN (shifted register) page C6-873 line 48819 MATCH x2b00001f/mask=x7f20001f +# CONSTRUCT x8b000000/mask=xdf200000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x8b000000/mask=xdf200000 --status pass --comment "flags" + +:add^SBIT_CZNO Rd_GPR64, Rn_GPR64, RegShift64 +is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = RegShift64; + addflags(Rn_GPR64, tmp_2); + tmp_1:8 = Rn_GPR64 + tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + build SBIT_CZNO; +} + +# C6.2.10 ADR page C6-791 line 44547 MATCH x10000000/mask=x9f000000 +# CONSTRUCT x10000000/mask=x9f000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x10000000/mask=x9f000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:adr Rd_GPR64, AdrReloff +is b_31=0 & AdrReloff & b_2428=0x10 & Rd_GPR64 +{ + Rd_GPR64 = &AdrReloff; +} + +# C6.2.11 ADRP page C6-792 line 44593 MATCH x90000000/mask=x9f000000 +# CONSTRUCT x90000000/mask=x9f000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x90000000/mask=x9f000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:adrp Rd_GPR64, AdrReloff +is b_31=1 & AdrReloff & b_2428=0x10 & Rd_GPR64 +{ + Rd_GPR64 = &AdrReloff; +} + +# C6.2.12 AND (immediate) page C6-793 line 44641 MATCH x12000000/mask=x7f800000 +# CONSTRUCT x12000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x12000000/mask=xff800000 --status pass + +:and Rd_GPR32wsp, Rn_GPR32, DecodeWMask32 +is sf=0 & opc=0 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_1:4 = Rn_GPR32 & DecodeWMask32; + Rd_GPR64xsp = zext(tmp_1); +} + +# C6.2.12 AND (immediate) page C6-793 line 44641 MATCH x12000000/mask=x7f800000 +# CONSTRUCT x92000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x92000000/mask=xff800000 --status pass + +:and Rd_GPR64xsp, Rn_GPR64, DecodeWMask64 +is sf=1 & opc=0 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64xsp +{ + tmp_1:8 = Rn_GPR64 & DecodeWMask64; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.13 AND (shifted register) page C6-795 line 44731 MATCH x0a000000/mask=x7f200000 +# CONSTRUCT x0a000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x0a000000/mask=xff200000 --status pass + +:and Rd_GPR32, Rn_GPR32, RegShift32Log +is sf=0 & opc=0 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32Log; + tmp_1:4 = Rn_GPR32 & tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.13 AND (shifted register) page C6-795 line 44731 MATCH x0a000000/mask=x7f200000 +# CONSTRUCT x8a000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x8a000000/mask=xff200000 --status pass + +:and Rd_GPR64, Rn_GPR64, RegShift64Log 
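+# (RegShift64Log, like RegShift32Log, also accepts ROR: the logical
+# shifted-register forms allow it while the add/sub forms do not)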
+is sf=1 & opc=0 & b_2428=0xa & N=0 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = RegShift64Log; + tmp_1:8 = Rn_GPR64 & tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.14 ANDS (immediate) page C6-797 line 44831 MATCH x72000000/mask=x7f800000 +# C6.2.330 TST (immediate) page C6-1368 line 75910 MATCH x7200001f/mask=x7f80001f +# CONSTRUCT x72000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x72000000/mask=xff800000 --status pass --comment "flags" + +:ands Rd_GPR32, Rn_GPR32, DecodeWMask32 +is sf=0 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_1:4 = Rn_GPR32 & DecodeWMask32; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectLflags(); +} + +# C6.2.14 ANDS (immediate) page C6-797 line 44831 MATCH x72000000/mask=x7f800000 +# C6.2.330 TST (immediate) page C6-1368 line 75910 MATCH x7200001f/mask=x7f80001f +# CONSTRUCT xf2000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf2000000/mask=xff800000 --status pass --comment "flags" + +:ands Rd_GPR64, Rn_GPR64, DecodeWMask64 +is sf=1 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64 +{ + tmp_1:8 = Rn_GPR64 & DecodeWMask64; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectLflags(); +} + +# C6.2.15 ANDS (shifted register) page C6-799 line 44931 MATCH x6a000000/mask=x7f200000 +# C6.2.331 TST (shifted register) page C6-1369 line 75974 MATCH x6a00001f/mask=x7f20001f +# CONSTRUCT x6a000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x6a000000/mask=xff200000 --status pass --comment "flags" + +:ands Rd_GPR32, Rn_GPR32, RegShift32Log +is sf=0 & opc=3 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32Log; + tmp_1:4 = Rn_GPR32 & tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectLflags(); +} + +# C6.2.15 ANDS (shifted register) page C6-799 line 44931 MATCH x6a000000/mask=x7f200000 +# C6.2.331 TST (shifted register) page C6-1369 line 75974 MATCH x6a00001f/mask=x7f20001f +# CONSTRUCT xea000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xea000000/mask=xff200000 --status pass --comment "flags" + +:ands Rd_GPR64, Rn_GPR64, RegShift64Log +is sf=1 & opc=3 & b_2428=0xa & N=0 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = RegShift64Log; + tmp_1:8 = Rn_GPR64 & tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectLflags(); +} + +# C6.2.16 ASR (register) page C6-801 line 45045 MATCH x1ac02800/mask=x7fe0fc00 +# C6.2.18 ASRV page C6-805 line 45229 MATCH x1ac02800/mask=x7fe0fc00 +# CONSTRUCT x1ac02800/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x1ac02800/mask=xffe0fc00 --status pass + +:asr Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0xa & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = Rm_GPR32 & 0x1f; + tmp_1:4 = Rn_GPR32 s>> tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.16 ASR (register) page C6-801 line 45045 MATCH x1ac02800/mask=x7fe0fc00 +# C6.2.18 ASRV page C6-805 line 45229 MATCH x1ac02800/mask=x7fe0fc00 +# CONSTRUCT x9ac02800/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9ac02800/mask=xffe0fc00 --status pass + +:asr Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0xa & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = Rm_GPR64 & 0x3f; + tmp_1:8 = Rn_GPR64 s>> tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# 
C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x13007c00/mask=xffe0fc02 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x13007c00/mask=xffe0fc02 --status pass +# Alias for sbfm when imms == '011111' +# imms is MAX_INT5, so it will never be less than immr. Note that immr is limited to [0,31] +# Ha! Two explicit cases passes -l +# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); + +:asr Rd_GPR32, Rn_GPR32, ImmRConst32 +is ImmS=0x1f & ImmS_LT_ImmR=0 & (ImmS_EQ_ImmR=0 | ImmS_EQ_ImmR=1) & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = ImmRConst32; + tmp_1:4 = Rn_GPR32 s>> tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x9340fc00/mask=xffc0fc02 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x9340fc00/mask=xffc0fc02 --status pass +# Alias for sbfm when imms == '111111' +# imms is MAX_INT6, so it will never be less than immr (6-bit field) +# Ha! Two explicit cases passes -l + +:asr Rd_GPR64, Rn_GPR64, ImmRConst64 +is ImmS=0x3f & ImmS_LT_ImmR=0 & (ImmS_EQ_ImmR=0 | ImmS_EQ_ImmR=1) & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = ImmRConst64; + tmp_1:8 = Rn_GPR64 s>> tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087800/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd5087800/mask=xffffffe0 --status noqemu + +:at "S1E1R", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b000 & Rt_GPR64 +{ par_el1 = AT_S1E1R(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c7800/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50c7800/mask=xffffffe0 --status noqemu + +:at "S1E2R", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b000 & Rt_GPR64 +{ par_el1 = AT_S1E2R(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e7800/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50e7800/mask=xffffffe0 --status noqemu + +:at "S1E3R", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b000 & Rt_GPR64 +{ par_el1 = AT_S1E3R(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH 
xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087820/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd5087820/mask=xffffffe0 --status noqemu + +:at "S1E1W", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b001 & Rt_GPR64 +{ par_el1 = AT_S1E1W(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c7820/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50c7820/mask=xffffffe0 --status noqemu + +:at "S1E2W", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b001 & Rt_GPR64 +{ par_el1 = AT_S1E2W(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e7820/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50e7820/mask=xffffffe0 --status noqemu + +:at "S1E3W", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b001 & Rt_GPR64 +{ par_el1 = AT_S1E3W(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087840/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd5087840/mask=xffffffe0 --status noqemu + +:at "S1E0R", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b010 & Rt_GPR64 +{ par_el1 = AT_S1E0R(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087860/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd5087860/mask=xffffffe0 --status noqemu + +:at "S1E0W", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b011 & Rt_GPR64 +{ par_el1 = AT_S1E0W(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c7880/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50c7880/mask=xffffffe0 --status noqemu + +:at "S12E1R", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b100 & Rt_GPR64 +{ par_el1 = AT_S12E1R(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c78a0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst 
xd50c78a0/mask=xffffffe0 --status noqemu + +:at "S12E1W", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b101 & Rt_GPR64 +{ par_el1 = AT_S12E1W(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c78c0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50c78c0/mask=xffffffe0 --status noqemu + +:at "S12E0R", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b110 & Rt_GPR64 +{ par_el1 = AT_S12E0R(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c78e0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd50c78e0/mask=xffffffe0 --status noqemu + +:at "S12E0W", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b111 & Rt_GPR64 +{ par_el1 = AT_S12E0W(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087900/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd5087900/mask=xffffffe0 --status noqemu + +:at "S1E1RP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1001 & b_0507=0b000 & Rt_GPR64 +{ par_el1 = AT_S1E1RP(Rt_GPR64); } + +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087920/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xd5087920/mask=xffffffe0 --status noqemu + +:at "S1E1WP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1001 & b_0507=0b001 & Rt_GPR64 +{ par_el1 = AT_S1E1WP(Rt_GPR64); } + +# C6.2.20 AUTDA, AUTDZA page C6-809 line 45398 MATCH xdac11800/mask=xffffdc00 +# CONSTRUCT xdac11800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac11800/mask=xfffffc00 --status noqemu + +:autda Rd_GPR64, Rn_GPR64xsp +is autda__PACpart & b_1431=0b110110101100000100 & b_1012=0b110 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build autda__PACpart; +} + +# C6.2.20 AUTDA, AUTDZA page C6-809 line 45398 MATCH xdac11800/mask=xffffdc00 +# CONSTRUCT xdac13be0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac13be0/mask=xffffffe0 --status noqemu + +:autdza Rd_GPR64 +is autdza__PACpart & b_1431=0b110110101100000100 & b_1012=0b110 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build autdza__PACpart; +} + +# C6.2.21 AUTDB, AUTDZB page C6-810 line 45473 MATCH xdac11c00/mask=xffffdc00 +# CONSTRUCT xdac11c00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac11c00/mask=xfffffc00 --status noqemu + +:autdb Rd_GPR64, Rn_GPR64xsp +is autdb__PACpart & b_1431=0b110110101100000100 & b_1012=0b111 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build autdb__PACpart; +} + +# 
C6.2.21 AUTDB, AUTDZB page C6-810 line 45473 MATCH xdac11c00/mask=xffffdc00 +# CONSTRUCT xdac13fe0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac13fe0/mask=xffffffe0 --status noqemu + +:autdzb Rd_GPR64 +is autdzb__PACpart & b_1431=0b110110101100000100 & b_1012=0b111 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build autdzb__PACpart; +} + +# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-811 line 45548 MATCH xdac11000/mask=xffffdc00 +# CONSTRUCT xdac11000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac11000/mask=xfffffc00 --status noqemu + +:autia Rd_GPR64, Rn_GPR64xsp +is autia__PACpart & b_1431=0b110110101100000100 & b_1012=0b100 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build autia__PACpart; +} + +# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-811 line 45548 MATCH xdac11000/mask=xffffdc00 +# CONSTRUCT xdac133e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac133e0/mask=xffffffe0 --status noqemu + +:autiza Rd_GPR64 +is autiza__PACpart & b_1431=0b110110101100000100 & b_1012=0b100 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build autiza__PACpart; +} + +# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-811 line 45548 MATCH xd503219f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503219f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503219f/mask=xffffffff --status nodest + +:autia1716 +is autia1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b100 & b_0004=0b11111 +{ + build autia1716__PACpart; +} + +# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-811 line 45548 MATCH xd503219f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd50323bf/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50323bf/mask=xffffffff --status nodest + +:autiasp +is autiasp__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b101 & b_0004=0b11111 +{ + build autiasp__PACpart; +} + +# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-811 line 45548 MATCH xd503219f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503239f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503239f/mask=xffffffff --status nodest + +:autiaz +is autiaz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b100 & b_0004=0b11111 +{ + build autiaz__PACpart; +} + +# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-813 line 45695 MATCH xdac11400/mask=xffffdc00 +# CONSTRUCT xdac11400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac11400/mask=xfffffc00 --status noqemu + +:autib Rd_GPR64, Rn_GPR64xsp +is autib__PACpart & b_1431=0b110110101100000100 & b_1012=0b101 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build autib__PACpart; +} + +# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-813 line 45695 MATCH xdac11400/mask=xffffdc00 +# CONSTRUCT xdac137e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac137e0/mask=xffffffe0 --status noqemu + +:autizb Rd_GPR64 +is autizb__PACpart & b_1431=0b110110101100000100 & b_1012=0b101 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build autizb__PACpart; +} + +# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-813 line 45695 MATCH xd50321df/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd50321df/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50321df/mask=xffffffff --status 
nodest + +:autib1716 +is autib1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b110 & b_0004=0b11111 +{ + build autib1716__PACpart; +} + +# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-813 line 45695 MATCH xd50321df/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd50323ff/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50323ff/mask=xffffffff --status nodest + +:autibsp +is autibsp__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b111 & b_0004=0b11111 +{ + build autibsp__PACpart; +} + +# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-813 line 45695 MATCH xd50321df/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd50323df/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50323df/mask=xffffffff --status nodest + +:autibz +is autibz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b110 & b_0004=0b11111 +{ + build autibz__PACpart; +} + +# C6.2.25 B.cond page C6-816 line 45885 MATCH x54000000/mask=xff000010 +# CONSTRUCT x5400000f/mask=xff00001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x5400000f/mask=xff00001f --status nodest --comment "noflags qemuerr(illegal addresses cause qemu exit)" + +:b^"."^BranchCondOp Addr19 +is b_2531=0x2a & o1=0 & Addr19 & o0=0 & br_cond_op=15 & BranchCondOp +{ + goto Addr19; +} + +# C6.2.25 B.cond page C6-816 line 45885 MATCH x54000000/mask=xff000010 +# CONSTRUCT x54000000/mask=xff000010 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x54000000/mask=xff000010 --status nodest --comment "flags qemuerr(illegal addresses cause qemu exit)" + +:b^"."^BranchCondOp Addr19 +is b_2531=0x2a & o1=0 & Addr19 & o0=0 & br_cond_op & BranchCondOp +{ + if (BranchCondOp) goto Addr19; +} + +# C6.2.26 B page C6-817 line 45927 MATCH x14000000/mask=xfc000000 +# CONSTRUCT x14000000/mask=xfc000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x14000000/mask=xfc000000 --status nodest --comment "flags qemuerr(illegal addresses cause qemu exit)" + +:b Addr26 +is b_31=0 & b_2630=0x05 & Addr26 +{ + goto Addr26; +} + +# C6.2.29 BFM page C6-822 line 46149 MATCH x33000000/mask=x7f800000 +# C6.2.27 BFC page C6-818 line 45966 MATCH x330003e0/mask=x7f8003e0 +# C6.2.28 BFI page C6-820 line 46057 MATCH x33000000/mask=x7f800000 +# C6.2.30 BFXIL page C6-824 line 46272 MATCH x33000000/mask=x7f800000 +# CONSTRUCT x33000000/mask=xffe08000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x33000000/mask=xffe08000 --status pass +# if sf == '0' && (N != '0' || immr<5> (b_21) != '0' || imms<5> (b_15) != '0') then ReservedValue(); + +:bfm Rd_GPR32, Rn_GPR32, ImmR_bitfield32_imm, ImmS_bitfield32_imm +is sf=0 & opc=1 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmR_bitfield32_imm & ImmS_bitfield32_imm & ImmRConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 +{ + local wmask:4 = DecodeWMask32; + local tmask:4 = DecodeTMask32; + local dst:4 = Rd_GPR32; + local src:4 = Rn_GPR32; + local bot:4 = (dst & ~(wmask)) | (((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask); + Rd_GPR64 = zext((dst & ~(tmask)) | (bot & tmask)); +} + +# C6.2.29 BFM page C6-822 line 46149 MATCH x33000000/mask=x7f800000 +# C6.2.27 BFC page C6-818 line 45966 MATCH x330003e0/mask=x7f8003e0 +# C6.2.28 BFI page C6-820 line 46057 MATCH x33000000/mask=x7f800000 +# C6.2.30 BFXIL page C6-824 line 46272 MATCH x33000000/mask=x7f800000 +# CONSTRUCT xb3400002/mask=xffc00002 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst 
xb3400002/mask=xffc00002 --status pass + +:bfm Rd_GPR64, Rn_GPR64, ImmR_bitfield64_imm, ImmS_bitfield64_imm +is ImmS_LT_ImmR=1 & sf=1 & opc=1 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmRConst64 & ImmS_bitfield64_imm & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64 +{ + local wmask:8 = DecodeWMask64; + local tmask:8 = DecodeTMask64; + local dst:8 = Rd_GPR64; + local src:8 = Rn_GPR64; + local bot:8 = (dst & ~(wmask)) | (((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask); + Rd_GPR64 = (dst & ~(tmask)) | (bot & tmask); +} + +# C6.2.28 BFXIL page C6-567 line 33333 KEEPWITH + +BFextractWidth32: "#"^imm is ImmR & ImmS [ imm = ImmS - ImmR + 1; ] { export *[const]:4 imm; } +BFextractWidth64: "#"^imm is ImmR & ImmS [ imm = ImmS - ImmR + 1; ] { export *[const]:8 imm; } + +# C6.2.30 BFXIL page C6-824 line 46272 MATCH x33000000/mask=x7f800000 +# C6.2.27 BFC page C6-818 line 45966 MATCH x330003e0/mask=x7f8003e0 +# C6.2.28 BFI page C6-820 line 46057 MATCH x33000000/mask=x7f800000 +# C6.2.29 BFM page C6-822 line 46149 MATCH x33000000/mask=x7f800000 +# CONSTRUCT x33000000/mask=xffe08002 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x33000000/mask=xffe08002 --status pass + +# Alias for bfm where UInt(imms) >= UInt(immr) + +:bfxil Rd_GPR32, Rn_GPR32, ImmRConst32, BFextractWidth32 +is ImmS_LT_ImmR=0 & sf=0 & opc=1 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & BFextractWidth32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + dst:4 = Rd_GPR32; + src:4 = Rn_GPR32; + mask:4 = (0xffffffff >> (32 - BFextractWidth32)); + tmp:4 = (src >> ImmRConst32) & mask; + Rd_GPR64 = zext((dst & ~(mask)) | tmp); +} + +# C6.2.30 BFXIL page C6-824 line 46272 MATCH x33000000/mask=x7f800000 +# C6.2.27 BFC page C6-818 line 45966 MATCH x330003e0/mask=x7f8003e0 +# C6.2.28 BFI page C6-820 line 46057 MATCH x33000000/mask=x7f800000 +# C6.2.29 BFM page C6-822 line 46149 MATCH x33000000/mask=x7f800000 +# CONSTRUCT xb3400000/mask=xffc00002 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xb3400000/mask=xffc00002 --status pass + +# Alias for bfm where UInt(imms) >= UInt(immr) + +:bfxil Rd_GPR64, Rn_GPR64, ImmRConst64, BFextractWidth64 +is ImmS_LT_ImmR=0 & sf=1 & opc=1 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & BFextractWidth64 & Rn_GPR64 & Rd_GPR64 +{ + dst:8 = Rd_GPR64; + src:8 = Rn_GPR64; + mask:8 = (0xffffffffffffffff >> (64 - BFextractWidth64)); + tmp:8 = (src >> ImmRConst64) & mask; + Rd_GPR64 = ((dst & ~(mask)) | tmp); +} + +# C6.2.31 BIC (shifted register) page C6-826 line 46365 MATCH x0a200000/mask=x7f200000 +# CONSTRUCT x0a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x0a200000/mask=xff200000 --status pass + +:bic Rd_GPR32, Rn_GPR32, RegShift32Log +is sf=0 & opc=0 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_3:4 = RegShift32Log; + tmp_2:4 = tmp_3 ^ -1:4; + tmp_1:4 = Rn_GPR32 & tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.31 BIC (shifted register) page C6-826 line 46365 MATCH x0a200000/mask=x7f200000 +# CONSTRUCT x8a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x8a200000/mask=xff200000 --status pass + +:bic Rd_GPR64, Rn_GPR64, RegShift64Log +is sf=1 & opc=0 & b_2428=0xa & N=1 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_3:8= RegShift64Log; + tmp_2:8 = tmp_3 ^ -1:8; + tmp_1:8 = Rn_GPR64 & tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.32 BICS (shifted register) page C6-828 line 46467 MATCH x6a200000/mask=x7f200000 +# CONSTRUCT x6a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x6a200000/mask=xff200000 --status pass 
--comment "flags" + +:bics Rd_GPR32, Rn_GPR32, RegShift32Log +is sf=0 & opc=3 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_3:4 = RegShift32Log; + tmp_2:4 = tmp_3 ^ -1:4; + tmp_1:4 = Rn_GPR32 & tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectLflags(); +} + +# C6.2.32 BICS (shifted register) page C6-828 line 46467 MATCH x6a200000/mask=x7f200000 +# CONSTRUCT xea200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xea200000/mask=xff200000 --status pass --comment "flags" + +:bics Rd_GPR64, Rn_GPR64, RegShift64Log +is sf=1 & opc=3 & b_2428=0xa & N=1 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_3:8= RegShift64Log; + tmp_2:8 = tmp_3 ^ -1:8; + tmp_1:8 = Rn_GPR64 & tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectLflags(); +} + +# C6.2.33 BL page C6-830 line 46571 MATCH x94000000/mask=xfc000000 +# CONSTRUCT x94000000/mask=xfc000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x94000000/mask=xfc000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:bl Addr26 +is b_31=1 & b_2630=0x05 & Addr26 +{ + x30 = inst_start + 4; + call Addr26; +} + +# C6.2.34 BLR page C6-831 line 46612 MATCH xd63f0000/mask=xfffffc1f +# CONSTRUCT xd63f0000/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd63f0000/mask=xfffffc1f --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:blr Rn_GPR64 +is b_2531=0x6b & b_2324=0 & b_2122=1 & b_1620=0x1f & b_1015=0 & Rn_GPR64 & b_0004=0 +{ + pc = Rn_GPR64; + x30 = inst_start + 4; + call [pc]; +} + +# C6.2.33 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-574 line 33668 KEEPWITH + +# Z == 0 && M == 0 && Rm = 11111 Key A, zero modifier variant + +blinkop: "l" is b_2122=0b01 { x30 = inst_start + 4; call [pc]; } +blinkop: "" is b_2122=0b00 { goto[pc]; } + +# C6.2.35 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-832 line 46654 MATCH xd63f0800/mask=xfefff800 +# C6.2.37 BRAA, BRAAZ, BRAB, BRABZ page C6-835 line 46800 MATCH xd61f0800/mask=xfefff800 +# C6.2.220 RETAA, RETAB page C6-1170 line 65226 MATCH xd65f0bff/mask=xfffffbff +# CONSTRUCT xd61f081f/mask=xff9ffc1f MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd61f081f/mask=xff9ffc1f --status nodest + +:b^blinkop^"raaz" Rn_GPR64 +is b_blinkop__raaz___PACpart & b_2531=0b1101011 & b_24=0 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=0 & b_0004=0b11111 & Rn_GPR64 +{ + build b_blinkop__raaz___PACpart; + pc = Rn_GPR64; + build blinkop; +} + +# C6.2.35 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-832 line 46654 MATCH xd63f0800/mask=xfefff800 +# C6.2.37 BRAA, BRAAZ, BRAB, BRABZ page C6-835 line 46800 MATCH xd61f0800/mask=xfefff800 +# CONSTRUCT xd71f0800/mask=xff9ffc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd71f0800/mask=xff9ffc00 --status nodest +# Z == 1 && M == 0 Key A, register modifier variant + +:b^blinkop^"raa" Rn_GPR64, Rd_GPR64xsp +is b_blinkop__raa___PACpart & b_2531=0b1101011 & b_24=1 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=0 & Rd_GPR64xsp & Rn_GPR64 +{ + build b_blinkop__raa___PACpart; + pc = Rn_GPR64; + build blinkop; +} + +# C6.2.35 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-832 line 46654 MATCH xd63f0800/mask=xfefff800 +# C6.2.37 BRAA, BRAAZ, BRAB, BRABZ page C6-835 line 46800 MATCH xd61f0800/mask=xfefff800 +# C6.2.220 RETAA, RETAB page C6-1170 line 65226 MATCH xd65f0bff/mask=xfffffbff +# CONSTRUCT xd61f0c1f/mask=xff9ffc1f MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd61f0c1f/mask=xff9ffc1f --status nodest +# Z == 0 && M == 1 && Rm = 11111 Key B, zero modifier variant + +:b^blinkop^"rabz" Rn_GPR64 +is 
b_blinkop__rabz___PACpart & b_2531=0b1101011 & b_24=0 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=1 & b_0004=0b11111 & Rn_GPR64
+{
+ build b_blinkop__rabz___PACpart;
+ pc = Rn_GPR64;
+ build blinkop;
+}
+
+# C6.2.35 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-832 line 46654 MATCH xd63f0800/mask=xfefff800
+# C6.2.37 BRAA, BRAAZ, BRAB, BRABZ page C6-835 line 46800 MATCH xd61f0800/mask=xfefff800
+# CONSTRUCT xd71f0c00/mask=xff9ffc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xd71f0c00/mask=xff9ffc00 --status nodest
+# Z == 1 && M == 1 Key B, register modifier variant
+
+:b^blinkop^"rab" Rn_GPR64, Rd_GPR64xsp
+is b_blinkop__rab___PACpart & b_2531=0b1101011 & b_24=1 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=1 & Rd_GPR64xsp & Rn_GPR64
+{
+ build b_blinkop__rab___PACpart;
+ pc = Rn_GPR64;
+ build blinkop;
+}
+
+# C6.2.36 BR page C6-834 line 46760 MATCH xd61f0000/mask=xfffffc1f
+# CONSTRUCT xd61f0000/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd61f0000/mask=xfffffc1f --status nodest
+
+:br Rn_GPR64
+is b_2531=0x6b & b_2324=0 & b_2122=0 & b_1620=0x1f & b_1015=0 & Rn_GPR64 & b_0004=0
+{
+ pc = Rn_GPR64;
+ goto [pc];
+}
+
+# C6.2.38 BRK page C6-837 line 46903 MATCH xd4200000/mask=xffe0001f
+# CONSTRUCT xd4200000/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd4200000/mask=xffe0001f --status nodest
+
+:brk "#"^imm16
+is ALL_BTITARGETS & b_2431=0xd4 & b_2123=1 & imm16 & b_0204=0 & b_0001=0
+{
+ tmp:2 = imm16;
+ preferred_exception_return:8 = inst_next;
+ pc = SoftwareBreakpoint(tmp, preferred_exception_return);
+ goto [pc];
+}
+
+# C6.2.37 CASB, CASAB, CASALB, CASLB page C6-580 line 33952 KEEPWITH
+
+cas_var: "a" is b_22=1 & b_15=0 { }
+cas_var: "al" is b_22=1 & b_15=1 { }
+cas_var: "" is b_22=0 & b_15=0 { }
+cas_var: "l" is b_22=0 & b_15=1 { }
+
+# C6.2.40 CASB, CASAB, CASALB, CASLB page C6-841 line 47114 MATCH x08a07c00/mask=xffa07c00
+# CONSTRUCT x08a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x08a07c00/mask=xffa07c00 --status nomem
+
+# CAS{,A,AL,L}B size=0b00 (b_3031)
+
+:cas^cas_var^"b" aa_Ws, aa_Wt, [Rn_GPR64xsp]
+is b_3031=0b00 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Wt & Rn_GPR64xsp & aa_Ws
+{
+ comparevalue:1 = aa_Ws:1;
+ newvalue:1 = aa_Wt:1;
+ data:1 = *:1 Rn_GPR64xsp;
+ if (data != comparevalue) goto <done>;
+ *:1 Rn_GPR64xsp = newvalue;
+ <done>
+ aa_Ws = zext(data);
+}
+
+# C6.2.41 CASH, CASAH, CASALH, CASLH page C6-843 line 47236 MATCH x48a07c00/mask=xffa07c00
+# CONSTRUCT x48a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x48a07c00/mask=xffa07c00 --status nomem
+
+# CAS{,A,AL,L}H size=0b01 (b_3031)
+
+:cas^cas_var^"h" aa_Ws, aa_Wt, [Rn_GPR64xsp]
+is b_3031=0b01 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Wt & Rn_GPR64xsp & aa_Ws
+{
+ comparevalue:2 = aa_Ws:2;
+ newvalue:2 = aa_Wt:2;
+ data:2 = *:2 Rn_GPR64xsp;
+ if (data != comparevalue) goto <done>;
+ *:2 Rn_GPR64xsp = newvalue;
+ <done>
+ aa_Ws = zext(data);
+}
+
+# C6.2.42 CASP, CASPA, CASPAL, CASPL page C6-845 line 47358 MATCH x08207c00/mask=xbfa07c00
+# CONSTRUCT x08207c00/mask=xffa17c01 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x08207c00/mask=xffa17c01 --status nomem
+
+# CASP{,A,AL,L} size=0b00 (b_3031)
+
+:casp^cas_var aa_Ws, aa_Wss, aa_Wt, aa_Wtt, [Rn_GPR64xsp]
+is b_3031=0b00 & b_2329=0b0010000 & b_21=1 & b_1014=0b11111 & b_16=0 & b_00=0 & cas_var & aa_Ws & aa_Wss & aa_Wt & aa_Wtt & Rn_GPR64xsp
+{
+@if DATA_ENDIAN == "big"
+ comparevalue:8 = (zext(aa_Ws) << 32) | zext(aa_Wss);
+ newvalue:8 = (zext(aa_Wt) << 32) | zext(aa_Wtt);
+@else
+ comparevalue:8 = (zext(aa_Wss) << 32) | zext(aa_Ws);
+ newvalue:8 = (zext(aa_Wtt) << 32) | zext(aa_Wt);
+@endif
+ data:8 = *:8 Rn_GPR64xsp;
+ if (data != comparevalue) goto <done>;
+ *:8 Rn_GPR64xsp = newvalue;
+ <done>
+@if DATA_ENDIAN == "big"
+ aa_Ws = data(4);
+ aa_Wss = data:4;
+@else
+ aa_Ws = data:4;
+ aa_Wss = data(4);
+@endif
+}
+
+# C6.2.42 CASP, CASPA, CASPAL, CASPL page C6-845 line 47358 MATCH x08207c00/mask=xbfa07c00
+# CONSTRUCT x48207c00/mask=xffa17c01 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x48207c00/mask=xffa17c01 --status nomem
+
+# CASP{,A,AL,L} size=0b01 (b_3031)
+
+:casp^cas_var aa_Xs, aa_Xss, aa_Xt, aa_Xtt, [Rn_GPR64xsp]
+is b_3031=0b01 & b_2329=0b0010000 & b_21=1 & b_1014=0b11111 & b_16=0 & b_00=0 & cas_var & aa_Xs & aa_Xss & aa_Xt & aa_Xtt & Rn_GPR64xsp
+{
+ local tmp_s:8 = aa_Xs;
+ local tmp_ss:8 = aa_Xss;
+ local tmp_t:8 = aa_Xt;
+ local tmp_tt:8 = aa_Xtt;
+
+@if DATA_ENDIAN == "little"
+ # for little endian, swap Xss/Xs and Xtt/Xt
+ tmp_s = aa_Xss;
+ tmp_ss = aa_Xs;
+ tmp_t = aa_Xtt;
+ tmp_tt = aa_Xt;
+@endif
+
+ local tmp_addr:8 = Rn_GPR64xsp;
+ local tmp_d:8 = *:8 tmp_addr;
+ tmp_addr = tmp_addr + 8;
+ local tmp_dd:8 = *:8 tmp_addr;
+
+ if (tmp_d != tmp_s) goto <done>;
+ if (tmp_dd != tmp_ss) goto <done>;
+
+ tmp_addr = Rn_GPR64xsp;
+ *:8 tmp_addr = tmp_t;
+ tmp_addr = tmp_addr + 8;
+ *:8 tmp_addr = tmp_tt;
+
+ <done>
+ aa_Xs = tmp_d;
+ aa_Xss = tmp_dd;
+}
+
+# C6.2.43 CAS, CASA, CASAL, CASL page C6-848 line 47540 MATCH x88a07c00/mask=xbfa07c00
+# CONSTRUCT x88a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88a07c00/mask=xffa07c00 --status nomem
+# CAS{,A,AL,L} size=0b10 (b_3031)
+
+:cas^cas_var aa_Ws, aa_Wt, [Rn_GPR64xsp]
+is b_3031=0b10 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Wt & Rn_GPR64xsp & aa_Ws
+{
+ comparevalue:4 = aa_Ws;
+ newvalue:4 = aa_Wt;
+ data:4 = *:4 Rn_GPR64xsp;
+ if (data != comparevalue) goto <done>;
+ *:4 Rn_GPR64xsp = newvalue;
+ <done>
+ aa_Ws = data;
+}
+
+# C6.2.43 CAS, CASA, CASAL, CASL page C6-848 line 47540 MATCH x88a07c00/mask=xbfa07c00
+# CONSTRUCT xc8a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8a07c00/mask=xffa07c00 --status nomem
+
+# CAS{,A,AL,L} size=0b11 (b_3031)
+
+:cas^cas_var aa_Xs, aa_Xt, [Rn_GPR64xsp]
+is b_3031=0b11 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Xt & Rn_GPR64xsp & aa_Xs
+{
+ comparevalue:8 = aa_Xs;
+ newvalue:8 = aa_Xt;
+ data:8 = *:8 Rn_GPR64xsp;
+ if (data != comparevalue) goto <done>;
+ *:8 Rn_GPR64xsp = newvalue;
+ <done>
+ aa_Xs = data;
+}
+
+# C6.2.41 CBNZ page C6-589 line 34530 KEEPWITH
+
+ZeroOp: "z" is cmpr_op=0 { export 1:1; }
+ZeroOp: "nz" is cmpr_op=1 { export 0:1; }
+
+BitPos: "#"^bitpos is sf=1 & b_31 & b_1923 & Rt_GPR64 [ bitpos = b_31 << 5 | b_1923; ]
+{
+ tmp:1 = ((Rt_GPR64 >> bitpos) & 1) == 0;
+ export tmp;
+}
+
+BitPos: "#"^bitpos is sf=0 & b_31 & b_1923 & Rt_GPR32 [ bitpos = b_31 << 5 | b_1923; ]
+{
+ tmp:1 = ((Rt_GPR32 >> bitpos) & 1) == 0;
+ export tmp;
+}
+
+# C6.2.44 CBNZ page C6-850 line 47690 MATCH x35000000/mask=x7f000000
+# C6.2.45 CBZ page C6-851 line 47747 MATCH x34000000/mask=x7f000000
+# CONSTRUCT xb4000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xb4000000/mask=xfe000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
+
+:cb^ZeroOp Rd_GPR64, Addr19
+is sf=1 & b_2530=0x1a & ZeroOp & Addr19 & Rd_GPR64
+{
+ tmp:1 = Rd_GPR64 == 0;
+ if (tmp == ZeroOp) goto Addr19;
+}
+
+# C6.2.44 CBNZ page C6-850 line 47690 MATCH x35000000/mask=x7f000000
+# C6.2.45 CBZ page C6-851 line 47747 MATCH
x34000000/mask=x7f000000 +# CONSTRUCT x34000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x34000000/mask=xfe000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:cb^ZeroOp Rd_GPR32, Addr19 +is sf=0 & b_2530=0x1a & ZeroOp & Addr19 & Rd_GPR32 +{ + tmp:1 = Rd_GPR32 == 0; + if (tmp == ZeroOp) goto Addr19; +} + +# C6.2.44 CBNZ page C6-850 line 47690 MATCH x35000000/mask=x7f000000 +# CONSTRUCT x35000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x35000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:cbnz Rt_GPR32, Addr19 +is sf=0 & b_2530=0x1a & cmpr_op=1 & Addr19 & Rt_GPR32 +{ + if (Rt_GPR32 != 0) goto Addr19; +} + +# C6.2.44 CBNZ page C6-850 line 47690 MATCH x35000000/mask=x7f000000 +# CONSTRUCT xb5000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb5000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:cbnz Rt_GPR64, Addr19 +is sf=1 & b_2530=0x1a & cmpr_op=1 & Addr19 & Rt_GPR64 +{ + if (Rt_GPR64 != 0) goto Addr19; +} + +# C6.2.45 CBZ page C6-851 line 47747 MATCH x34000000/mask=x7f000000 +# CONSTRUCT x34000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x34000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:cbz Rt_GPR32, Addr19 +is sf=0 & b_2530=0x1a & cmpr_op=0 & Addr19 & Rt_GPR32 +{ + if (Rt_GPR32 == 0) goto Addr19; +} + +# C6.2.45 CBZ page C6-851 line 47747 MATCH x34000000/mask=x7f000000 +# CONSTRUCT xb4000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb4000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:cbz Rt_GPR64, Addr19 +is sf=1 & b_2530=0x1a & cmpr_op=0 & Addr19 & Rt_GPR64 +{ + if (Rt_GPR64 == 0) goto Addr19; +} + +# C6.2.46 CCMN (immediate) page C6-852 line 47804 MATCH x3a400800/mask=x7fe00c10 +# CONSTRUCT x3a400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x3a400800/mask=xffe00c10 --status pass --comment "flags" + +:ccmn Rn_GPR32, UImm5, NZCVImm_uimm4, CondOp +is sf=0 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + tmp:4 = UImm5; + addflags(Rn_GPR32, tmp); + result:4 = Rn_GPR32 + tmp; + resultflags(result); + affectflags(); +} + +# C6.2.46 CCMN (immediate) page C6-852 line 47804 MATCH x3a400800/mask=x7fe00c10 +# CONSTRUCT xba400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xba400800/mask=xffe00c10 --status pass --comment "flags" + +:ccmn Rn_GPR64, UImm5, NZCVImm_uimm4, CondOp +is sf=1 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + tmp:8 = zext(UImm5); + addflags(Rn_GPR64, tmp); + result:8 = Rn_GPR64 + tmp; + resultflags(result); + affectflags(); +} + +# C6.2.47 CCMN (register) page C6-854 line 47887 MATCH x3a400000/mask=x7fe00c10 +# CONSTRUCT x3a400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x3a400000/mask=xffe00c10 --status pass --comment "flags" + +:ccmn Rn_GPR32, Rm_GPR32, NZCVImm_uimm4, CondOp +is sf=0 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR32 & CondOp & b_1111=0 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); 
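+ # Conditional compare: the flags were just loaded from the NZCV
+ # immediate by setCC_NZCV(); if the condition fails we are done,
+ # otherwise the addition below recomputes them as cmn would.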
+ if (!condition) goto inst_next; + tmp:4 = Rm_GPR32; + addflags(Rn_GPR32, tmp); + result:4 = Rn_GPR32 + tmp; + resultflags(result); + affectflags(); +} + +# C6.2.47 CCMN (register) page C6-854 line 47887 MATCH x3a400000/mask=x7fe00c10 +# CONSTRUCT xba400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xba400000/mask=xffe00c10 --status pass --comment "flags" + +:ccmn Rn_GPR64, Rm_GPR64, NZCVImm_uimm4, CondOp +is sf=1 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR64 & CondOp & b_1111=0 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + tmp:8 = Rm_GPR64; + addflags(Rn_GPR64, tmp); + result:8 = Rn_GPR64 + tmp; + resultflags(result); + affectflags(); +} + +# C6.2.48 CCMP (immediate) page C6-856 line 47972 MATCH x7a400800/mask=x7fe00c10 +# CONSTRUCT x7a400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x7a400800/mask=xffe00c10 --status pass --comment "flags" + +:ccmp Rn_GPR32, UImm5, NZCVImm_uimm4, CondOp +is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + subflags(Rn_GPR32, UImm5); + tmp:4 = Rn_GPR32 - UImm5; + resultflags(tmp); + affectflags(); +} + +# C6.2.48 CCMP (immediate) page C6-856 line 47972 MATCH x7a400800/mask=x7fe00c10 +# CONSTRUCT xfa400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xfa400800/mask=xffe00c10 --status pass --comment "flags" + +:ccmp Rn_GPR64, UImm5, NZCVImm_uimm4, CondOp +is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + tmp:8 = zext(UImm5); + subflags(Rn_GPR64, tmp); + tmp = Rn_GPR64 - tmp; + resultflags(tmp); + affectflags(); +} + +# C6.2.49 CCMP (register) page C6-858 line 48057 MATCH x7a400000/mask=x7fe00c10 +# CONSTRUCT x7a400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x7a400000/mask=xffe00c10 --status pass --comment "flags" + +:ccmp Rn_GPR32, Rm_GPR32, NZCVImm_uimm4, CondOp +is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR32 & CondOp & b_1111=0 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + subflags(Rn_GPR32, Rm_GPR32); + tmp:4 = Rn_GPR32 - Rm_GPR32; + resultflags(tmp); + affectflags(); +} + +# C6.2.49 CCMP (register) page C6-858 line 48057 MATCH x7a400000/mask=x7fe00c10 +# CONSTRUCT xfa400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xfa400000/mask=xffe00c10 --status pass --comment "flags" + +:ccmp Rn_GPR64, Rm_GPR64, NZCVImm_uimm4, CondOp +is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR64 & CondOp & b_1111=0 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 +{ + condition:1 = CondOp; + condMask:1 = NZCVImm_uimm4; + setCC_NZCV(condMask); + if (!condition) goto inst_next; + subflags(Rn_GPR64, Rm_GPR64); + tmp:8 = Rn_GPR64 - Rm_GPR64; + resultflags(tmp); + affectflags(); +} + +# C6.2.50 CFINV page C6-860 line 48145 MATCH xd500401f/mask=xfffff0ff +# C6.2.194 MSR (immediate) page C6-1126 line 62879 MATCH xd500401f/mask=xfff8f01f +# CONSTRUCT xd500401f/mask=xfffff0ff MATCHED 2 DOCUMENTED OPCODES +# xd500401f/mask=xfffff0ff NOT MATCHED BY ANY CONSTRUCTOR + +:cfinv +is b_1231=0b11010101000000000100 & b_0811 
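+# (b_0811 is listed without a constraint on purpose: bits 8-11 are
+# don't-care in the CFINV encoding, per the xfffff0ff mask above)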
& b_0007=0b00011111
+{
+ CY = !CY;
+}
+
+# C6.2.52 CINC page C6-862 line 48243 MATCH x1a800400/mask=x7fe00c00
+# C6.2.70 CSET page C6-892 line 49783 MATCH x1a9f07e0/mask=x7fff0fe0
+# C6.2.72 CSINC page C6-896 line 49956 MATCH x1a800400/mask=x7fe00c00
+# CONSTRUCT x1a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x1a800400/mask=xffe00c00 --status pass --comment "flags"
+
+:cinc Rd_GPR32, Rn_GPR32, InvCondOp
+is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=1 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:4 = Rn_GPR32;
+ if (!condition) goto <done>;
+ tmp = Rn_GPR32 + 1;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.52 CINC page C6-862 line 48243 MATCH x1a800400/mask=x7fe00c00
+# C6.2.70 CSET page C6-892 line 49783 MATCH x1a9f07e0/mask=x7fff0fe0
+# C6.2.72 CSINC page C6-896 line 49956 MATCH x1a800400/mask=x7fe00c00
+# CONSTRUCT x9a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x9a800400/mask=xffe00c00 --status pass --comment "flags"
+
+:cinc Rd_GPR64, Rn_GPR64, InvCondOp
+is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=1 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:8 = Rn_GPR64;
+ if (!condition) goto <done>;
+ tmp = Rn_GPR64 + 1;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.53 CINV page C6-864 line 48333 MATCH x5a800000/mask=x7fe00c00
+# C6.2.71 CSETM page C6-894 line 49869 MATCH x5a9f03e0/mask=x7fff0fe0
+# C6.2.73 CSINV page C6-898 line 50060 MATCH x5a800000/mask=x7fe00c00
+# CONSTRUCT x5a800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x5a800000/mask=xffe00c00 --status pass --comment "flags"
+
+:cinv Rd_GPR32, Rn_GPR32, InvCondOp
+is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=0 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:4 = Rn_GPR32;
+ if (!condition) goto <done>;
+ tmp = ~Rn_GPR32;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.53 CINV page C6-864 line 48333 MATCH x5a800000/mask=x7fe00c00
+# C6.2.71 CSETM page C6-894 line 49869 MATCH x5a9f03e0/mask=x7fff0fe0
+# C6.2.73 CSINV page C6-898 line 50060 MATCH x5a800000/mask=x7fe00c00
+# CONSTRUCT xda800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xda800000/mask=xffe00c00 --status pass --comment "flags"
+
+:cinv Rd_GPR64, Rn_GPR64, InvCondOp
+is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=0 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:8 = Rn_GPR64;
+ if (!condition) goto <done>;
+ tmp = ~Rn_GPR64;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.54 CLREX page C6-866 line 48423 MATCH xd503305f/mask=xfffff0ff
+# CONSTRUCT xd503305f/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd503305f/mask=xfffff0ff --status nodest
+
+:clrex CRm_uimm4_def15
+is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_uimm4_def15 & Op2=2 & Rt=0x1f
+{
+ ClearExclusiveLocal();
+}
+
+# C6.2.55 CLS page C6-867 line 48462 MATCH x5ac01400/mask=x7ffffc00
+# CONSTRUCT x5ac01400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x5ac01400/mask=xfffffc00 --status pass
+
+:cls Rd_GPR32, Rn_GPR32
+is sf=0 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x5 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ local tmp:4 = (Rn_GPR32 ^ (Rn_GPR32<<1))|0x1;
+ # first make all lower bits =1
+ tmp = tmp | (tmp >> 1);
+ tmp = tmp | (tmp >> 2);
+ tmp
= tmp | (tmp >> 4); + tmp = tmp | (tmp >> 8); + tmp = tmp | (tmp >> 16); + # now add the 1 bits together, voila + tmp = ((tmp & 0xaaaaaaaa)>>1) + (tmp & 0x55555555); + tmp = ((tmp & 0xcccccccc)>>2) + (tmp & 0x33333333); + tmp = ((tmp & 0xf0f0f0f0)>>4) + (tmp & 0x0f0f0f0f); + tmp = ((tmp & 0xff00ff00)>>8) + (tmp & 0x00ff00ff); + tmp = ((tmp & 0xffff0000)>>16) + (tmp & 0x0000ffff); + Rd_GPR64 = zext(32 - (tmp & 0x3f)); +} + +# C6.2.55 CLS page C6-867 line 48462 MATCH x5ac01400/mask=x7ffffc00 +# CONSTRUCT xdac01400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac01400/mask=xfffffc00 --status pass + +:cls Rd_GPR64, Rn_GPR64 +is sf=1 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x5 & Rn_GPR64 & Rd_GPR64 +{ + local tmp:8 = (Rn_GPR64 ^ (Rn_GPR64<<1))|0x1; + # first make all lower bits =1 + tmp = tmp | (tmp >> 1); + tmp = tmp | (tmp >> 2); + tmp = tmp | (tmp >> 4); + tmp = tmp | (tmp >> 8); + tmp = tmp | (tmp >> 16); + tmp = tmp | (tmp >> 32); + # now add the 1 bits together, voila + tmp = ((tmp & 0xaaaaaaaaaaaaaaaa)>>1) + (tmp & 0x5555555555555555); + tmp = ((tmp & 0xcccccccccccccccc)>>2) + (tmp & 0x3333333333333333); + tmp = ((tmp & 0xf0f0f0f0f0f0f0f0)>>4) + (tmp & 0x0f0f0f0f0f0f0f0f); + tmp = ((tmp & 0xff00ff00ff00ff00)>>8) + (tmp & 0x00ff00ff00ff00ff); + tmp = ((tmp & 0xffff0000ffff0000)>>16) + (tmp & 0x0000ffff0000ffff); + tmp = ((tmp & 0xffffffff00000000)>>32) + (tmp & 0x00000000ffffffff); + Rd_GPR64 = 64 - (tmp & 0x7f); +} + +# C6.2.56 CLZ page C6-868 line 48532 MATCH x5ac01000/mask=x7ffffc00 +# CONSTRUCT x5ac01000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x5ac01000/mask=xfffffc00 --status pass + +:clz Rd_GPR32, Rn_GPR32 +is sf=0 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x4 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + local tmp:4 = Rn_GPR32; + # first make all lower bits =1 + tmp = tmp | (tmp >> 1); + tmp = tmp | (tmp >> 2); + tmp = tmp | (tmp >> 4); + tmp = tmp | (tmp >> 8); + tmp = tmp | (tmp >> 16); + # now add the 1 bits together, voila + tmp = ((tmp & 0xaaaaaaaa)>>1) + (tmp & 0x55555555); + tmp = ((tmp & 0xcccccccc)>>2) + (tmp & 0x33333333); + tmp = ((tmp & 0xf0f0f0f0)>>4) + (tmp & 0x0f0f0f0f); + tmp = ((tmp & 0xff00ff00)>>8) + (tmp & 0x00ff00ff); + tmp = ((tmp & 0xffff0000)>>16) + (tmp & 0x0000ffff); + Rd_GPR64 = zext(32 - (tmp & 0x3f)); +} + +# C6.2.56 CLZ page C6-868 line 48532 MATCH x5ac01000/mask=x7ffffc00 +# CONSTRUCT xdac01000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac01000/mask=xfffffc00 --status pass + +:clz Rd_GPR64, Rn_GPR64 +is sf=1 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x4 & Rn_GPR64 & Rd_GPR64 +{ + local tmp:8 = Rn_GPR64; + # first make all lower bits =1 + tmp = tmp | (tmp >> 1); + tmp = tmp | (tmp >> 2); + tmp = tmp | (tmp >> 4); + tmp = tmp | (tmp >> 8); + tmp = tmp | (tmp >> 16); + tmp = tmp | (tmp >> 32); + # now add the 1 bits together, voila + tmp = ((tmp & 0xaaaaaaaaaaaaaaaa)>>1) + (tmp & 0x5555555555555555); + tmp = ((tmp & 0xcccccccccccccccc)>>2) + (tmp & 0x3333333333333333); + tmp = ((tmp & 0xf0f0f0f0f0f0f0f0)>>4) + (tmp & 0x0f0f0f0f0f0f0f0f); + tmp = ((tmp & 0xff00ff00ff00ff00)>>8) + (tmp & 0x00ff00ff00ff00ff); + tmp = ((tmp & 0xffff0000ffff0000)>>16) + (tmp & 0x0000ffff0000ffff); + tmp = ((tmp & 0xffffffff00000000)>>32) + (tmp & 0x00000000ffffffff); + Rd_GPR64 = 64 - (tmp & 0x7f); +} + +# C6.2.57 CMN (extended register) page C6-869 line 48602 MATCH x2b20001f/mask=x7fe0001f +# C6.2.7 ADDS (extended register) page C6-784 line 44172 
MATCH x2b200000/mask=x7fe00000 +# CONSTRUCT x2b20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x2b20001f/mask=xffe0001f --status pass --comment "flags" + +:cmn Rn_GPR32wsp, ExtendRegShift32 +is sf=0 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd=0x1f +{ + tmp_1:4 = ExtendRegShift32; + addflags(Rn_GPR32wsp, tmp_1); + result:4 = Rn_GPR32wsp + tmp_1; + resultflags(result); + build SBIT_CZNO; +} + +# C6.2.57 CMN (extended register) page C6-869 line 48602 MATCH x2b20001f/mask=x7fe0001f +# C6.2.7 ADDS (extended register) page C6-784 line 44172 MATCH x2b200000/mask=x7fe00000 +# CONSTRUCT xab20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xab20001f/mask=xffe0001f --status pass --comment "flags" + +:cmn Rn_GPR64xsp, ExtendRegShift64 +is sf=1 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & b_2121=1 & opt=0 & ExtendRegShift64 & Rn_GPR64xsp & Rd=0x1f +{ + tmp_1:8 = ExtendRegShift64; + addflags(Rn_GPR64xsp, tmp_1); + result:8 = Rn_GPR64xsp + tmp_1; + resultflags(result); + build SBIT_CZNO; +} + +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# CONSTRUCT x3100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x3100001f/mask=xff00001f --status pass --comment "flags" + +:cmn Rn_GPR32xsp, ImmShift32 +is sf=0 & b_30=0 & b_29=1 & aa_Xd=31 & b_2428=0x11 & ImmShift32 & Rn_GPR32xsp +{ + addflags(Rn_GPR32xsp, ImmShift32); + tmp:4 = Rn_GPR32xsp + ImmShift32; + resultflags(tmp); + affectflags(); +} + +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# CONSTRUCT xb100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb100001f/mask=xff00001f --status pass --comment "flags" + +:cmn Rn_GPR64xsp, ImmShift64 +is sf=1 & b_30=0 & b_29=1 & aa_Xd=31 & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp +{ + addflags(Rn_GPR64xsp, ImmShift64); + tmp:8 = Rn_GPR64xsp + ImmShift64; + resultflags(tmp); + affectflags(); +} + +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# CONSTRUCT x3100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x3100001f/mask=xffc0001f --status pass --comment "flags" + +:cmn Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl0 +is sf=0 & op=0 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_posimm_lsl0 & Rn_GPR32wsp & Rd=0x1f +{ + tmp_1:4 = Imm12_addsubimm_operand_i32_posimm_lsl0; + addflags(Rn_GPR32wsp, tmp_1); + result:4 = Rn_GPR32wsp + tmp_1; + resultflags(result); + affectflags(); +} + +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# CONSTRUCT x3140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x3140001f/mask=xffc0001f --status pass --comment "flags" + +:cmn Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl12 +is sf=0 & op=0 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_posimm_lsl12 & Rn_GPR32wsp & Rd=0x1f +{ + tmp_1:4 = Imm12_addsubimm_operand_i32_posimm_lsl12; + addflags(Rn_GPR32wsp, tmp_1); + result:4 = Rn_GPR32wsp + tmp_1; + resultflags(result); + affectflags(); +} + +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.8 ADDS (immediate) page C6-787 line 44323 
MATCH x31000000/mask=x7f800000 +# CONSTRUCT xb100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb100001f/mask=xffc0001f --status pass --comment "flags" + +:cmn Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl0 +is sf=1 & op=0 & S=1 & SBIT_CZNO & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_posimm_lsl0 & Rn_GPR64xsp & Rd=0x1f +{ + tmp_1:8 = Imm12_addsubimm_operand_i64_posimm_lsl0; + addflags(Rn_GPR64xsp, tmp_1); + result:8 = Rn_GPR64xsp + tmp_1; + resultflags(result); + build SBIT_CZNO; +} + +# C6.2.58 CMN (immediate) page C6-871 line 48729 MATCH x3100001f/mask=x7f80001f +# C6.2.8 ADDS (immediate) page C6-787 line 44323 MATCH x31000000/mask=x7f800000 +# CONSTRUCT xb140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb140001f/mask=xffc0001f --status pass --comment "flags" + +:cmn Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl12 +is sf=1 & op=0 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_posimm_lsl12 & Rn_GPR64xsp & Rd=0x1f +{ + tmp_1:8 = Imm12_addsubimm_operand_i64_posimm_lsl12; + addflags(Rn_GPR64xsp, tmp_1); + result:8 = Rn_GPR64xsp + tmp_1; + resultflags(result); + affectflags(); +} + +# C6.2.59 CMN (shifted register) page C6-873 line 48819 MATCH x2b00001f/mask=x7f20001f +# C6.2.9 ADDS (shifted register) page C6-789 line 44428 MATCH x2b000000/mask=x7f200000 +# CONSTRUCT x2b00001f/mask=xff20801f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x2b00001f/mask=xff20801f --status pass --comment "flags" +# if shift == '11' then ReservedValue(); + +:cmn Rn_GPR32, RegShift32 +is sf=0 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & b_2121=0 & b_15=0 & RegShift32 & Rn_GPR32 & Rd=0x1f +{ + tmp_1:4 = RegShift32; + addflags(Rn_GPR32, tmp_1); + result:4 = Rn_GPR32 + tmp_1; + resultflags(result); + build SBIT_CZNO; +} + +# C6.2.59 CMN (shifted register) page C6-873 line 48819 MATCH x2b00001f/mask=x7f20001f +# C6.2.9 ADDS (shifted register) page C6-789 line 44428 MATCH x2b000000/mask=x7f200000 +# CONSTRUCT xab00001f/mask=xff20001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xab00001f/mask=xff20001f --status pass --comment "flags" + +:cmn Rn_GPR64, RegShift64 +is sf=1 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd=0x1f +{ + tmp_1:8 = RegShift64; + addflags(Rn_GPR64, tmp_1); + result:8 = Rn_GPR64 + tmp_1; + resultflags(result); + build SBIT_CZNO; +} + +# C6.2.60 CMP (extended register) page C6-875 line 48916 MATCH x6b20001f/mask=x7fe0001f +# C6.2.314 SUBS (extended register) page C6-1340 line 74449 MATCH x6b200000/mask=x7fe00000 +# CONSTRUCT x6b20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x6b20001f/mask=xffe0001f --status pass --comment "flags" + +:cmp Rn_GPR32wsp, ExtendRegShift32 +is sf=0 & op=1 & S=1 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd=0x1f +{ + subflags(Rn_GPR32wsp, ExtendRegShift32); + tmp:4 = Rn_GPR32wsp - ExtendRegShift32; + resultflags(tmp); + affectflags(); +} + +# C6.2.60 CMP (extended register) page C6-875 line 48916 MATCH x6b20001f/mask=x7fe0001f +# C6.2.314 SUBS (extended register) page C6-1340 line 74449 MATCH x6b200000/mask=x7fe00000 +# CONSTRUCT xeb20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xeb20001f/mask=xffe0001f --status pass --comment "flags" + +:cmp Rn_GPR64xsp, ExtendRegShift64 +is sf=1 & op=1 & S=1 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift64 & Rn_GPR64xsp & Rd=0x1f +{ + subflags(Rn_GPR64xsp, ExtendRegShift64); + tmp:8 = Rn_GPR64xsp - ExtendRegShift64; + resultflags(tmp); + affectflags(); +} + +# C6.2.61 CMP 
(immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# CONSTRUCT x7100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x7100001f/mask=xff00001f --status pass --comment "flags" + +:cmp Rn_GPR32xsp, ImmShift32 +is sf=0 & b_30=1 & b_29=1 & b_2428=0x11 & ImmShift32 & Rn_GPR32xsp & aa_Wd=31 +{ + subflags(Rn_GPR32xsp, ImmShift32); + tmp:4 = Rn_GPR32xsp - ImmShift32; + resultflags(tmp); + affectflags(); +} + +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# CONSTRUCT xf100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf100001f/mask=xff00001f --status pass --comment "flags" + +:cmp Rn_GPR64xsp, ImmShift64 +is sf=1 & b_30=1 & b_29=1 & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp & aa_Wd=31 +{ + subflags(Rn_GPR64xsp, ImmShift64); + tmp:8 = Rn_GPR64xsp - ImmShift64; + resultflags(tmp); + affectflags(); +} + +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# CONSTRUCT x7100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x7100001f/mask=xffc0001f --status pass --comment "flags" + +:cmp Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0 +is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_negimm_lsl0 & Rn_GPR32wsp & Rd=0x1f +{ + tmp_1:4 = Imm12_addsubimm_operand_i32_negimm_lsl0; + subflags(Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0); + result:4 = Rn_GPR32wsp - tmp_1; + resultflags(result); + affectflags(); +} + +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# CONSTRUCT x7140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x7140001f/mask=xffc0001f --status pass --comment "flags" + +:cmp Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl12 +is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_negimm_lsl12 & Rn_GPR32wsp & Rd=0x1f +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl12; + subflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp - tmp_2; + resultflags(tmp_1); + affectflags(); +} + +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# CONSTRUCT xf100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf100001f/mask=xffc0001f --status pass --comment "flags" + +:cmp Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl0 +is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_negimm_lsl0 & Rn_GPR64xsp & Rd=0x1f +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl0; + subflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp - tmp_2; + resultflags(tmp_1); + affectflags(); +} + +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# CONSTRUCT xf140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf140001f/mask=xffc0001f --status pass --comment "flags" + +:cmp Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl12 +is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_negimm_lsl12 & Rn_GPR64xsp & Rd=0x1f +{ + tmp_2:8 = 
Imm12_addsubimm_operand_i64_negimm_lsl12;
+ subflags(Rn_GPR64xsp, tmp_2);
+ tmp_1:8 = Rn_GPR64xsp - tmp_2;
+ resultflags(tmp_1);
+ affectflags();
+}
+
+# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f
+# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0
+# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000
+# CONSTRUCT x6b00001f/mask=xff20001f MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x6b00001f/mask=xff20001f --status pass --comment "flags"
+
+:cmp Rn_GPR32, RegShift32
+is sf=0 & op=1 & S=1 & b_2428=0xb & b_2121=0 & RegShift32 & Rn!=0x1f & Rn_GPR32 & Rd=0x1f
+{
+ subflags(Rn_GPR32, RegShift32);
+ tmp:4 = Rn_GPR32 - RegShift32;
+ resultflags(tmp);
+ affectflags();
+}
+
+# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f
+# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0
+# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000
+# CONSTRUCT xeb00001f/mask=xff20001f MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xeb00001f/mask=xff20001f --status pass --comment "flags"
+
+:cmp Rn_GPR64, RegShift64
+is sf=1 & op=1 & S=1 & b_2428=0xb & b_2121=0 & Rm_GPR64 & RegShift64 & Rn!=0x1f & Rn_GPR64 & Rd=0x1f
+{
+ subflags(Rn_GPR64, RegShift64);
+ tmp:8 = Rn_GPR64 - RegShift64;
+ resultflags(tmp);
+ affectflags();
+}
+
+# C6.2.64 CNEG page C6-882 line 49282 MATCH x5a800400/mask=x7fe00c00
+# C6.2.74 CSNEG page C6-900 line 50164 MATCH x5a800400/mask=x7fe00c00
+# CONSTRUCT x5a800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x5a800400/mask=xffe00c00 --status pass --comment "flags"
+
+:cneg Rd_GPR32, Rn_GPR32, InvCondOp
+is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=1 & Rn=Rm & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:4 = -Rn_GPR32;
+ if (condition) goto <done>;
+ tmp = Rn_GPR32;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.64 CNEG page C6-882 line 49282 MATCH x5a800400/mask=x7fe00c00
+# C6.2.74 CSNEG page C6-900 line 50164 MATCH x5a800400/mask=x7fe00c00
+# CONSTRUCT xda800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xda800400/mask=xffe00c00 --status pass --comment "flags"
+
+:cneg Rd_GPR64, Rn_GPR64, InvCondOp
+is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=1 & Rn=Rm & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:8 = -Rn_GPR64;
+ if (condition) goto <done>;
+ tmp = Rn_GPR64;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.59 CRC32B, CRC32H, CRC32W, CRC32X page C6-611 line 35802 KEEPWITH
+# sf == 0 && sz = 00 CRC32CB variant

+crcpoly: "" is b_12=0 { tmp:4 = 0x04C11DB7; export *[const]:4 tmp; }
+crcpoly: "c" is b_12=1 { tmp:4 = 0x1EDC6F41; export *[const]:4 tmp; }
+
+# C6.2.66 CRC32B, CRC32H, CRC32W, CRC32X page C6-885 line 49423 MATCH x1ac04000/mask=x7fe0f000
+# C6.2.67 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-887 line 49531 MATCH x1ac05000/mask=x7fe0f000
+# CONSTRUCT x1ac04000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x1ac04000/mask=xffe0ec00 --status noqemu
+
+:crc32^crcpoly^"b" Rd_GPR32, Rn_GPR32, Rm_GPR32
+is b_31=0 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b00 & crcpoly & Rm_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ local tmp_Rd:4 = crc32b(Rn_GPR32, Rm_GPR32, crcpoly);
+ Rd_GPR64 = zext(tmp_Rd);
+}
+
+# C6.2.66 CRC32B, CRC32H, CRC32W, CRC32X page C6-885 line 49423 MATCH x1ac04000/mask=x7fe0f000
+# C6.2.67 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-887 line 49531 MATCH x1ac05000/mask=x7fe0f000
+# CONSTRUCT x1ac04400/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x1ac04400/mask=xffe0ec00 --status noqemu
+# sf == 0 && sz = 01 CRC32CH variant
+
+:crc32^crcpoly^"h" Rd_GPR32, Rn_GPR32, Rm_GPR32
+is b_31=0 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b01 & crcpoly & Rm_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ local tmp_Rd:4 = crc32h(Rn_GPR32, Rm_GPR32, crcpoly);
+ Rd_GPR64 = zext(tmp_Rd);
+}
+
+# C6.2.66 CRC32B, CRC32H, CRC32W, CRC32X page C6-885 line 49423 MATCH x1ac04000/mask=x7fe0f000
+# C6.2.67 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-887 line 49531 MATCH x1ac05000/mask=x7fe0f000
+# CONSTRUCT x1ac04800/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x1ac04800/mask=xffe0ec00 --status noqemu
+# sf == 0 && sz = 10 CRC32CW variant
+
+:crc32^crcpoly^"w" Rd_GPR32, Rn_GPR32, Rm_GPR32
+is b_31=0 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b10 & crcpoly & Rm_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ local tmp_Rd:4 = crc32w(Rn_GPR32, Rm_GPR32, crcpoly);
+ Rd_GPR64 = zext(tmp_Rd);
+}
+
+# C6.2.66 CRC32B, CRC32H, CRC32W, CRC32X page C6-885 line 49423 MATCH x1ac04000/mask=x7fe0f000
+# C6.2.67 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-887 line 49531 MATCH x1ac05000/mask=x7fe0f000
+# CONSTRUCT x9ac04c00/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x9ac04c00/mask=xffe0ec00 --status noqemu
+# sf == 1 && sz = 11 CRC32CX variant
+
+:crc32^crcpoly^"x" Rd_GPR32, Rn_GPR32, Rm_GPR64
+is b_31=1 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b11 & crcpoly & Rm_GPR64 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ local tmp_Rd:4 = crc32x(Rn_GPR32, Rm_GPR64, crcpoly);
+ Rd_GPR64 = zext(tmp_Rd);
+}
+
+# C6.2.69 CSEL page C6-890 line 49692 MATCH x1a800000/mask=x7fe00c00
+# CONSTRUCT x1a800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x1a800000/mask=xffe00c00 --status pass --comment "flags"
+
+:csel Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
+is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:4 = Rn_GPR32;
+ if (condition) goto <done>;
+ tmp = Rm_GPR32;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.69 CSEL page C6-890 line 49692 MATCH x1a800000/mask=x7fe00c00
+# CONSTRUCT x9a800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9a800000/mask=xffe00c00 --status pass --comment "flags"
+
+:csel Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
+is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=0 & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:8 = Rn_GPR64;
+ if (condition) goto <done>;
+ tmp = Rm_GPR64;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.70 CSET page C6-892 line 49783 MATCH x1a9f07e0/mask=x7fff0fe0
+# C6.2.52 CINC page C6-862 line 48243 MATCH x1a800400/mask=x7fe00c00
+# C6.2.72 CSINC page C6-896 line 49956 MATCH x1a800400/mask=x7fe00c00
+# CONSTRUCT x1a9f07e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x1a9f07e0/mask=xffff0fe0 --status pass --comment "flags"
+
+:cset Rd_GPR32, InvCondOp
+is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & InvCondOp & b_1011=1 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ Rd_GPR64 = zext(condition);
+}
+
+# C6.2.70 CSET page C6-892 line 49783 MATCH x1a9f07e0/mask=x7fff0fe0
+# C6.2.52 CINC page C6-862 line 48243 MATCH x1a800400/mask=x7fe00c00
+# C6.2.72 CSINC page C6-896 line 49956 MATCH x1a800400/mask=x7fe00c00
+# CONSTRUCT x9a9f07e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x9a9f07e0/mask=xffff0fe0 --status pass --comment "flags"
+
+:cset Rd_GPR64, InvCondOp
+is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & InvCondOp & b_1011=1 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ Rd_GPR64 = zext(condition);
+}
+
+# C6.2.71 CSETM page C6-894 line 49869 MATCH x5a9f03e0/mask=x7fff0fe0
+# C6.2.53 CINV page C6-864 line 48333 MATCH x5a800000/mask=x7fe00c00
+# C6.2.73 CSINV page C6-898 line 50060 MATCH x5a800000/mask=x7fe00c00
+# CONSTRUCT x5a9f03e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x5a9f03e0/mask=xffff0fe0 --status pass --comment "flags"
+
+:csetm Rd_GPR32, InvCondOp
+is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=0 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ tmp:4 = zext(condition) * -1;
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.71 CSETM page C6-894 line 49869 MATCH x5a9f03e0/mask=x7fff0fe0
+# C6.2.53 CINV page C6-864 line 48333 MATCH x5a800000/mask=x7fe00c00
+# C6.2.73 CSINV page C6-898 line 50060 MATCH x5a800000/mask=x7fe00c00
+# CONSTRUCT xda9f03e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xda9f03e0/mask=xffff0fe0 --status pass --comment "flags"
+
+:csetm Rd_GPR64, InvCondOp
+is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=0 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = InvCondOp;
+ Rd_GPR64 = zext(condition) * -1;
+}
+
+# C6.2.72 CSINC page C6-896 line 49956 MATCH x1a800400/mask=x7fe00c00
+# C6.2.52 CINC page C6-862 line 48243 MATCH x1a800400/mask=x7fe00c00
+# C6.2.70 CSET page C6-892 line 49783 MATCH x1a9f07e0/mask=x7fff0fe0
+# CONSTRUCT x1a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x1a800400/mask=xffe00c00 --status pass --comment "flags"
+
+:csinc Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
+is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=1 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:4 = Rn_GPR32;
+ if (condition) goto <done>;
+ tmp = Rm_GPR32 + 1;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.72 CSINC page C6-896 line 49956 MATCH x1a800400/mask=x7fe00c00
+# C6.2.52 CINC page C6-862 line 48243 MATCH x1a800400/mask=x7fe00c00
+# C6.2.70 CSET page C6-892 line 49783 MATCH x1a9f07e0/mask=x7fff0fe0
+# CONSTRUCT x9a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x9a800400/mask=xffe00c00 --status pass --comment "flags"
+
+:csinc Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
+is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=1 & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:8 = Rn_GPR64;
+ if (condition) goto <done>;
+ tmp = Rm_GPR64 + 1;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.73 CSINV page C6-898 line 50060 MATCH x5a800000/mask=x7fe00c00
+# C6.2.53 CINV page C6-864 line 48333 MATCH x5a800000/mask=x7fe00c00
+# C6.2.71 CSETM page C6-894 line 49869 MATCH x5a9f03e0/mask=x7fff0fe0
+# CONSTRUCT x5a800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x5a800000/mask=xffe00c00 --status pass --comment "flags"
+
+:csinv Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
+is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:4 = Rn_GPR32;
+ if (condition) goto <done>;
+ tmp = ~Rm_GPR32;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.73 CSINV page C6-898 line 50060 MATCH x5a800000/mask=x7fe00c00
+# C6.2.53 CINV page C6-864 line 48333 MATCH x5a800000/mask=x7fe00c00
+# C6.2.71 CSETM page C6-894 line 49869 MATCH x5a9f03e0/mask=x7fff0fe0
+# CONSTRUCT xda800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xda800000/mask=xffe00c00 --status pass --comment "flags"
+
+:csinv Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
+is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=0 & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:8 = Rn_GPR64;
+ if (condition) goto <done>;
+ tmp = ~Rm_GPR64;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.74 CSNEG page C6-900 line 50164 MATCH x5a800400/mask=x7fe00c00
+# C6.2.64 CNEG page C6-882 line 49282 MATCH x5a800400/mask=x7fe00c00
+# CONSTRUCT x5a800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x5a800400/mask=xffe00c00 --status pass --comment "flags"
+
+:csneg Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
+is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=1 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:4 = Rn_GPR32;
+ if (condition) goto <done>;
+ tmp = -Rm_GPR32;
+ <done>
+ Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.74 CSNEG page C6-900 line 50164 MATCH x5a800400/mask=x7fe00c00
+# C6.2.64 CNEG page C6-882 line 49282 MATCH x5a800400/mask=x7fe00c00
+# CONSTRUCT xda800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xda800400/mask=xffe00c00 --status pass --comment "flags"
+
+:csneg Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
+is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=1 & Rn_GPR64 & Rd_GPR64
+{
+ condition:1 = CondOp;
+ tmp:8 = Rn_GPR64;
+ if (condition) goto <done>;
+ tmp = -Rm_GPR64;
+ <done>
+ Rd_GPR64 = tmp;
+}
+
+# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000
+# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000
+# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000
+# CONSTRUCT xd50b7420/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xd50b7420/mask=xffffffe0 --status nodest
+
+:dc "ZVA", Rt_GPR64
+is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0100 & b_0507=0b001 & Rt_GPR64
+{ DC_ZVA(Rt_GPR64); }
+
+# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000
+# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000
+# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000
+# CONSTRUCT xd5087620/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xd5087620/mask=xffffffe0 --status nodest
+
+:dc "IVAC", Rt_GPR64
+is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b001 & Rt_GPR64
+{ DC_IVAC(Rt_GPR64); }
+
+# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000
+# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000
+# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000
+# CONSTRUCT xd5087640/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xd5087640/mask=xffffffe0 --status nodest
+
+:dc "ISW", Rt_GPR64
+is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b010 & Rt_GPR64
+{ DC_ISW(Rt_GPR64); }
+
+# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000
+# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000
+# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000
+# CONSTRUCT xd50b7a20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xd50b7a20/mask=xffffffe0 --status nopcodeop
+
+:dc "CVAC", Rt_GPR64
+is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b001 & Rt_GPR64
+{ DC_CVAC(Rt_GPR64); }
+
+# C6.2.75 DC page C6-902 line 50267 MATCH
xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087a40/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd5087a40/mask=xffffffe0 --status nodest + +:dc "CSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b010 & Rt_GPR64 +{ DC_CSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7b20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd50b7b20/mask=xffffffe0 --status nodest + +:dc "CVAU", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1011 & b_0507=0b001 & Rt_GPR64 +{ DC_CVAU(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7e20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd50b7e20/mask=xffffffe0 --status nodest + +:dc "CIVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b001 & Rt_GPR64 +{ DC_CIVAC(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087e40/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd5087e40/mask=xffffffe0 --status nodest + +:dc "CISW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b010 & Rt_GPR64 +{ DC_CISW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7c20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd50b7c20/mask=xffffffe0 --status nodest + +:dc "CVAP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1100 & b_0507=0b001 & Rt_GPR64 +{ DC_CVAP(Rt_GPR64); } + +# C6.2.76 DCPS1 page C6-904 line 50363 MATCH xd4a00001/mask=xffe0001f +# CONSTRUCT xd4a00001/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd4a00001/mask=xffe0001f --status nodest + +:dcps1 imm16 +is b_2431=0xd4 & excCode=5 & imm16 & excCode2=0 & ll=1 +{ + DCPSInstruction(1:2, imm16:2); +} + +# C6.2.77 DCPS2 page C6-905 line 50428 MATCH xd4a00002/mask=xffe0001f +# CONSTRUCT xd4a00002/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd4a00002/mask=xffe0001f --status nodest + +:dcps2 imm16 +is b_2431=0xd4 & excCode=5 & imm16 & excCode2=0 & ll=2 +{ + DCPSInstruction(2:2, imm16:2); +} + +# C6.2.78 DCPS3 page C6-906 line 50498 MATCH xd4a00003/mask=xffe0001f +# CONSTRUCT xd4a00003/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd4a00003/mask=xffe0001f --status nodest + +:dcps3 imm16 +is b_2431=0xd4 & excCode=5 & imm16 & excCode2=0 & ll=3 +{ + DCPSInstruction(3:2, imm16:2); +} + +# C6.2.80 DMB page C6-908 line 50599 MATCH xd50330bf/mask=xfffff0ff +# CONSTRUCT xd50330bf/mask=xfffff3ff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd50330bf/mask=xfffff3ff --status nodest + +:dmb CRm_CRx +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 
& CRm_dbarrier_op & CRm_CRx & CRm_32 & CRm_10=0 & Op2=5 & Rt=0x1f
+{
+ types:1 = 0x0;
+ domain:1 = CRm_32;
+ DataMemoryBarrier(domain, types);
+}
+
+# C6.2.80 DMB page C6-908 line 50599 MATCH xd50330bf/mask=xfffff0ff
+# CONSTRUCT xd50330bf/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd50330bf/mask=xfffff0ff --status nodest
+
+:dmb CRm_dbarrier_op
+is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_dbarrier_op & CRm_32 & CRm_10 & Op2=5 & Rt=0x1f
+{
+ types:1 = CRm_10;
+ domain:1 = CRm_32;
+ DataMemoryBarrier(domain, types);
+}
+
+# C6.2.81 DRPS page C6-910 line 50692 MATCH xd6bf03e0/mask=xffffffff
+# CONSTRUCT xd6bf03e0/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd6bf03e0/mask=xffffffff --status nodest
+
+:drps
+is b_2531=0x6b & b_2324=1 & b_2122=1 & b_1620=0x1f & b_1015=0 & aa_Xn=31 & b_0004=0
+{
+ pc = DRPSInstruction();
+ return [pc];
+}
+
+# C6.2.82 DSB page C6-911 line 50726 MATCH xd503309f/mask=xfffff0ff
+# C6.2.217 PSSBB page C6-1167 line 65054 MATCH xd503349f/mask=xffffffff
+# C6.2.245 SSBB page C6-1208 line 67198 MATCH xd503309f/mask=xffffffff
+# CONSTRUCT xd503309f/mask=xfffff3ff MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst xd503309f/mask=xfffff3ff --status nodest
+
+:dsb CRm_CRx
+is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_dbarrier_op & CRm_CRx & CRm_32 & CRm_10=0 & Op2=4 & Rt=0x1f
+{
+ types:1 = 0x0;
+ domain:1 = CRm_32;
+ DataSynchronizationBarrier(domain, types);
+}
+
+# C6.2.82 DSB page C6-911 line 50726 MATCH xd503309f/mask=xfffff0ff
+# CONSTRUCT xd503309f/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd503309f/mask=xfffff0ff --status nodest
+
+:dsb CRm_dbarrier_op
+is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_dbarrier_op & CRm_32 & CRm_10 & Op2=4 & Rt=0x1f
+{
+ types:1 = CRm_10;
+ domain:1 = CRm_32;
+ DataSynchronizationBarrier(domain, types);
+}
+
+# C6.2.84 EON (shifted register) page C6-914 line 50874 MATCH x4a200000/mask=x7f200000
+# CONSTRUCT x4a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x4a200000/mask=xff200000 --status pass
+
+:eon Rd_GPR32, Rn_GPR32, RegShift32Log
+is sf=0 & opc=2 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ tmp_3:4 = RegShift32Log;
+ tmp_2:4 = tmp_3 ^ -1:4;
+ tmp_1:4 = Rn_GPR32 ^ tmp_2;
+ Rd_GPR64 = zext(tmp_1);
+}
+
+# C6.2.84 EON (shifted register) page C6-914 line 50874 MATCH x4a200000/mask=x7f200000
+# CONSTRUCT xca200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xca200000/mask=xff200000 --status pass
+
+:eon Rd_GPR64, Rn_GPR64, RegShift64Log
+is sf=1 & opc=2 & b_2428=0xa & N=1 & Rm_GPR64 & RegShift64Log & Rn_GPR64 & Rd_GPR64
+{
+ tmp_3:8 = RegShift64Log;
+ tmp_2:8 = tmp_3 ^ -1:8;
+ tmp_1:8 = Rn_GPR64 ^ tmp_2;
+ Rd_GPR64 = tmp_1;
+}
+
+# C6.2.85 EOR (immediate) page C6-916 line 50977 MATCH x52000000/mask=x7f800000
+# CONSTRUCT x52000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x52000000/mask=xff800000 --status pass
+
+:eor Rd_GPR32wsp, Rn_GPR32, DecodeWMask32
+is sf=0 & opc=2 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32wsp & Rd_GPR64xsp
+{
+ tmp_1:4 = Rn_GPR32 ^ DecodeWMask32;
+ Rd_GPR64xsp = zext(tmp_1);
+}
+
+# C6.2.85 EOR (immediate) page C6-916 line 50977 MATCH x52000000/mask=x7f800000
+# CONSTRUCT xd2000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd2000000/mask=xff800000 --status pass
+
+:eor Rd_GPR64xsp, Rn_GPR64, DecodeWMask64
+is sf=1 & opc=2 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64xsp
+{
+
tmp_1:8 = Rn_GPR64 ^ DecodeWMask64; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.86 EOR (shifted register) page C6-918 line 51068 MATCH x4a000000/mask=x7f200000 +# CONSTRUCT x4a000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x4a000000/mask=xff200000 --status pass + +:eor Rd_GPR32, Rn_GPR32, RegShift32Log +is sf=0 & opc=2 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32Log; + tmp_1:4 = Rn_GPR32 ^ tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.86 EOR (shifted register) page C6-918 line 51068 MATCH x4a000000/mask=x7f200000 +# CONSTRUCT xca000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xca000000/mask=xff200000 --status pass + +:eor Rd_GPR64, Rn_GPR64, RegShift64Log +is sf=1 & opc=2 & b_2428=0xa & N=0 & Rm_GPR64 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = RegShift64Log; + tmp_1:8 = Rn_GPR64 ^ tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.87 ERET page C6-920 line 51169 MATCH xd69f03e0/mask=xffffffff +# CONSTRUCT xd69f03e0/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd69f03e0/mask=xffffffff --status nodest + +:eret +is b_2531=0x6b & b_2324=1 & b_2122=0 & b_1620=0x1f & b_1015=0 & aa_Xn=31 & b_0004=0 +{ + pc = ExceptionReturn(); + return [pc]; +} + +# C6.2.88 ERETAA, ERETAB page C6-921 line 51210 MATCH xd69f0bff/mask=xfffffbff +# CONSTRUCT xd69f0bff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd69f0bff/mask=xffffffff --status nodest + +:eretaa +is eretaa__PACpart & b_0031=0xd69f0bff +{ + pc = ExceptionReturn(); + build eretaa__PACpart; + return [pc]; +} + +# C6.2.88 ERETAA, ERETAB page C6-921 line 51210 MATCH xd69f0bff/mask=xfffffbff +# CONSTRUCT xd69f0fff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd69f0fff/mask=xffffffff --status nodest + +:eretab +is eretab__PACpart & b_0031=0xd69f0fff +{ + pc = ExceptionReturn(); + build eretab__PACpart; + return [pc]; +} + +# C6.2.90 EXTR page C6-923 line 51323 MATCH x13800000/mask=x7fa00000 +# C6.2.226 ROR (immediate) page C6-1179 line 65715 MATCH x13800000/mask=x7fa00000 +# CONSTRUCT x13800000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x13800000/mask=xffe00000 --status pass + +:extr Rd_GPR32, Rn_GPR32, Rm_GPR32, LSB_bitfield32_imm +is sf=0 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=0 & b_21=0 & Rm_GPR32 & LSB_bitfield32_imm & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + lsb:8 = LSB_bitfield32_imm; + result:8 = (zext(Rn_GPR32) << 32) | zext(Rm_GPR32); + result = (result >> lsb); + Rd_GPR64 = zext(result:4); +} + +# C6.2.90 EXTR page C6-923 line 51323 MATCH x13800000/mask=x7fa00000 +# C6.2.226 ROR (immediate) page C6-1179 line 65715 MATCH x13800000/mask=x7fa00000 +# CONSTRUCT x93c00000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x93c00000/mask=xffe00000 --status pass + +:extr Rd_GPR64, Rn_GPR64, Rm_GPR64, LSB_bitfield64_imm +is sf=1 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=1 & b_21=0 & Rm_GPR64 & LSB_bitfield64_imm & Rn_GPR64 & Rd_GPR64 +{ + local tmp:8 = (Rm_GPR64 >> LSB_bitfield64_imm:1); + Rd_GPR64 = tmp | (Rn_GPR64 << (64:1 - LSB_bitfield64_imm:1)); +} + +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-811 line 45548 MATCH xd503219f/mask=xfffffddf +# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-813 line 45695 MATCH xd50321df/mask=xfffffddf +# C6.2.68 CSDB page C6-889 line 49639 MATCH xd503229f/mask=xffffffff +# C6.2.79 DGH page C6-907 line 50562 MATCH xd50320df/mask=xffffffff +# C6.2.89 ESB page C6-922 line 51277 MATCH 
xd503221f/mask=xffffffff +# C6.2.210 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1153 line 64322 MATCH xd503211f/mask=xfffffddf +# C6.2.211 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1156 line 64481 MATCH xd503215f/mask=xfffffddf +# C6.2.216 PSB CSYNC page C6-1166 line 65014 MATCH xd503223f/mask=xffffffff +# C6.2.329 TSB CSYNC page C6-1367 line 75873 MATCH xd503225f/mask=xffffffff +# CONSTRUCT xd503201f/mask=xfffff01f MATCHED 10 DOCUMENTED OPCODES +# AUNIT --inst xd503201f/mask=xfffff01f --status nodest + +:hint imm7Low +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low & Rt=0x1f {} + +# C6.2.93 HLT page C6-929 line 51683 MATCH xd4400000/mask=xffe0001f +# CONSTRUCT xd4400000/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd4400000/mask=xffe0001f --status nodest + +:hlt imm16 +is ALL_BTITARGETS & b_2431=0xd4 & excCode=2 & imm16 & excCode2=0 & ll=0 +{ + HaltBreakPoint(); +} + +# C6.2.94 HVC page C6-930 line 51724 MATCH xd4000002/mask=xffe0001f +# CONSTRUCT xd4000002/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd4000002/mask=xffe0001f --status nodest + +:hvc imm16 +is b_2431=0xd4 & excCode=0 & imm16 & excCode2=0 & ll=2 +{ + CallHyperVisor(imm16:2); +} + +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087100/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd5087100/mask=xffffffe0 --status nodest + +:ic "IALLUIS" +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0001 & b_0507=0b000 +{ IC_IALLUIS(); } + +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087500/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd5087500/mask=xffffffe0 --status nodest + +:ic "IALLU" +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0101 & b_0507=0b000 +{ IC_IALLU(); } + +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7520/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd50b7520/mask=xffffffe0 --status nopcodeop + +:ic "IVAU", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0101 & b_0507=0b001 & Rt_GPR64 +{ IC_IVAU(Rt_GPR64); } + +# C6.2.85 ISB page C6-647 line 37682 KEEPWITH + +IsbOption: "#"^CRm_isb_op is CRm_isb_op { export *[const]:4 CRm_isb_op; } +IsbOption: "" is CRm_isb_op=0xf { tmp:4 = 0xf; export tmp; } + +# C6.2.97 ISB page C6-933 line 51915 MATCH xd50330df/mask=xfffff0ff +# CONSTRUCT xd50330df/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd50330df/mask=xfffff0ff --status nodest + +:isb IsbOption +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & IsbOption & Op2=6 & Rt=0x1f +{ + InstructionSynchronizationBarrier(); +} + +# C6.2.86 LDADDB, LDADDAB, LDADDALB, LDADDLB page C6-648 line 37726 KEEPWITH + +# variants, a=acquire, al=acquire+release, l=release +# build ls_loa to acquire and ls_lor to release + +ls_loa: "a" is b_23=1 & b_22=0 { LOAcquire(); } +ls_loa: "al" is b_23=1 & b_22=1 { LOAcquire(); } +ls_loa: "" is b_23=0 & b_22=0 { } +ls_loa: "l" is b_23=0 & b_22=1 { } + +ls_lor: "a" is b_23=1 & b_22=0 { } 
+ls_lor: "al" is b_23=1 & b_22=1 { LORelease(); } +ls_lor: "" is b_23=0 & b_22=0 { } +ls_lor: "l" is b_23=0 & b_22=1 { LORelease(); } + +ls_data1: is b_3031=0b00 & Rn_GPR64xsp { tmp_ldWn = zext(*:1 Rn_GPR64xsp); } +ls_data2: is b_3031=0b01 & Rn_GPR64xsp { tmp_ldWn = zext(*:2 Rn_GPR64xsp); } +ls_data4: is b_3031=0b10 & Rn_GPR64xsp { tmp_ldWn = *:4 Rn_GPR64xsp; } +ls_data8: is b_3031=0b11 & Rn_GPR64xsp { tmp_ldXn = *:8 Rn_GPR64xsp; } + +ls_mem1: is Rn_GPR64xsp { *:1 Rn_GPR64xsp = tmp_stWn:1; } +ls_mem2: is Rn_GPR64xsp { *:2 Rn_GPR64xsp = tmp_stWn:2; } +ls_mem4: is Rn_GPR64xsp { *:4 Rn_GPR64xsp = tmp_stWn; } +ls_mem8: is Rn_GPR64xsp { *:8 Rn_GPR64xsp = tmp_stXn; } + +macro ls_opc_add (data, value, dest) { dest = data + value; } +macro ls_opc_clr (data, value, dest) { dest = data & (~ value); } +macro ls_opc_eor (data, value, dest) { dest = data ^ value; } +macro ls_opc_set (data, value, dest) { dest = data | value; } +macro ls_opc_smax(data, value, dest) { dest = zext(data s> value) * data + zext(data s<= value) * value; } +macro ls_opc_smin(data, value, dest) { dest = zext(data s> value) * value + zext(data s<= value) * data; } +macro ls_opc_umax(data, value, dest) { dest = zext(data > value) * data + zext(data <= value) * value; } +macro ls_opc_umin(data, value, dest) { dest = zext(data > value) * value + zext(data <= value) * data; } +macro ls_opc_swp (data, value, dest) { dest = value; } + +ls_opc1: "add" is b_3031=0b00 & b_1215=0b0000 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_add(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "add" is b_3031=0b01 & b_1215=0b0000 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_add(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "add" is b_3031=0b10 & b_1215=0b0000 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_add(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "add" is b_3031=0b11 & b_1215=0b0000 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_add(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "clr" is b_3031=0b00 & b_1215=0b0001 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_clr(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "clr" is b_3031=0b01 & b_1215=0b0001 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_clr(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "clr" is b_3031=0b10 & b_1215=0b0001 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_clr(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "clr" is b_3031=0b11 & b_1215=0b0001 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_clr(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "eor" is b_3031=0b00 & b_1215=0b0010 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_eor(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "eor" is b_3031=0b01 & b_1215=0b0010 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_eor(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "eor" is b_3031=0b10 & b_1215=0b0010 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_eor(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "eor" is b_3031=0b11 & b_1215=0b0010 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_eor(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "set" is b_3031=0b00 & b_1215=0b0011 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_set(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "set" is b_3031=0b01 & b_1215=0b0011 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_set(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "set" is b_3031=0b10 & 
b_1215=0b0011 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_set(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "set" is b_3031=0b11 & b_1215=0b0011 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_set(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "smax" is b_3031=0b00 & b_1215=0b0100 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_smax(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "smax" is b_3031=0b01 & b_1215=0b0100 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_smax(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "smax" is b_3031=0b10 & b_1215=0b0100 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_smax(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "smax" is b_3031=0b11 & b_1215=0b0100 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_smax(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "smin" is b_3031=0b00 & b_1215=0b0101 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_smin(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "smin" is b_3031=0b01 & b_1215=0b0101 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_smin(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "smin" is b_3031=0b10 & b_1215=0b0101 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_smin(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "smin" is b_3031=0b11 & b_1215=0b0101 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_smin(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "umax" is b_3031=0b00 & b_1215=0b0110 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_umax(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "umax" is b_3031=0b01 & b_1215=0b0110 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_umax(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "umax" is b_3031=0b10 & b_1215=0b0110 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_umax(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "umax" is b_3031=0b11 & b_1215=0b0110 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_umax(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +ls_opc1: "umin" is b_3031=0b00 & b_1215=0b0111 & aa_Ws & ls_data1 & ls_mem1 { build ls_data1; ls_opc_umin(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; } +ls_opc2: "umin" is b_3031=0b01 & b_1215=0b0111 & aa_Ws & ls_data2 & ls_mem2 { build ls_data2; ls_opc_umin(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; } +ls_opc4: "umin" is b_3031=0b10 & b_1215=0b0111 & aa_Ws & ls_data4 & ls_mem4 { build ls_data4; ls_opc_umin(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; } +ls_opc8: "umin" is b_3031=0b11 & b_1215=0b0111 & aa_Xs & ls_data8 & ls_mem8 { build ls_data8; ls_opc_umin(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; } + +# Nearly all of these instructions have the same "operation" in the +# manual, the differences being load vs store, the operation (o3:opc), +# the data size, and the load store semantics of the atomic load and +# store types (AccType). The opcode mnemonic varies, however. And to +# facilitate reading, the LD/ST/SWP variants have been separated out. 
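+#
+# For example, "ldaddal w1, w2, [x0]" (size=0b10, A=1, R=1) expands roughly to:
+#   LOAcquire();                  (ls_loa)
+#   tmp_ldWn = *:4 x0;            (ls_data4, built by ls_opc4)
+#   tmp_stWn = tmp_ldWn + w1;     (ls_opc_add)
+#   *:4 x0 = tmp_stWn;            (ls_mem4)
+#   w2 = tmp_ldWn;
+#   LORelease();                  (ls_lor)
+# Note also that the min/max macros above are branchless selects: in
+# ls_opc_smax, zext(data s> value) is 1 when data is larger and 0 otherwise,
+# so exactly one of the two products contributes to dest.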
+ +# C6.2.98 LDADDB, LDADDAB, LDADDALB, LDADDLB page C6-934 line 51959 MATCH x38200000/mask=xff20fc00 +# C6.2.117 LDCLRB, LDCLRAB, LDCLRALB, LDCLRLB page C6-969 line 53884 MATCH x38201000/mask=xff20fc00 +# C6.2.120 LDEORB, LDEORAB, LDEORALB, LDEORLB page C6-976 line 54306 MATCH x38202000/mask=xff20fc00 +# C6.2.146 LDSETB, LDSETAB, LDSETALB, LDSETLB page C6-1032 line 57673 MATCH x38203000/mask=xff20fc00 +# C6.2.149 LDSMAXB, LDSMAXAB, LDSMAXALB, LDSMAXLB page C6-1039 line 58095 MATCH x38204000/mask=xff20fc00 +# C6.2.152 LDSMINB, LDSMINAB, LDSMINALB, LDSMINLB page C6-1046 line 58517 MATCH x38205000/mask=xff20fc00 +# C6.2.161 LDUMAXB, LDUMAXAB, LDUMAXALB, LDUMAXLB page C6-1065 line 59617 MATCH x38206000/mask=xff20fc00 +# C6.2.164 LDUMINB, LDUMINAB, LDUMINALB, LDUMINLB page C6-1072 line 60039 MATCH x38207000/mask=xff20fc00 +# CONSTRUCT x38200000/mask=xff208c00 MATCHED 8 DOCUMENTED OPCODES +# AUNIT --inst x38200000/mask=xff208c00 --status nomem + +# size=0b00 (3031) + +:ld^ls_opc1^ls_lor^"b" aa_Ws, aa_Wt, [Rn_GPR64xsp] +is b_3031=0b00 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc1 & ls_loa & ls_lor & aa_Wt & aa_Ws & Rn_GPR64xsp +{ build ls_loa; build ls_opc1; aa_Wt = tmp_ldWn; build ls_lor; } + +# C6.2.99 LDADDH, LDADDAH, LDADDALH, LDADDLH page C6-936 line 52084 MATCH x78200000/mask=xff20fc00 +# C6.2.118 LDCLRH, LDCLRAH, LDCLRALH, LDCLRLH page C6-971 line 54010 MATCH x78201000/mask=xff20fc00 +# C6.2.121 LDEORH, LDEORAH, LDEORALH, LDEORLH page C6-978 line 54432 MATCH x78202000/mask=xff20fc00 +# C6.2.147 LDSETH, LDSETAH, LDSETALH, LDSETLH page C6-1034 line 57799 MATCH x78203000/mask=xff20fc00 +# C6.2.150 LDSMAXH, LDSMAXAH, LDSMAXALH, LDSMAXLH page C6-1041 line 58221 MATCH x78204000/mask=xff20fc00 +# C6.2.153 LDSMINH, LDSMINAH, LDSMINALH, LDSMINLH page C6-1048 line 58643 MATCH x78205000/mask=xff20fc00 +# C6.2.162 LDUMAXH, LDUMAXAH, LDUMAXALH, LDUMAXLH page C6-1067 line 59743 MATCH x78206000/mask=xff20fc00 +# C6.2.165 LDUMINH, LDUMINAH, LDUMINALH, LDUMINLH page C6-1074 line 60165 MATCH x78207000/mask=xff20fc00 +# CONSTRUCT x78200000/mask=xff208c00 MATCHED 8 DOCUMENTED OPCODES +# AUNIT --inst x78200000/mask=xff208c00 --status nomem + +# size=0b01 (3031) + +:ld^ls_opc2^ls_lor^"h" aa_Ws, aa_Wt, [Rn_GPR64xsp] +is b_3031=0b01 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc2 & ls_loa & ls_lor & aa_Wt & aa_Ws & Rn_GPR64xsp +{ build ls_loa; build ls_opc2; aa_Wt = tmp_ldWn; build ls_lor; } + +# C6.2.100 LDADD, LDADDA, LDADDAL, LDADDL page C6-938 line 52210 MATCH xb8200000/mask=xbf20fc00 +# C6.2.119 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-973 line 54136 MATCH xb8201000/mask=xbf20fc00 +# C6.2.122 LDEOR, LDEORA, LDEORAL, LDEORL page C6-980 line 54558 MATCH xb8202000/mask=xbf20fc00 +# C6.2.148 LDSET, LDSETA, LDSETAL, LDSETL page C6-1036 line 57925 MATCH xb8203000/mask=xbf20fc00 +# C6.2.151 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1043 line 58347 MATCH xb8204000/mask=xbf20fc00 +# C6.2.154 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1050 line 58769 MATCH xb8205000/mask=xbf20fc00 +# C6.2.163 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1069 line 59869 MATCH xb8206000/mask=xbf20fc00 +# C6.2.166 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1076 line 60291 MATCH xb8207000/mask=xbf20fc00 +# C6.2.249 STADD, STADDL page C6-1215 line 67552 MATCH xb820001f/mask=xbfa0fc1f +# C6.2.252 STCLR, STCLRL page C6-1221 line 67842 MATCH xb820101f/mask=xbfa0fc1f +# C6.2.255 STEOR, STEORL page C6-1227 line 68131 MATCH xb820201f/mask=xbfa0fc1f +# C6.2.282 STSET, STSETL page C6-1280 line 71130 MATCH 
xb820301f/mask=xbfa0fc1f
+# C6.2.285 STSMAX, STSMAXL page C6-1286 line 71425 MATCH xb820401f/mask=xbfa0fc1f
+# C6.2.288 STSMIN, STSMINL page C6-1292 line 71721 MATCH xb820501f/mask=xbfa0fc1f
+# C6.2.294 STUMAX, STUMAXL page C6-1304 line 72324 MATCH xb820601f/mask=xbfa0fc1f
+# C6.2.297 STUMIN, STUMINL page C6-1310 line 72621 MATCH xb820701f/mask=xbfa0fc1f
+# CONSTRUCT xb8200000/mask=xff208c00 MATCHED 16 DOCUMENTED OPCODES
+# AUNIT --inst xb8200000/mask=xff208c00 --status nomem
+
+# size=0b10 (3031)
+
+:ld^ls_opc4^ls_lor aa_Ws, aa_Wt, [Rn_GPR64xsp]
+is b_3031=0b10 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc4 & ls_loa & ls_lor & aa_Wt & aa_Ws & Rn_GPR64xsp
+{ build ls_loa; build ls_opc4; aa_Wt = tmp_ldWn; build ls_lor; }
+
+# C6.2.100 LDADD, LDADDA, LDADDAL, LDADDL page C6-938 line 52210 MATCH xb8200000/mask=xbf20fc00
+# C6.2.119 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-973 line 54136 MATCH xb8201000/mask=xbf20fc00
+# C6.2.122 LDEOR, LDEORA, LDEORAL, LDEORL page C6-980 line 54558 MATCH xb8202000/mask=xbf20fc00
+# C6.2.148 LDSET, LDSETA, LDSETAL, LDSETL page C6-1036 line 57925 MATCH xb8203000/mask=xbf20fc00
+# C6.2.151 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1043 line 58347 MATCH xb8204000/mask=xbf20fc00
+# C6.2.154 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1050 line 58769 MATCH xb8205000/mask=xbf20fc00
+# C6.2.163 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1069 line 59869 MATCH xb8206000/mask=xbf20fc00
+# C6.2.166 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1076 line 60291 MATCH xb8207000/mask=xbf20fc00
+# C6.2.249 STADD, STADDL page C6-1215 line 67552 MATCH xb820001f/mask=xbfa0fc1f
+# C6.2.252 STCLR, STCLRL page C6-1221 line 67842 MATCH xb820101f/mask=xbfa0fc1f
+# C6.2.255 STEOR, STEORL page C6-1227 line 68131 MATCH xb820201f/mask=xbfa0fc1f
+# C6.2.282 STSET, STSETL page C6-1280 line 71130 MATCH xb820301f/mask=xbfa0fc1f
+# C6.2.285 STSMAX, STSMAXL page C6-1286 line 71425 MATCH xb820401f/mask=xbfa0fc1f
+# C6.2.288 STSMIN, STSMINL page C6-1292 line 71721 MATCH xb820501f/mask=xbfa0fc1f
+# C6.2.294 STUMAX, STUMAXL page C6-1304 line 72324 MATCH xb820601f/mask=xbfa0fc1f
+# C6.2.297 STUMIN, STUMINL page C6-1310 line 72621 MATCH xb820701f/mask=xbfa0fc1f
+# CONSTRUCT xf8200000/mask=xff208c00 MATCHED 16 DOCUMENTED OPCODES
+# AUNIT --inst xf8200000/mask=xff208c00 --status nomem
+
+# size=0b11 (3031)
+
+:ld^ls_opc8^ls_lor aa_Xs, aa_Xt, [Rn_GPR64xsp]
+is b_3031=0b11 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc8 & ls_loa & ls_lor & aa_Xt & aa_Xs & Rn_GPR64xsp
+{ build ls_loa; build ls_opc8; aa_Xt = tmp_ldXn; build ls_lor; }
+
+# C6.2.101 LDAPR page C6-941 line 52380 MATCH xb8a0c000/mask=xbfe0fc00
+# CONSTRUCT xb8a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xb8a0c000/mask=xffe0fc00 --status nomem
+# TODO unsure of load/release semantics for this instruction
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111
+# size == 10 32-bit variant
+
+:ldapr aa_Wt, [Rn_GPR64xsp]
+is b_3031=0b10 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Wt & ls_data4
+{
+ aa_Wt = tmp_ldWn;
+}
+
+# C6.2.101 LDAPR page C6-941 line 52380 MATCH xb8a0c000/mask=xbfe0fc00
+# CONSTRUCT xf8a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xf8a0c000/mask=xffe0fc00 --status nomem
+# TODO unsure of load/release semantics for this instruction
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111
+# size == 11 64-bit variant
+
+:ldapr aa_Xt, [Rn_GPR64xsp]
+is b_3031=0b11 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Xt & ls_data8
+{
+ aa_Xt = tmp_ldXn;
+}
+
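+# As the TODOs above note, the RCpc acquire ordering of LDAPR is not modeled
+# here: e.g. "ldapr x1, [x0]" lifts to a plain 64-bit load of [x0] into x1.
+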
+# C6.2.102 LDAPRB page C6-943 line 52478 MATCH x38a0c000/mask=xffe0fc00 +# CONSTRUCT x38a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x38a0c000/mask=xffe0fc00 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 +# TODO unsure of load/release semantics for this instruction + +:ldaprb aa_Wt, [Rn_GPR64xsp] +is b_3031=0b00 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Wt & ls_data1 +{ + aa_Wt = tmp_ldWn; +} + +# C6.2.103 LDAPRH page C6-945 line 52562 MATCH x78a0c000/mask=xffe0fc00 +# CONSTRUCT x78a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x78a0c000/mask=xffe0fc00 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 + +:ldaprh aa_Wt, [Rn_GPR64xsp] +is b_3031=0b01 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Wt & ls_data2 +{ + aa_Wt = tmp_ldWn; +} + + +# C6.2.104 LDAPUR page C6-947 line 52646 MATCH x99400000/mask=xbfe00c00 +# CONSTRUCT x99400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# The following commands are not yet implemented. + + +# x99400000/mask=xbfe00c00 NOT MATCHED BY ANY CONSTRUCTOR + +:ldapur aa_Wt, addr_SIMM9 +is b_3031=0b10 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt +{ + aa_Xt = zext(*:4 addr_SIMM9); +} + +# C6.2.104 LDAPUR page C6-947 line 52646 MATCH x99400000/mask=xbfe00c00 +# CONSTRUCT xd9400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES + +:ldapur aa_Xt, addr_SIMM9 +is b_3031=0b11 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Xt +{ + aa_Xt = *addr_SIMM9; +} + +# C6.2.105 LDAPURB page C6-949 line 52752 MATCH x19400000/mask=xffe00c00 +# CONSTRUCT x19400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# x19400000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR + +:ldapurb aa_Wt, addr_SIMM9 +is b_3031=0b00 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt +{ + aa_Xt = zext(*:1 addr_SIMM9); +} + +# C6.2.106 LDAPURH page C6-951 line 52846 MATCH x59400000/mask=xffe00c00 +# CONSTRUCT x59400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# x59400000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR + +:ldapurh aa_Wt, addr_SIMM9 +is b_3031=0b01 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt +{ + aa_Xt = zext(*:2 addr_SIMM9); +} + + +# C6.2.107 LDAPURSB page C6-953 line 52940 MATCH x19800000/mask=xffa00c00 +# CONSTRUCT x19c00000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# x19800000/mask=xffa00c00 NOT MATCHED BY ANY CONSTRUCTOR + +:ldapursb aa_Wt, addr_SIMM9 +is b_3031=0b00 & b_2329=0b0110011 & b_22=1 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt +{ + aa_Xt = 0; + aa_Wt = sext(*:1 addr_SIMM9); +} + +# C6.2.107 LDAPURSB page C6-953 line 52940 MATCH x19800000/mask=xffa00c00 +# CONSTRUCT x19800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES + +:ldapursb aa_Xt, addr_SIMM9 +is b_3031=0b00 & b_2329=0b0110011 & b_22=0 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Xt +{ + aa_Xt = sext(*:1 addr_SIMM9); +} + +# C6.2.108 LDAPURSH page C6-955 line 53070 MATCH x59800000/mask=xffa00c00 +# CONSTRUCT x59c00000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# x59800000/mask=xffa00c00 NOT MATCHED BY ANY CONSTRUCTOR + +:ldapursh aa_Wt, addr_SIMM9 +is b_3031=0b01 & b_2329=0b0110011 & b_22=1 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt +{ + aa_Xt = 0; + aa_Wt = sext(*:2 addr_SIMM9); +} + +# C6.2.108 LDAPURSH page C6-955 line 53070 MATCH x59800000/mask=xffa00c00 +# CONSTRUCT x59800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES + +:ldapursh aa_Xt, addr_SIMM9 +is b_3031=0b01 & b_2329=0b0110011 & b_22=0 & b_2121=0b0 & 
b_1011=0b00 & addr_SIMM9 & aa_Xt
+{
+ aa_Xt = sext(*:2 addr_SIMM9);
+}
+
+# C6.2.109 LDAPURSW page C6-957 line 53200 MATCH x99800000/mask=xffe00c00
+# CONSTRUCT x99800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# x99800000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR
+
+:ldapursw aa_Xt, addr_SIMM9
+is b_3031=0b10 & b_2129=0b011001100 & b_1011=0b00 & addr_SIMM9 & aa_Xt
+{
+ aa_Xt = sext(*:4 addr_SIMM9);
+}
+
+# C6.2.110 LDAR page C6-959 line 53294 MATCH x88c08000/mask=xbfe08000
+# CONSTRUCT xc8c08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8c08000/mask=xffe08000 --status nomem
+# The manual states that Rs and Rt2 should be all ones, which is
+# optionally enforced.
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:ldar Rt_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR64
+{
+ Rt_GPR64 = *addrReg;
+}
+
+# C6.2.110 LDAR page C6-959 line 53294 MATCH x88c08000/mask=xbfe08000
+# CONSTRUCT x88dffc00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88dffc00/mask=xfffffc00 --status nomem
+# Enforce SHOULD BE ONE fields b_1620 & b_1014
+
+:ldar Rt_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_1620=0b11111 & b_15=1 & b_1014=0b11111 & addrReg & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:4 addrReg);
+}
+
+# C6.2.111 LDARB page C6-961 line 53384 MATCH x08c08000/mask=xffe08000
+# CONSTRUCT x08c08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x08c08000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:ldarb Rt_GPR32, addrReg
+is size.ldstr=0 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:1 addrReg);
+}
+
+# C6.2.112 LDARH page C6-962 line 53450 MATCH x48c08000/mask=xffe08000
+# CONSTRUCT x48dffc00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x48dffc00/mask=xfffffc00 --status nomem
+# Enforce SHOULD BE ONE fields b_1620 & b_1014
+
+:ldarh Rt_GPR32, addrReg
+is size.ldstr=1 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_1620=0b11111 & b_15=1 & b_1014=0b11111 & addrReg & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:2 addrReg);
+}
+
+# C6.2.113 LDAXP page C6-963 line 53516 MATCH x88608000/mask=xbfe08000
+# CONSTRUCT xc8608000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8608000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111
+
+:ldaxp Rt_GPR64, Rt2_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_15=1 & Rt2_GPR64 & addrReg & Rt_GPR64
+{
+ Rt_GPR64 = *(addrReg);
+ Rt2_GPR64 = *(addrReg+8);
+}
+
+# C6.2.113 LDAXP page C6-963 line 53516 MATCH x88608000/mask=xbfe08000
+# CONSTRUCT x88608000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88608000/mask=xffe08000 --status nomem
+
+:ldaxp Rt_GPR32, Rt2_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_1620 & b_15=1 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64
+{
+ Rt_GPR64 = zext(*:4(addrReg));
+ Rt2_GPR64 = zext(*:4(addrReg+4));
+}
+
+# C6.2.114 LDAXR page C6-965 line 53649 MATCH x88408000/mask=xbfe08000
+# CONSTRUCT xc8408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8408000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:ldaxr Rt_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR64
+{
+ Rt_GPR64 = *addrReg;
+}
+
+# C6.2.114 LDAXR page C6-965 line 53649
MATCH x88408000/mask=xbfe08000 +# CONSTRUCT x88408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x88408000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldaxr Rt_GPR32, addrReg +is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 +{ + tmp:4 = *addrReg; + Rt_GPR64 = zext(tmp); +} + +# C6.2.115 LDAXRB page C6-967 line 53742 MATCH x08408000/mask=xffe08000 +# CONSTRUCT x08408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x08408000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldaxrb Rt_GPR32, addrReg +is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 +{ + tmp:1 = *addrReg; + Rt_GPR64 = zext(tmp); +} + +# C6.2.116 LDAXRH page C6-968 line 53813 MATCH x48408000/mask=xffe08000 +# CONSTRUCT x48408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x48408000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldaxrh Rt_GPR32, addrReg +is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 +{ + tmp:2 = *addrReg; + Rt_GPR64 = zext(tmp); +} + +# C6.2.125 LDLARB page C6-985 line 54865 MATCH x08c00000/mask=xffe08000 +# CONSTRUCT x08c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x08c00000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 +# size=0b00 (3031) + +:ldlarb aa_Wt, [Rn_GPR64xsp] +is b_3031=0b00 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp +{ LOAcquire(); aa_Wt = zext(*:1 Rn_GPR64xsp); } + +# C6.2.126 LDLARH page C6-986 line 54932 MATCH x48c00000/mask=xffe08000 +# CONSTRUCT x48c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x48c00000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 +# size=0b01 (3031) + +:ldlarh aa_Wt, [Rn_GPR64xsp] +is b_3031=0b01 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp +{ LOAcquire(); aa_Wt = zext(*:2 Rn_GPR64xsp); } + +# C6.2.127 LDLAR page C6-987 line 54999 MATCH x88c00000/mask=xbfe08000 +# CONSTRUCT x88c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x88c00000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 +# size=0b10 (3031) + +:ldlar aa_Wt, [Rn_GPR64xsp] +is b_3031=0b10 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp +{ LOAcquire(); aa_Wt = *:4 Rn_GPR64xsp; } + +# C6.2.127 LDLAR page C6-987 line 54999 MATCH x88c00000/mask=xbfe08000 +# CONSTRUCT xc8c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xc8c00000/mask=xffe08000 --status nomem +# size=0b11 (3031) + +:ldlar aa_Xt, [Rn_GPR64xsp] +is b_3031=0b11 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Xt & Rn_GPR64xsp +{ LOAcquire(); aa_Xt = *:8 Rn_GPR64xsp; } + +# C6.2.128 LDNP page C6-989 line 55089 MATCH x28400000/mask=x7fc00000 +# CONSTRUCT x28400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x28400000/mask=xffc00000 --status nomem + +:ldnp Rt_GPR32, Rt2_GPR32, addrPairIndexed +is b_3031=0b00 & b_2229=0b10100001 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64 +{ + Rt_GPR64 = zext(*:4 addrPairIndexed); + Rt2_GPR64 = zext(*:4 (addrPairIndexed + 4)); +} + +# C6.2.128 LDNP page C6-989 line 55089 MATCH x28400000/mask=x7fc00000 +# CONSTRUCT 
xa8400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xa8400000/mask=xffc00000 --status nomem
+
+:ldnp Rt_GPR64, Rt2_GPR64, addrPairIndexed
+is b_3031=0b10 & b_2229=0b10100001 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64
+{
+ Rt_GPR64 = *addrPairIndexed;
+ Rt2_GPR64 = *(addrPairIndexed + 8);
+}
+
+# C6.2.129 LDP page C6-991 line 55214 MATCH x28c00000/mask=x7fc00000
+# C6.2.129 LDP page C6-991 line 55214 MATCH x29c00000/mask=x7fc00000
+# C6.2.129 LDP page C6-991 line 55214 MATCH x29400000/mask=x7fc00000
+# C6.2.128 LDNP page C6-989 line 55089 MATCH x28400000/mask=x7fc00000
+# CONSTRUCT x28400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst x28400000/mask=xfe400000 --status nomem
+# opc == 00 post-index, pre-index, and signed 32-bit variant
+
+:ldp Rt_GPR32, Rt2_GPR32, addrPairIndexed
+is b_3031=0b00 & b_2529=0b10100 & (b_24=1 | b_23=1) & b_22=1 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64
+{
+ Rt_GPR64 = zext(*:4 addrPairIndexed);
+ Rt2_GPR64 = zext(*:4 (addrPairIndexed + 4));
+}
+
+# C6.2.129 LDP page C6-991 line 55214 MATCH x28c00000/mask=x7fc00000
+# C6.2.129 LDP page C6-991 line 55214 MATCH x29c00000/mask=x7fc00000
+# C6.2.129 LDP page C6-991 line 55214 MATCH x29400000/mask=x7fc00000
+# C6.2.128 LDNP page C6-989 line 55089 MATCH x28400000/mask=x7fc00000
+# CONSTRUCT xa8400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst xa8400000/mask=xfe400000 --status nomem
+# opc == 10 post-index, pre-index, and signed 64-bit variant
+
+:ldp Rt_GPR64, Rt2_GPR64, addrPairIndexed
+is b_3031=0b10 & b_2529=0b10100 & (b_24=1 | b_23=1) & b_22=1 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64
+{
+ Rt_GPR64 = *addrPairIndexed;
+ Rt2_GPR64 = *(addrPairIndexed + 8);
+}
+
+# C6.2.130 LDPSW page C6-994 line 55428 MATCH x68c00000/mask=xffc00000
+# C6.2.130 LDPSW page C6-994 line 55428 MATCH x69c00000/mask=xffc00000
+# C6.2.130 LDPSW page C6-994 line 55428 MATCH x69400000/mask=xffc00000
+# CONSTRUCT x68400000/mask=xfe400000 MATCHED 3 DOCUMENTED OPCODES
+# AUNIT --inst x68400000/mask=xfe400000 --status nomem
+
+:ldpsw Rt_GPR64, Rt2_GPR64, addrPairIndexed
+is b_2531=0b0110100 & (b_24=1 | b_23=1) & b_22=1 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:4 addrPairIndexed);
+ Rt2_GPR64 = sext(*:4 (addrPairIndexed+4));
+}
+
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb9400000/mask=xbfc00000
+# CONSTRUCT xb9400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xb9400000/mask=xffc00000 --status nomem
+
+:ldr Rt_GPR32, addrUIMM
+is size.ldstr=2 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=1 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:4 addrUIMM);
+}
+
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb8400400/mask=xbfe00c00
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb8400c00/mask=xbfe00c00
+# C6.2.155 LDTR page C6-1053 line 58939 MATCH xb8400800/mask=xbfe00c00
+# C6.2.167 LDUR page C6-1079 line 60461 MATCH xb8400000/mask=xbfe00c00
+# CONSTRUCT xb8400000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst xb8400000/mask=xffe00000 --status nomem
+
+:ld^UnscPriv^"r" Rt_GPR32, addrIndexed
+is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:4 addrIndexed);
+}
+
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb8400400/mask=xbfe00c00
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb8400c00/mask=xbfe00c00
+# CONSTRUCT xb8400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xb8400400/mask=xffe00400 --status nomem
+
+:ldr Rt_GPR32, addrIndexed
+is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:4 addrIndexed);
+}
+
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb9400000/mask=xbfc00000
+# CONSTRUCT xf9400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xf9400000/mask=xffc00000 --status nomem
+
+:ldr Rt_GPR64, addrUIMM
+is size.ldstr=3 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=1 & addrUIMM & Rn_GPR64xsp & Rt_GPR64
+{
+ Rt_GPR64 = *addrUIMM;
+}
+
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb8400400/mask=xbfe00c00
+# C6.2.131 LDR (immediate) page C6-997 line 55599 MATCH xb8400c00/mask=xbfe00c00
+# CONSTRUCT xf8400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xf8400400/mask=xffe00400 --status nomem
+
+:ldr Rt_GPR64, addrIndexed
+is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = *addrIndexed;
+}
+
+# C6.2.132 LDR (literal) page C6-1000 line 55789 MATCH x18000000/mask=xbf000000
+# CONSTRUCT x18000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x18000000/mask=xff000000 --status nomem
+
+:ldr Rt_GPR32, AddrLoc19
+is size.ldstr=0 & b_2729=3 & v=0 & b_2425=0 & AddrLoc19 & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:4 AddrLoc19);
+}
+
+# C6.2.132 LDR (literal) page C6-1000 line 55789 MATCH x18000000/mask=xbf000000
+# CONSTRUCT x58000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x58000000/mask=xff000000 --status nomem
+
+:ldr Rt_GPR64, AddrLoc19
+is size.ldstr=1 & b_2729=3 & v=0 & b_2425=0 & AddrLoc19 & Rt_GPR64
+{
+ Rt_GPR64 = *:8 AddrLoc19;
+}
+
+# C6.2.133 LDR (register) page C6-1002 line 55887 MATCH xb8600800/mask=xbfe00c00
+# CONSTRUCT xb8600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xb8600800/mask=xffe00c00 --status nomem
+
+:ldr Rt_GPR32, addrIndexed
+is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ Rt_GPR64 = zext(*:4 addrIndexed);
+}
+
+# C6.2.133 LDR (register) page C6-1002 line 55887 MATCH xb8600800/mask=xbfe00c00
+# CONSTRUCT xf8600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xf8600800/mask=xffe00c00 --status nomem
+
+:ldr Rt_GPR64, addrIndexed
+is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = *addrIndexed;
+}
+
+
+# C6.2.134 LDRAA, LDRAB page C6-1004 line 56006 MATCH xf8200400/mask=xff200400
+# CONSTRUCT xf8200400/mask=xffa00400 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xf8200400/mask=xffa00400 --status nomem
+# M == 0 && W == 0 key A, offset variant
+# M == 0 && W == 1 key A, offset variant
+
+:ldraa Rt_GPR64, addrIndexed
+is ldraa__PACpart & b_2431=0b11111000 & b_23=0 & b_21=1 & b_10=1 & addrIndexed & Rn_GPR64xsp & Rt_GPR64
+{
+ build ldraa__PACpart;
+ build addrIndexed;
+ # Note: if writeback is used, the writeback'd value doesn't have a PAC code! It's the output of AuthDA.
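+ # Illustrative sketch (an assumption about ldraa__PACpart, which is defined elsewhere):
+ # for "ldraa x0, [x1, #8]!" the pointer in x1 is first authenticated with data key A,
+ # x0 is loaded from that authenticated address plus 8, and the same PAC-less address is
+ # what gets written back to x1, per the note above.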
+ Rt_GPR64 = *:8 addrIndexed; +} + +# C6.2.134 LDRAA, LDRAB page C6-1004 line 56006 MATCH xf8200400/mask=xff200400 +# CONSTRUCT xf8a00400/mask=xffa00400 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8a00400/mask=xffa00400 --status nomem +# M == 1 && W == 0 key B, offset variant +# M == 1 && W == 1 key B, offset variant + +:ldrab Rt_GPR64, addrIndexed +is ldrab__PACpart & b_2431=0b11111000 & b_23=1 & b_21=1 & b_10=1 & addrIndexed & Rn_GPR64xsp & Rt_GPR64 +{ + build ldrab__PACpart; + build addrIndexed; + # Note: if writeback is used, the writeback'd value doesn't have a PAC code! It's the output of AuthDB. + Rt_GPR64 = *:8 addrIndexed; +} + +# C6.2.135 LDRB (immediate) page C6-1006 line 56141 MATCH x38400400/mask=xffe00c00 +# C6.2.135 LDRB (immediate) page C6-1006 line 56141 MATCH x38400c00/mask=xffe00c00 +# CONSTRUCT x38400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x38400400/mask=xffe00400 --status nomem +# post-index and pre-index variants + +:ldrb Rt_GPR32, addrIndexed +is b_2131=0b00111000010 & b_10=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:1 addrIndexed); +} + +# C6.2.135 LDRB (immediate) page C6-1006 line 56141 MATCH x39400000/mask=xffc00000 +# CONSTRUCT x39400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x39400000/mask=xffc00000 --status nomem +# unsigned offset variant + +:ldrb Rt_GPR32, addrIndexed +is b_2231=0b0011100101 & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:1 addrIndexed); +} + +# C6.2.136 LDRB (register) page C6-1009 line 56296 MATCH x38600800/mask=xffe00c00 +# CONSTRUCT x38600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x38600800/mask=xffe00c00 --status nomem +# extended register and shifted register variant +# determined in addrIndexed subtable + +:ldrb Rt_GPR32, addrIndexed +is b_2131=0b00111000011 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:1 addrIndexed); +} + +# C6.2.137 LDRH (immediate) page C6-1011 line 56395 MATCH x79400000/mask=xffc00000 +# CONSTRUCT x79400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x79400000/mask=xffc00000 --status nomem + +:ldrh Rt_GPR32, addrUIMM +is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=1 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:2 addrUIMM); +} + +# C6.2.137 LDRH (immediate) page C6-1011 line 56395 MATCH x78400400/mask=xffe00c00 +# C6.2.137 LDRH (immediate) page C6-1011 line 56395 MATCH x78400c00/mask=xffe00c00 +# CONSTRUCT x78400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x78400400/mask=xffe00400 --status nomem + +:ldrh Rt_GPR32, addrIndexed +is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:2 addrIndexed); +} + +# C6.2.138 LDRH (register) page C6-1014 line 56550 MATCH x78600800/mask=xffe00c00 +# CONSTRUCT x78600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x78600800/mask=xffe00c00 --status nomem + +:ldrh Rt_GPR32, addrIndexed +is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:2 addrIndexed); +} + +# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x39800000/mask=xff800000 +# CONSTRUCT x39c00000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x39c00000/mask=xffc00000 --status nomem + +:ldrsb Rt_GPR32, addrIndexed +is size.ldstr=0 & b_2729=7 & v=0 & b_2425=1 & b_2223=3 & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + 
local tmp:4 = sext(*:1 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800400/mask=xffa00c00
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800c00/mask=xffa00c00
+# C6.2.158 LDTRSB page C6-1059 line 59248 MATCH x38800800/mask=xffa00c00
+# C6.2.170 LDURSB page C6-1083 line 60695 MATCH x38800000/mask=xffa00c00
+# CONSTRUCT x38c00000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst x38c00000/mask=xffe00000 --status nomem
+
+:ld^UnscPriv^"rsb" Rt_GPR32, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:1 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800400/mask=xffa00c00
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800c00/mask=xffa00c00
+# CONSTRUCT x38c00400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x38c00400/mask=xffe00400 --status nomem
+
+:ldrsb Rt_GPR32, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:1 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x39800000/mask=xff800000
+# CONSTRUCT x39800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x39800000/mask=xffc00000 --status nomem
+
+:ldrsb Rt_GPR64, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:1 addrIndexed);
+}
+
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800400/mask=xffa00c00
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800c00/mask=xffa00c00
+# C6.2.158 LDTRSB page C6-1059 line 59248 MATCH x38800800/mask=xffa00c00
+# C6.2.170 LDURSB page C6-1083 line 60695 MATCH x38800000/mask=xffa00c00
+# CONSTRUCT x38800000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst x38800000/mask=xffe00000 --status nomem
+
+:ld^UnscPriv^"rsb" Rt_GPR64, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:1 addrIndexed);
+}
+
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800400/mask=xffa00c00
+# C6.2.139 LDRSB (immediate) page C6-1016 line 56651 MATCH x38800c00/mask=xffa00c00
+# CONSTRUCT x38800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x38800400/mask=xffe00400 --status nomem
+
+:ldrsb Rt_GPR64, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:1 addrIndexed);
+}
+
+# C6.2.140 LDRSB (register) page C6-1019 line 56871 MATCH x38a00800/mask=xffa00c00
+# CONSTRUCT x38e00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x38e00800/mask=xffe00c00 --status nomem
+
+:ldrsb Rt_GPR32, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:1 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.140 LDRSB (register) page C6-1019 line 56871 MATCH x38a00800/mask=xffa00c00
+# CONSTRUCT x38a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x38a00800/mask=xffe00c00 --status nomem
+
+:ldrsb Rt_GPR64, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:1 addrIndexed);
+}
+
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x79800000/mask=xff800000
+# CONSTRUCT x79c00000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x79c00000/mask=xffc00000 --status nomem
+
+:ldrsh Rt_GPR32, addrUIMM
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_2223=3 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:2 addrUIMM);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800400/mask=xffa00c00
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800c00/mask=xffa00c00
+# C6.2.159 LDTRSH page C6-1061 line 59383 MATCH x78800800/mask=xffa00c00
+# C6.2.171 LDURSH page C6-1085 line 60810 MATCH x78800000/mask=xffa00c00
+# CONSTRUCT x78c00000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst x78c00000/mask=xffe00000 --status nomem
+
+:ld^UnscPriv^"rsh" Rt_GPR32, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:2 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800400/mask=xffa00c00
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800c00/mask=xffa00c00
+# CONSTRUCT x78c00400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x78c00400/mask=xffe00400 --status nomem
+
+:ldrsh Rt_GPR32, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:2 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x79800000/mask=xff800000
+# CONSTRUCT x79800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x79800000/mask=xffc00000 --status nomem
+
+:ldrsh Rt_GPR64, addrUIMM
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrUIMM & Rn_GPR64xsp & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:2 addrUIMM);
+}
+
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800400/mask=xffa00c00
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800c00/mask=xffa00c00
+# C6.2.159 LDTRSH page C6-1061 line 59383 MATCH x78800800/mask=xffa00c00
+# C6.2.171 LDURSH page C6-1085 line 60810 MATCH x78800000/mask=xffa00c00
+# CONSTRUCT x78800000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES
+# AUNIT --inst x78800000/mask=xffe00000 --status nomem
+
+:ld^UnscPriv^"rsh" Rt_GPR64, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:2 addrIndexed);
+}
+
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800400/mask=xffa00c00
+# C6.2.141 LDRSH (immediate) page C6-1021 line 57010 MATCH x78800c00/mask=xffa00c00
+# CONSTRUCT x78800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x78800400/mask=xffe00400 --status nomem
+
+:ldrsh Rt_GPR64, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64
+{
+ Rt_GPR64 = sext(*:2 addrIndexed);
+}
+
+# C6.2.142 LDRSH (register) page C6-1024 line 57230 MATCH x78a00800/mask=xffa00c00
+# CONSTRUCT x78e00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x78e00800/mask=xffe00c00 --status nomem
+
+:ldrsh Rt_GPR32, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64
+{
+ local tmp:4 = sext(*:2 addrIndexed);
+ Rt_GPR64 = zext(tmp);
+}
+
+# C6.2.142 LDRSH (register) page C6-1024 line 57230 MATCH x78a00800/mask=xffa00c00
+# CONSTRUCT x78a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x78a00800/mask=xffe00c00 --status nomem
+
+:ldrsh Rt_GPR64, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 &
b_1011=2 & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = sext(*:2 addrIndexed); +} + +# C6.2.143 LDRSW (immediate) page C6-1026 line 57364 MATCH xb8800400/mask=xffe00c00 +# C6.2.143 LDRSW (immediate) page C6-1026 line 57364 MATCH xb8800c00/mask=xffe00c00 +# CONSTRUCT xb8800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb8800400/mask=xffe00400 --status nomem + +:ldrsw Rt_GPR64, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = sext(*:4 addrIndexed); +} + +# C6.2.143 LDRSW (immediate) page C6-1026 line 57364 MATCH xb9800000/mask=xffc00000 +# CONSTRUCT xb9800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb9800000/mask=xffc00000 --status nomem + +:ldrsw Rt_GPR64, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = sext(*:4 addrIndexed); +} + +# C6.2.144 LDRSW (literal) page C6-1029 line 57519 MATCH x98000000/mask=xff000000 +# CONSTRUCT x98000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x98000000/mask=xff000000 --status nomem + +:ldrsw Rt_GPR64, AddrLoc19 +is b_2431=0b10011000 & AddrLoc19 & Rt_GPR64 +{ + Rt_GPR64 = sext(*:4 AddrLoc19); +} + +# C6.2.145 LDRSW (register) page C6-1030 line 57575 MATCH xb8a00800/mask=xffe00c00 +# CONSTRUCT xb8a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb8a00800/mask=xffe00c00 --status nomem + +:ldrsw Rt_GPR64, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = sext(*:4 addrIndexed); +} + +# C6.2.155 LDTR page C6-1053 line 58939 MATCH xb8400800/mask=xbfe00c00 +# CONSTRUCT xf8400800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8400800/mask=xffe00c00 --status nomem + +:ld^UnscPriv^"r" Rt_GPR64, addrIndexed +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=1 & b_2121=0 & b_1011=2 & UnscPriv & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = *addrIndexed; +} + +# C6.2.156 LDTRB page C6-1055 line 59052 MATCH x38400800/mask=xffe00c00 +# C6.2.168 LDURB page C6-1081 line 60555 MATCH x38400000/mask=xffe00c00 +# CONSTRUCT x38400000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x38400000/mask=xffe00000 --status nomem + +:ld^UnscPriv^"rb" Rt_GPR32, addrIndexed +is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:1 addrIndexed); +} + +# C6.2.157 LDTRH page C6-1057 line 59150 MATCH x78400800/mask=xffe00c00 +# C6.2.169 LDURH page C6-1082 line 60625 MATCH x78400000/mask=xffe00c00 +# CONSTRUCT x78400000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x78400000/mask=xffe00000 --status nomem + +:ld^UnscPriv^"rh" Rt_GPR32, addrIndexed +is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:2 addrIndexed); +} + +# C6.2.160 LDTRSW page C6-1063 line 59519 MATCH xb8800800/mask=xffe00c00 +# C6.2.172 LDURSW page C6-1087 line 60925 MATCH xb8800000/mask=xffe00c00 +# CONSTRUCT xb8800000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb8800000/mask=xffe00000 --status nomem + +:ld^UnscPriv^"rsw" Rt_GPR64, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = sext(*:4 addrIndexed); +} + +# C6.2.167 LDUR page C6-1079 line 60461 MATCH xb8400000/mask=xbfe00c00 +# CONSTRUCT xf8400000/mask=xffe00c00 MATCHED 1 
DOCUMENTED OPCODES +# AUNIT --inst xf8400000/mask=xffe00c00 --status nomem + +:ld^UnscPriv^"r" Rt_GPR64, addrIndexed +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2122=2 & b_1011=0 & UnscPriv & addrIndexed & Rt_GPR64 +{ + Rt_GPR64 = *addrIndexed; +} + +# C6.2.173 LDXP page C6-1088 line 60995 MATCH x88600000/mask=xbfe08000 +# CONSTRUCT xc8600000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xc8600000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 + +:ldxp Rt_GPR64, Rt2_GPR64, addrReg +is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_15=0 & Rt2_GPR64 & addrReg & Rt_GPR64 +{ + Rt_GPR64 = *addrReg; + Rt2_GPR64 = *(addrReg + 8); +} + +# C6.2.173 LDXP page C6-1088 line 60995 MATCH x88600000/mask=xbfe08000 +# CONSTRUCT x88600000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x88600000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 + +:ldxp Rt_GPR32, Rt2_GPR32, addrReg +is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_15=0 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64 +{ + Rt_GPR64 = zext(*:4 addrReg); + Rt2_GPR64 = zext(*:4 (addrReg + 4)); +} + +# C6.2.174 LDXR page C6-1090 line 61127 MATCH x88400000/mask=xbfe08000 +# CONSTRUCT xc8400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xc8400000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldxr Rt_GPR64, addrReg +is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR64 +{ + Rt_GPR64 = *addrReg; +} + +# C6.2.174 LDXR page C6-1090 line 61127 MATCH x88400000/mask=xbfe08000 +# CONSTRUCT x88400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x88400000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldxr Rt_GPR32, addrReg +is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:4 addrReg); +} + +# C6.2.175 LDXRB page C6-1092 line 61219 MATCH x08400000/mask=xffe08000 +# CONSTRUCT x08400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x08400000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldxrb Rt_GPR32, addrReg +is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:1 addrReg); +} + +# C6.2.176 LDXRH page C6-1093 line 61289 MATCH x48400000/mask=xffe08000 +# CONSTRUCT x48400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x48400000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 + +:ldxrh Rt_GPR32, addrReg +is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR32 & Rt_GPR64 +{ + Rt_GPR64 = zext(*:2 addrReg); +} + +# C6.2.177 LSL (register) page C6-1094 line 61359 MATCH x1ac02000/mask=x7fe0fc00 +# C6.2.179 LSLV page C6-1098 line 61543 MATCH x1ac02000/mask=x7fe0fc00 +# CONSTRUCT x1ac02000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x1ac02000/mask=xffe0fc00 --status pass + +:lsl Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x8 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + shiftval:8 = zext(Rm_GPR32 & 0x1f); + tmp_1:4 = Rn_GPR32 << shiftval; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.177 LSL (register) page C6-1094 line 61359 MATCH x1ac02000/mask=x7fe0fc00 +# C6.2.179 LSLV page C6-1098 line 61543 MATCH 
x1ac02000/mask=x7fe0fc00 +# CONSTRUCT x9ac02000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9ac02000/mask=xffe0fc00 --status pass + +:lsl Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x8 & Rn_GPR64 & Rd_GPR64 +{ + shiftval:8 = (Rm_GPR64 & 0x3f); + tmp_1:8 = Rn_GPR64 << shiftval; + Rd_GPR64 = tmp_1; +} + +# C6.2.158 LSL (immediate) page C6-784 line 45779 KEEPWITH + +ubfiz_lsb: "#"^imm is ImmR [ imm = 32 - ImmR; ] { export *[const]:4 imm; } +ubfiz_width: "#"^imm is ImmS [ imm = ImmS + 1; ] { export *[const]:4 imm; } +ubfiz_lsb64: "#"^imm is ImmR [ imm = 64 - ImmR; ] { export *[const]:4 imm; } +ubfx_width: "#"^imm is ImmR & ImmS [ imm = ImmS - ImmR + 1; ] { export *[const]:4 imm; } + +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# C6.2.342 UXTB page C6-1386 line 76865 MATCH x53001c00/mask=xfffffc00 +# C6.2.343 UXTH page C6-1387 line 76925 MATCH x53003c00/mask=xfffffc00 +# CONSTRUCT x53000012/mask=xffe0801e MATCHED 7 DOCUMENTED OPCODES +# AUNIT --inst x53000012/mask=xffe0801e --status pass +# Alias for ubfm where imms+1=immr and imms != '011111' +# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); + +:lsl Rd_GPR32, Rn_GPR32, LSB_bitfield32_imm_shift +is ImmR=ImmS+1 & ImmS_ne_1f=1 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=1 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & LSB_bitfield32_imm_shift & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + local tmp:4 = Rn_GPR32 << LSB_bitfield32_imm_shift; + Rd_GPR64 = zext(tmp); +} + +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# CONSTRUCT xd3400022/mask=xffc0002e MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst xd3400022/mask=xffc0002e --status pass +# Alias for ubfm where imms+1=immr and imms != '111111' + +:lsl Rd_GPR64, Rn_GPR64, LSB_bitfield64_imm_shift +is ImmR=ImmS+1 & ImmS_ne_3f=1 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=1 & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & LSB_bitfield64_imm_shift & Rn_GPR64 & Rd_GPR64 +{ + Rd_GPR64 = Rn_GPR64 << LSB_bitfield64_imm_shift; +} + +# C6.2.180 LSR (register) page C6-1100 line 61633 MATCH x1ac02400/mask=x7fe0fc00 +# C6.2.182 LSRV page C6-1104 line 61817 MATCH x1ac02400/mask=x7fe0fc00 +# CONSTRUCT x1ac02400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x1ac02400/mask=xffe0fc00 --status pass + +:lsr Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x9 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + shiftval:8 = zext(Rm_GPR32 & 0x1f); + tmp_1:4 = Rn_GPR32 >> shiftval; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.180 LSR (register) page C6-1100 line 61633 MATCH x1ac02400/mask=x7fe0fc00 +# C6.2.182 LSRV page C6-1104 line 61817 MATCH x1ac02400/mask=x7fe0fc00 +# CONSTRUCT x9ac02400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst 
x9ac02400/mask=xffe0fc00 --status pass + +:lsr Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x9 & Rn_GPR64 & Rd_GPR64 +{ + shiftval:8 = Rm_GPR64 & 0x3f; + tmp_1:8 = Rn_GPR64 >> shiftval; + Rd_GPR64 = tmp_1; +} + +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# CONSTRUCT x53007c00/mask=xffe0fc1a MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst x53007c00/mask=xffe0fc1a --status pass +# Alias for ubfm where imms=='011111' +# imms is MAX_INT5, so it will never be less than immr. Note that immr is limited to [0,31] +# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); + +:lsr Rd_GPR32, Rn_GPR32, ImmRConst32 +is ImmS=0x1f & ImmS_ne_1f=0 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = ImmRConst32; + tmp_1:4 = Rn_GPR32 >> tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# CONSTRUCT xd340fc00/mask=xffc0fc2a MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst xd340fc00/mask=xffc0fc2a --status pass +# Alias for ubfm where imms=='111111' +# imms is MAX_INT6, so it will never be less than immr. 
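+# Worked example (illustrative): "lsr x2, x3, #4" is the UBFM encoding with immr=4 and
+# imms=63, so ImmRConst64 exports 4 and the constructor below computes x2 = x3 >> 4.
+# Similarly, for the lsl alias above, "lsl w0, w1, #3" has immr=29 and imms=28
+# (imms+1 == immr), and the shift amount is recovered as 32 - immr = 3, matching the
+# ubfiz_lsb computation.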
+
+:lsr Rd_GPR64, Rn_GPR64, ImmRConst64
+is ImmS=0x3f & ImmS_ne_3f=0 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & Rn_GPR64 & Rd_GPR64
+{
+ tmp_2:8 = ImmRConst64;
+ tmp_1:8 = Rn_GPR64 >> tmp_2;
+ Rd_GPR64 = tmp_1;
+}
+
+# C6.2.183 MADD page C6-1106 line 61907 MATCH x1b000000/mask=x7fe08000
+# C6.2.197 MUL page C6-1132 line 63209 MATCH x1b007c00/mask=x7fe0fc00
+# CONSTRUCT x1b000000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x1b000000/mask=xffe08000 --status pass
+
+:madd Rd_GPR32, Rn_GPR32, Rm_GPR32, Ra_GPR32
+is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=0 & Ra_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ tmp_2:4 = Rn_GPR32 * Rm_GPR32;
+ tmp_1:4 = Ra_GPR32 + tmp_2;
+ Rd_GPR64 = zext(tmp_1);
+}
+
+# C6.2.183 MADD page C6-1106 line 61907 MATCH x1b000000/mask=x7fe08000
+# C6.2.197 MUL page C6-1132 line 63209 MATCH x1b007c00/mask=x7fe0fc00
+# CONSTRUCT x9b000000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x9b000000/mask=xffe08000 --status pass
+
+:madd Rd_GPR64, Rn_GPR64, Rm_GPR64, Ra_GPR64
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=0 & Ra_GPR64 & Rn_GPR64 & Rd_GPR64
+{
+ tmp_2:8 = Rn_GPR64 * Rm_GPR64;
+ tmp_1:8 = Ra_GPR64 + tmp_2;
+ Rd_GPR64 = tmp_1;
+}
+
+# C6.2.184 MNEG page C6-1108 line 62017 MATCH x1b00fc00/mask=x7fe0fc00
+# C6.2.196 MSUB page C6-1130 line 63100 MATCH x1b008000/mask=x7fe08000
+# CONSTRUCT x9b00fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x9b00fc00/mask=xffe0fc00 --status pass
+
+:mneg Rd_GPR64, Rn_GPR64, Rm_GPR64
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR64 & Rd_GPR64
+{
+ tmp_2:8 = Rn_GPR64 * Rm_GPR64;
+ tmp_1:8 = -tmp_2;
+ Rd_GPR64 = tmp_1;
+}
+
+# C6.2.184 MNEG page C6-1108 line 62017 MATCH x1b00fc00/mask=x7fe0fc00
+# C6.2.196 MSUB page C6-1130 line 63100 MATCH x1b008000/mask=x7fe08000
+# CONSTRUCT x1b00fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x1b00fc00/mask=xffe0fc00 --status pass
+
+:mneg Rd_GPR32, Rn_GPR32, Rm_GPR32
+is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+ tmp_2:4 = Rn_GPR32 * Rm_GPR32;
+ tmp_1:4 = -tmp_2;
+ Rd_GPR64 = zext(tmp_1);
+}
+
+# C6.2.185 MOV (to/from SP) page C6-1110 line 62111 MATCH x11000000/mask=x7ffffc00
+# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000
+# CONSTRUCT x11000000/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x11000000/mask=xfffffc00 --status pass
+
+:mov Rd_GPR32xsp, Rn_GPR32xsp
+is sf=0 & b_30=0 & S=0 & b_2428=0x11 & (aa_Xn=31 | aa_Xd=31) & shift=0 & imm12=0 & Rn_GPR32xsp & Rd_GPR32xsp & Rd_GPR64xsp
+{
+ Rd_GPR64xsp = zext(Rn_GPR32xsp);
+}
+
+# C6.2.185 MOV (to/from SP) page C6-1110 line 62111 MATCH x11000000/mask=x7ffffc00
+# C6.2.4 ADD (immediate) page C6-779 line 43893 MATCH x11000000/mask=x7f800000
+# CONSTRUCT x91000000/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x91000000/mask=xfffffc00 --status pass
+
+:mov Rd_GPR64xsp, Rn_GPR64xsp
+is sf=1 & b_30=0 & S=0 & b_2428=0x11 & (aa_Xn=31 | aa_Xd=31) & shift=0 & imm12=0 & Rn_GPR64xsp & Rd_GPR64xsp
+{
+ Rd_GPR64xsp = Rn_GPR64xsp;
+}
+
+# C6.2.166 MOV (inverted wide immediate) page C6-793 line 46366 KEEPWITH
+
+FullImm_movz32_imm: "#"^val is imm16 & aa_hw=0 [ val = (imm16 << 0) & 0xffffffff; ] { export *[const]:8 val; }
+FullImm_movz32_imm: "#"^val is imm16 &
aa_hw=1 [ val = (imm16 << 16) & 0xffffffff; ] { export *[const]:8 val; } + +FullImm_movz64_imm: "#"^val is imm16 & aa_hw [ val = imm16 << (aa_hw * 16); ] { export *[const]:8 val; } + +FullImm_movn32_imm: "#"^val is imm16 & aa_hw=0 [ val = ~(imm16 << 0) & 0xffffffff; ] { export *[const]:8 val; } +FullImm_movn32_imm: "#"^val is imm16 & aa_hw=1 [ val = ~(imm16 << 16) & 0xffffffff; ] { export *[const]:8 val; } + +FullImm_movn64_imm: "#"^val is imm16 & aa_hw [ val = ~(imm16 << (aa_hw * 16)); ] { export *[const]:8 val; } + +FullImm_movk32_mask: mask is aa_hw [ mask = (~(0xffff << (aa_hw * 16))) & 0xffffffff; ] { export *[const]:4 mask; } +FullImm_movk32_shift: tmp is imm16 & aa_hw [ tmp = (imm16 << (aa_hw * 16)) & 0xffffffff; ] { export *[const]:4 tmp; } +FullImm_movk32_imm: "#"^imm16 is imm16 & aa_hw=0 { export *[const]:4 imm16; } +FullImm_movk32_imm: "#"^imm16, "LSL #16" is imm16 & aa_hw=1 & FullImm_movk32_shift { export FullImm_movk32_shift; } + +FullImm_movk64_mask: mask is aa_hw [ mask = ~(0xffff << (aa_hw * 16)); ] { export *[const]:8 mask; } +FullImm_movk64_shift: tmp is imm16 & aa_hw [ tmp = (imm16 << (aa_hw * 16)); ] { export *[const]:8 tmp; } +FullImm_movk64_imm: "#"^imm16 is imm16 & aa_hw=0 { export *[const]:8 imm16; } +FullImm_movk64_imm: "#"^imm16, "LSL #16" is imm16 & aa_hw=1 & FullImm_movk64_shift { export FullImm_movk64_shift; } +FullImm_movk64_imm: "#"^imm16, "LSL #32" is imm16 & aa_hw=2 & FullImm_movk64_shift { export FullImm_movk64_shift; } +FullImm_movk64_imm: "#"^imm16, "LSL #48" is imm16 & aa_hw=3 & FullImm_movk64_shift { export FullImm_movk64_shift; } + +# C6.2.186 MOV (inverted wide immediate) page C6-1111 line 62178 MATCH x12800000/mask=x7f800000 +# C6.2.191 MOVN page C6-1121 line 62621 MATCH x12800000/mask=x7f800000 +# CONSTRUCT x12800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x12800000/mask=xff800000 --status pass + +:mov Rd_GPR32, FullImm_movn32_imm +is sf=0 & opc=0 & b_2428=0x12 & b_2323=1 & FullImm_movn32_imm & Rd_GPR32 & Rd_GPR64 +{ + # Special case MOVN + Rd_GPR64 = FullImm_movn32_imm; +} + +# C6.2.186 MOV (inverted wide immediate) page C6-1111 line 62178 MATCH x12800000/mask=x7f800000 +# C6.2.191 MOVN page C6-1121 line 62621 MATCH x12800000/mask=x7f800000 +# CONSTRUCT x92800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x92800000/mask=xff800000 --status pass + +:mov Rd_GPR64, FullImm_movn64_imm +is sf=1 & opc=0 & b_2428=0x12 & b_2323=1 & FullImm_movn64_imm & Rd_GPR64 +{ + # Special case MOVN + Rd_GPR64 = FullImm_movn64_imm; +} + +# C6.2.187 MOV (wide immediate) page C6-1113 line 62270 MATCH x52800000/mask=x7f800000 +# C6.2.192 MOVZ page C6-1123 line 62721 MATCH x52800000/mask=x7f800000 +# CONSTRUCT x52800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x52800000/mask=xff800000 --status pass + +:mov Rd_GPR32, FullImm_movz32_imm +is sf=0 & opc=2 & b_2428=0x12 & b_2323=1 & FullImm_movz32_imm & Rd_GPR32 & Rd_GPR64 +{ + Rd_GPR64 = FullImm_movz32_imm; +} + +# C6.2.187 MOV (wide immediate) page C6-1113 line 62270 MATCH x52800000/mask=x7f800000 +# C6.2.192 MOVZ page C6-1123 line 62721 MATCH x52800000/mask=x7f800000 +# CONSTRUCT xd2800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd2800000/mask=xff800000 --status pass + +:mov Rd_GPR64, FullImm_movz64_imm +is sf=1 & opc=2 & b_2428=0x12 & b_2323=1 & FullImm_movz64_imm & Rd_GPR64 +{ + Rd_GPR64 = FullImm_movz64_imm; +} + +# C6.2.188 MOV (bitmask immediate) page C6-1115 line 62360 MATCH x320003e0/mask=x7f8003e0 +# C6.2.205 ORR (immediate) page C6-1146 line 
63910 MATCH x32000000/mask=x7f800000 +# CONSTRUCT x320003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x320003e0/mask=xffe0ffe0 --status pass + +:mov Rd_GPR32wsp, DecodeWMask32 +is sf=0 & opc=1 & b_2428=0x12 & b_2223=0 & N=0 & imm6=0 & DecodeWMask32 & aa_Xn=31 & Rd_GPR32wsp & Rd_GPR64xsp +{ + # special case ORR + tmp_1:4 = DecodeWMask32; + Rd_GPR64xsp = zext(tmp_1); +} + +# C6.2.188 MOV (bitmask immediate) page C6-1115 line 62360 MATCH x320003e0/mask=x7f8003e0 +# C6.2.205 ORR (immediate) page C6-1146 line 63910 MATCH x32000000/mask=x7f800000 +# CONSTRUCT xb20003e0/mask=xffc0ffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb20003e0/mask=xffc0ffe0 --status pass + +:mov Rd_GPR64xsp, DecodeWMask64 +is sf=1 & opc=1 & b_2428=0x12 & b_2223=0 & imm6=0 & DecodeWMask64 & aa_Xn=31 & Rd_GPR64xsp +{ + # special case of ORR + tmp_1:8 = DecodeWMask64; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.189 MOV (register) page C6-1117 line 62449 MATCH x2a0003e0/mask=x7fe0ffe0 +# C6.2.206 ORR (shifted register) page C6-1148 line 64011 MATCH x2a000000/mask=x7f200000 +# CONSTRUCT x2a0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x2a0003e0/mask=xff2003e0 --status pass + +:mov Rd_GPR32, RegShift32Log +is b_31=0 & b_2430=0b0101010 & b_21=0 & b_0509=0b11111 & RegShift32Log & Rd_GPR32 & Rd_GPR64 +{ + # special case ORR + tmp_1:4 = RegShift32Log; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.189 MOV (register) page C6-1117 line 62449 MATCH x2a0003e0/mask=x7fe0ffe0 +# C6.2.206 ORR (shifted register) page C6-1148 line 64011 MATCH x2a000000/mask=x7f200000 +# CONSTRUCT xaa0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xaa0003e0/mask=xff2003e0 --status pass + +:mov Rd_GPR64, RegShift64Log +is b_31=1 & b_2430=0b0101010 & b_21=0 & b_0509=0b11111 & RegShift64Log & Rd_GPR64 +{ + # special case of ORR + tmp_1:8 = RegShift64Log; + Rd_GPR64 = tmp_1; +} + +# C6.2.190 MOVK page C6-1119 line 62536 MATCH x72800000/mask=x7f800000 +# CONSTRUCT x72800000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x72800000/mask=xff800000 --status pass + +:movk Rd_GPR32, FullImm_movk32_imm +is sf=0 & opc=3 & b_2428=0x12 & b_2323=1 & FullImm_movk32_imm & Rd_GPR32 & Rd_GPR64 & FullImm_movk32_mask +{ + local tmp:4 = Rd_GPR32 & FullImm_movk32_mask; + tmp = tmp | FullImm_movk32_imm; + Rd_GPR64 = zext(tmp); +} + +# C6.2.190 MOVK page C6-1119 line 62536 MATCH x72800000/mask=x7f800000 +# CONSTRUCT xf2800000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf2800000/mask=xff800000 --status pass + +:movk Rd_GPR64, FullImm_movk64_imm +is sf=1 & opc=3 & b_2428=0x12 & b_2323=1 & FullImm_movk64_imm & Rd_GPR64 & FullImm_movk64_mask +{ + Rd_GPR64 = Rd_GPR64 & FullImm_movk64_mask; + Rd_GPR64 = Rd_GPR64 | FullImm_movk64_imm; +} + +# C6.2.173 MRS page C6-802 line 46877 MATCH KEEPWITH + +with : (l=0 | l=1) { +CopReg: spsr_el1 is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_el1 { export spsr_el1; } +CopReg: elr_el1 is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=0 & Op2_uimm3=1 & elr_el1 { export elr_el1; } +CopReg: sp_el0 is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=1 & Op2_uimm3=0 & sp_el0 { export sp_el0; } +CopReg: spsel is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=2 & Op2_uimm3=0 & spsel { export spsel; } +CopReg: daif is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=2 & Op2_uimm3=1 & daif { export daif; } +CopReg: currentel is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=2 & Op2_uimm3=2 & currentel { export currentel; } +CopReg: nzcv is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=2 & Op2_uimm3=0 & nzcv { export nzcv; } +CopReg: fpcr is Op0=3 & 
Op1_uimm3=3 & CRn=4 & CRm=4 & Op2_uimm3=0 & fpcr { export fpcr; } +CopReg: fpsr is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=4 & Op2_uimm3=1 & fpsr { export fpsr; } +CopReg: dspsr_el0 is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=5 & Op2_uimm3=0 & dspsr_el0 { export dspsr_el0; } +CopReg: dlr_el0 is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=5 & Op2_uimm3=1 & dlr_el0 { export dlr_el0; } +CopReg: spsr_el2 is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_el2 { export spsr_el2; } +CopReg: elr_el2 is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=0 & Op2_uimm3=1 & elr_el2 { export elr_el2; } +CopReg: sp_el1 is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=1 & Op2_uimm3=0 & sp_el1 { export sp_el1; } +CopReg: spsr_irq is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=0 & spsr_irq { export spsr_irq; } +CopReg: spsr_abt is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=1 & spsr_abt { export spsr_abt; } +CopReg: spsr_und is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=2 & spsr_und { export spsr_und; } +CopReg: spsr_fiq is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=3 & spsr_fiq { export spsr_fiq; } +CopReg: spsr_el3 is Op0=3 & Op1_uimm3=6 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_el3 { export spsr_el3; } +CopReg: elr_el3 is Op0=3 & Op1_uimm3=6 & CRn=4 & CRm=0 & Op2_uimm3=1 & elr_el3 { export elr_el3; } +CopReg: sp_el2 is Op0=3 & Op1_uimm3=6 & CRn=4 & CRm=1 & Op2_uimm3=0 & sp_el2 { export sp_el2; } +# CopReg: spsr_svc is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_svc { export spsr_svc; } +# CopReg: spsr_hyp is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_hyp { export spsr_hyp; } + +CopReg: midr_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=0 & midr_el1 { export midr_el1; } +CopReg: mpidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=5 & mpidr_el1 { export mpidr_el1; } +CopReg: revidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=6 & revidr_el1 { export revidr_el1; } +CopReg: id_dfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=2 & id_dfr0_el1 { export id_dfr0_el1; } +CopReg: id_pfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=0 & id_pfr0_el1 { export id_pfr0_el1; } +CopReg: id_pfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=1 & id_pfr1_el1 { export id_pfr1_el1; } +CopReg: id_afr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=3 & id_afr0_el1 { export id_afr0_el1; } +CopReg: id_mmfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=4 & id_mmfr0_el1 { export id_mmfr0_el1; } +CopReg: id_mmfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=5 & id_mmfr1_el1 { export id_mmfr1_el1; } +CopReg: id_mmfr2_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=6 & id_mmfr2_el1 { export id_mmfr2_el1; } +CopReg: id_mmfr3_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=7 & id_mmfr3_el1 { export id_mmfr3_el1; } +CopReg: id_isar0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=0 & id_isar0_el1 { export id_isar0_el1; } +CopReg: id_isar1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=1 & id_isar1_el1 { export id_isar1_el1; } +CopReg: id_isar2_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=2 & id_isar2_el1 { export id_isar2_el1; } +CopReg: id_isar3_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=3 & id_isar3_el1 { export id_isar3_el1; } +CopReg: id_isar4_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=4 & id_isar4_el1 { export id_isar4_el1; } +CopReg: id_isar5_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=5 & id_isar5_el1 { export id_isar5_el1; } +CopReg: mvfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & 
CRm=3 & Op2_uimm3=0 & mvfr0_el1 { export mvfr0_el1; } +CopReg: mvfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=1 & mvfr1_el1 { export mvfr1_el1; } +CopReg: mvfr2_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=2 & mvfr2_el1 { export mvfr2_el1; } +CopReg: ccsidr_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=0 & ccsidr_el1 { export ccsidr_el1; } +CopReg: id_aa64pfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=0 & id_aa64pfr0_el1 { export id_aa64pfr0_el1; } +CopReg: id_aa64pfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=1 & id_aa64pfr1_el1 { export id_aa64pfr1_el1; } +CopReg: id_aa64dfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=0 & id_aa64dfr0_el1 { export id_aa64dfr0_el1; } +CopReg: id_aa64dfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=1 & id_aa64dfr1_el1 { export id_aa64dfr1_el1; } +CopReg: id_aa64isar0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=0 & id_aa64isar0_el1 { export id_aa64isar0_el1; } +CopReg: id_aa64isar1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=1 & id_aa64isar1_el1 { export id_aa64isar1_el1; } +CopReg: id_aa64mmfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=0 & id_aa64mmfr0_el1 { export id_aa64mmfr0_el1; } +CopReg: id_aa64mmfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=1 & id_aa64mmfr1_el1 { export id_aa64mmfr1_el1; } +CopReg: id_aa64afr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=4 & id_aa64afr0_el1 { export id_aa64afr0_el1; } +CopReg: id_aa64afr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=5 & id_aa64afr1_el1 { export id_aa64afr1_el1; } +CopReg: clidr_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=1 & clidr_el1 { export clidr_el1; } +CopReg: aidr_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=7 & aidr_el1 { export aidr_el1; } +CopReg: csselr_el1 is Op0=3 & Op1_uimm3=2 & CRn=0 & CRm=0 & Op2_uimm3=0 & csselr_el1 { export csselr_el1; } +CopReg: ctr_el0 is Op0=3 & Op1_uimm3=3 & CRn=0 & CRm=0 & Op2_uimm3=1 & ctr_el0 { export ctr_el0; } +CopReg: dczid_el0 is Op0=3 & Op1_uimm3=3 & CRn=0 & CRm=0 & Op2_uimm3=7 & dczid_el0 { export dczid_el0; } +CopReg: vpidr_el2 is Op0=3 & Op1_uimm3=4 & CRn=0 & CRm=0 & Op2_uimm3=0 & vpidr_el2 { export vpidr_el2; } +CopReg: vmpidr_el2 is Op0=3 & Op1_uimm3=4 & CRn=0 & CRm=0 & Op2_uimm3=5 & vmpidr_el2 { export vmpidr_el2; } +CopReg: sctlr_el1 is Op0=3 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=0 & sctlr_el1 { export sctlr_el1; } +CopReg: actlr_el1 is Op0=3 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=1 & actlr_el1 { export actlr_el1; } +CopReg: cpacr_el1 is Op0=3 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=2 & cpacr_el1 { export cpacr_el1; } +CopReg: sctlr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=0 & Op2_uimm3=0 & sctlr_el2 { export sctlr_el2; } +CopReg: actlr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=0 & Op2_uimm3=1 & actlr_el2 { export actlr_el2; } +CopReg: hcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=0 & hcr_el2 { export hcr_el2; } +CopReg: mdcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=1 & mdcr_el2 { export mdcr_el2; } +CopReg: cptr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=2 & cptr_el2 { export cptr_el2; } +CopReg: hstr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=3 & hstr_el2 { export hstr_el2; } +CopReg: hacr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=7 & hacr_el2 { export hacr_el2; } +CopReg: sctlr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=0 & Op2_uimm3=0 & sctlr_el3 { export sctlr_el3; } +CopReg: actlr_el3 is Op0=3 & 
Op1_uimm3=6 & CRn=1 & CRm=0 & Op2_uimm3=1 & actlr_el3 { export actlr_el3; } +CopReg: scr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=1 & Op2_uimm3=0 & scr_el3 { export scr_el3; } +CopReg: cptr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=1 & Op2_uimm3=2 & cptr_el3 { export cptr_el3; } +CopReg: mdcr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=3 & Op2_uimm3=1 & mdcr_el3 { export mdcr_el3; } +CopReg: ttbr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=2 & CRm=0 & Op2_uimm3=0 & ttbr0_el1 { export ttbr0_el1; } +CopReg: ttbr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=2 & CRm=0 & Op2_uimm3=1 & ttbr1_el1 { export ttbr1_el1; } +CopReg: ttbr0_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=0 & Op2_uimm3=0 & ttbr0_el2 { export ttbr0_el2; } +CopReg: ttbr0_el3 is Op0=3 & Op1_uimm3=6 & CRn=2 & CRm=0 & Op2_uimm3=0 & ttbr0_el3 { export ttbr0_el3; } +CopReg: vttbr_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=1 & Op2_uimm3=0 & vttbr_el2 { export vttbr_el2; } +CopReg: tcr_el1 is Op0=3 & Op1_uimm3=0 & CRn=2 & CRm=0 & Op2_uimm3=2 & tcr_el1 { export tcr_el1; } +CopReg: tcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=0 & Op2_uimm3=2 & tcr_el2 { export tcr_el2; } +CopReg: tcr_el3 is Op0=3 & Op1_uimm3=6 & CRn=2 & CRm=0 & Op2_uimm3=2 & tcr_el3 { export tcr_el3; } +CopReg: vtcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=1 & Op2_uimm3=2 & vtcr_el2 { export vtcr_el2; } +CopReg: afsr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=5 & CRm=1 & Op2_uimm3=0 & afsr0_el1 { export afsr0_el1; } +CopReg: afsr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=5 & CRm=1 & Op2_uimm3=1 & afsr1_el1 { export afsr1_el1; } +CopReg: afsr0_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=1 & Op2_uimm3=0 & afsr0_el2 { export afsr0_el2; } +CopReg: afsr1_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=1 & Op2_uimm3=1 & afsr1_el2 { export afsr1_el2; } +CopReg: afsr0_el3 is Op0=3 & Op1_uimm3=6 & CRn=5 & CRm=1 & Op2_uimm3=0 & afsr0_el3 { export afsr0_el3; } +CopReg: afsr1_el3 is Op0=3 & Op1_uimm3=6 & CRn=5 & CRm=1 & Op2_uimm3=1 & afsr1_el3 { export afsr1_el3; } +CopReg: esr_el1 is Op0=3 & Op1_uimm3=0 & CRn=5 & CRm=2 & Op2_uimm3=0 & esr_el1 { export esr_el1; } +CopReg: esr_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=2 & Op2_uimm3=0 & esr_el2 { export esr_el2; } +CopReg: esr_el3 is Op0=3 & Op1_uimm3=6 & CRn=5 & CRm=2 & Op2_uimm3=0 & esr_el3 { export esr_el3; } +CopReg: fpexc32_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=3 & Op2_uimm3=0 & fpexc32_el2 { export fpexc32_el2; } +CopReg: far_el1 is Op0=3 & Op1_uimm3=0 & CRn=6 & CRm=0 & Op2_uimm3=0 & far_el1 { export far_el1; } +CopReg: far_el2 is Op0=3 & Op1_uimm3=4 & CRn=6 & CRm=0 & Op2_uimm3=0 & far_el2 { export far_el2; } +CopReg: far_el3 is Op0=3 & Op1_uimm3=6 & CRn=6 & CRm=0 & Op2_uimm3=0 & far_el3 { export far_el3; } +CopReg: hpfar_el2 is Op0=3 & Op1_uimm3=4 & CRn=6 & CRm=0 & Op2_uimm3=4 & hpfar_el2 { export hpfar_el2; } +CopReg: par_el1 is Op0=3 & Op1_uimm3=0 & CRn=7 & CRm=4 & Op2_uimm3=0 & par_el1 { export par_el1; } +CopReg: pmintenset_el1 is Op0=3 & Op1_uimm3=0 & CRn=9 & CRm=14 & Op2_uimm3=1 & pmintenset_el1 { export pmintenset_el1; } +CopReg: pmintenclr_el1 is Op0=3 & Op1_uimm3=0 & CRn=9 & CRm=14 & Op2_uimm3=2 & pmintenclr_el1 { export pmintenclr_el1; } +CopReg: pmcr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=0 & pmcr_el0 { export pmcr_el0; } +CopReg: pmcntenset_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=1 & pmcntenset_el0 { export pmcntenset_el0; } +CopReg: pmcntenclr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=2 & pmcntenclr_el0 { export pmcntenclr_el0; } +CopReg: pmovsclr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=3 & pmovsclr_el0 { 
export pmovsclr_el0; } +CopReg: pmswinc_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=4 & pmswinc_el0 { export pmswinc_el0; } +CopReg: pmselr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=5 & pmselr_el0 { export pmselr_el0; } +CopReg: pmceid0_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=6 & pmceid0_el0 { export pmceid0_el0; } +CopReg: pmceid1_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=7 & pmceid1_el0 { export pmceid1_el0; } +CopReg: pmccntr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=13 & Op2_uimm3=0 & pmccntr_el0 { export pmccntr_el0; } +CopReg: pmxevtyper_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=13 & Op2_uimm3=1 & pmxevtyper_el0 { export pmxevtyper_el0; } +CopReg: pmxevcntr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=13 & Op2_uimm3=2 & pmxevcntr_el0 { export pmxevcntr_el0; } +CopReg: pmuserenr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=14 & Op2_uimm3=0 & pmuserenr_el0 { export pmuserenr_el0; } +CopReg: pmovsset_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=14 & Op2_uimm3=3 & pmovsset_el0 { export pmovsset_el0; } +CopReg: pmevcntr0_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=0 & pmevcntr0_el0 { export pmevcntr0_el0; } +CopReg: pmevcntr1_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=1 & pmevcntr1_el0 { export pmevcntr1_el0; } +CopReg: pmevcntr2_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=2 & pmevcntr2_el0 { export pmevcntr2_el0; } +CopReg: pmevcntr3_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=3 & pmevcntr3_el0 { export pmevcntr3_el0; } +CopReg: pmevcntr4_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=4 & pmevcntr4_el0 { export pmevcntr4_el0; } +CopReg: pmevcntr5_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=5 & pmevcntr5_el0 { export pmevcntr5_el0; } +CopReg: pmevcntr6_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=6 & pmevcntr6_el0 { export pmevcntr6_el0; } +CopReg: pmevcntr7_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=7 & pmevcntr7_el0 { export pmevcntr7_el0; } +CopReg: pmevcntr8_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=0 & pmevcntr8_el0 { export pmevcntr8_el0; } +CopReg: pmevcntr9_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=1 & pmevcntr9_el0 { export pmevcntr9_el0; } +CopReg: pmevcntr10_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=2 & pmevcntr10_el0 { export pmevcntr10_el0; } +CopReg: pmevcntr11_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=3 & pmevcntr11_el0 { export pmevcntr11_el0; } +CopReg: pmevcntr12_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=4 & pmevcntr12_el0 { export pmevcntr12_el0; } +CopReg: pmevcntr13_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=5 & pmevcntr13_el0 { export pmevcntr13_el0; } +CopReg: pmevcntr14_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=6 & pmevcntr14_el0 { export pmevcntr14_el0; } +CopReg: pmevcntr15_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=7 & pmevcntr15_el0 { export pmevcntr15_el0; } +CopReg: pmevcntr16_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=0 & pmevcntr16_el0 { export pmevcntr16_el0; } +CopReg: pmevcntr17_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=1 & pmevcntr17_el0 { export pmevcntr17_el0; } +CopReg: pmevcntr18_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=2 & pmevcntr18_el0 { export pmevcntr18_el0; } +CopReg: pmevcntr19_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=3 & pmevcntr19_el0 { export pmevcntr19_el0; } +CopReg: pmevcntr20_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=4 
& pmevcntr20_el0 { export pmevcntr20_el0; } +CopReg: pmevcntr21_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=5 & pmevcntr21_el0 { export pmevcntr21_el0; } +CopReg: pmevcntr22_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=6 & pmevcntr22_el0 { export pmevcntr22_el0; } +CopReg: pmevcntr23_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=7 & pmevcntr23_el0 { export pmevcntr23_el0; } +CopReg: pmevcntr24_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=0 & pmevcntr24_el0 { export pmevcntr24_el0; } +CopReg: pmevcntr25_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=1 & pmevcntr25_el0 { export pmevcntr25_el0; } +CopReg: pmevcntr26_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=2 & pmevcntr26_el0 { export pmevcntr26_el0; } +CopReg: pmevcntr27_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=3 & pmevcntr27_el0 { export pmevcntr27_el0; } +CopReg: pmevcntr28_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=4 & pmevcntr28_el0 { export pmevcntr28_el0; } +CopReg: pmevcntr29_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=5 & pmevcntr29_el0 { export pmevcntr29_el0; } +CopReg: pmevcntr30_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=6 & pmevcntr30_el0 { export pmevcntr30_el0; } +CopReg: pmevtyper0_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=0 & pmevtyper0_el0 { export pmevtyper0_el0; } +CopReg: pmevtyper1_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=1 & pmevtyper1_el0 { export pmevtyper1_el0; } +CopReg: pmevtyper2_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=2 & pmevtyper2_el0 { export pmevtyper2_el0; } +CopReg: pmevtyper3_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=3 & pmevtyper3_el0 { export pmevtyper3_el0; } +CopReg: pmevtyper4_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=4 & pmevtyper4_el0 { export pmevtyper4_el0; } +CopReg: pmevtyper5_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=5 & pmevtyper5_el0 { export pmevtyper5_el0; } +CopReg: pmevtyper6_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=6 & pmevtyper6_el0 { export pmevtyper6_el0; } +CopReg: pmevtyper7_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=7 & pmevtyper7_el0 { export pmevtyper7_el0; } +CopReg: pmevtyper8_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=0 & pmevtyper8_el0 { export pmevtyper8_el0; } +CopReg: pmevtyper9_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=1 & pmevtyper9_el0 { export pmevtyper9_el0; } +CopReg: pmevtyper10_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=2 & pmevtyper10_el0 { export pmevtyper10_el0; } +CopReg: pmevtyper11_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=3 & pmevtyper11_el0 { export pmevtyper11_el0; } +CopReg: pmevtyper12_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=4 & pmevtyper12_el0 { export pmevtyper12_el0; } +CopReg: pmevtyper13_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=5 & pmevtyper13_el0 { export pmevtyper13_el0; } +CopReg: pmevtyper14_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=6 & pmevtyper14_el0 { export pmevtyper14_el0; } +CopReg: pmevtyper15_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=7 & pmevtyper15_el0 { export pmevtyper15_el0; } +CopReg: pmevtyper16_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=0 & pmevtyper16_el0 { export pmevtyper16_el0; } +CopReg: pmevtyper17_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=1 & pmevtyper17_el0 { export pmevtyper17_el0; } +CopReg: pmevtyper18_el0 is Op0=3 & 
Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=2 & pmevtyper18_el0 { export pmevtyper18_el0; } +CopReg: pmevtyper19_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=3 & pmevtyper19_el0 { export pmevtyper19_el0; } +CopReg: pmevtyper20_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=4 & pmevtyper20_el0 { export pmevtyper20_el0; } +CopReg: pmevtyper21_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=5 & pmevtyper21_el0 { export pmevtyper21_el0; } +CopReg: pmevtyper22_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=6 & pmevtyper22_el0 { export pmevtyper22_el0; } +CopReg: pmevtyper23_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=7 & pmevtyper23_el0 { export pmevtyper23_el0; } +CopReg: pmevtyper24_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=0 & pmevtyper24_el0 { export pmevtyper24_el0; } +CopReg: pmevtyper25_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=1 & pmevtyper25_el0 { export pmevtyper25_el0; } +CopReg: pmevtyper26_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=2 & pmevtyper26_el0 { export pmevtyper26_el0; } +CopReg: pmevtyper27_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=3 & pmevtyper27_el0 { export pmevtyper27_el0; } +CopReg: pmevtyper28_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=4 & pmevtyper28_el0 { export pmevtyper28_el0; } +CopReg: pmevtyper29_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=5 & pmevtyper29_el0 { export pmevtyper29_el0; } +CopReg: pmevtyper30_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=6 & pmevtyper30_el0 { export pmevtyper30_el0; } +CopReg: pmccfiltr_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=7 & pmccfiltr_el0 { export pmccfiltr_el0; } +CopReg: mair_el1 is Op0=3 & Op1_uimm3=0 & CRn=10 & CRm=2 & Op2_uimm3=0 & mair_el1 { export mair_el1; } +CopReg: mair_el2 is Op0=3 & Op1_uimm3=4 & CRn=10 & CRm=2 & Op2_uimm3=0 & mair_el2 { export mair_el2; } +CopReg: mair_el3 is Op0=3 & Op1_uimm3=6 & CRn=10 & CRm=2 & Op2_uimm3=0 & mair_el3 { export mair_el3; } +CopReg: amair_el1 is Op0=3 & Op1_uimm3=0 & CRn=10 & CRm=3 & Op2_uimm3=0 & amair_el1 { export amair_el1; } +CopReg: amair_el2 is Op0=3 & Op1_uimm3=4 & CRn=10 & CRm=3 & Op2_uimm3=0 & amair_el2 { export amair_el2; } +CopReg: amair_el3 is Op0=3 & Op1_uimm3=6 & CRn=10 & CRm=3 & Op2_uimm3=0 & amair_el3 { export amair_el3; } +CopReg: vbar_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=0 & Op2_uimm3=0 & vbar_el1 { export vbar_el1; } +CopReg: vbar_el2 is Op0=3 & Op1_uimm3=4 & CRn=12 & CRm=0 & Op2_uimm3=0 & vbar_el2 { export vbar_el2; } +CopReg: vbar_el3 is Op0=3 & Op1_uimm3=6 & CRn=12 & CRm=0 & Op2_uimm3=0 & vbar_el3 { export vbar_el3; } +CopReg: rvbar_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=0 & Op2_uimm3=1 & rvbar_el1 { export rvbar_el1; } +CopReg: rvbar_el2 is Op0=3 & Op1_uimm3=4 & CRn=12 & CRm=0 & Op2_uimm3=1 & rvbar_el2 { export rvbar_el2; } +CopReg: rvbar_el3 is Op0=3 & Op1_uimm3=6 & CRn=12 & CRm=0 & Op2_uimm3=1 & rvbar_el3 { export rvbar_el3; } +CopReg: rmr_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=0 & Op2_uimm3=2 & rmr_el1 { export rmr_el1; } +CopReg: rmr_el2 is Op0=3 & Op1_uimm3=4 & CRn=12 & CRm=0 & Op2_uimm3=2 & rmr_el2 { export rmr_el2; } +CopReg: rmr_el3 is Op0=3 & Op1_uimm3=6 & CRn=12 & CRm=0 & Op2_uimm3=2 & rmr_el3 { export rmr_el3; } +CopReg: isr_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=1 & Op2_uimm3=0 & isr_el1 { export isr_el1; } +CopReg: contextidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=13 & CRm=0 & Op2_uimm3=1 & contextidr_el1 { export contextidr_el1; } +CopReg: tpidr_el0 is Op0=3 & 
Op1_uimm3=3 & CRn=13 & CRm=0 & Op2_uimm3=2 & tpidr_el0 { export tpidr_el0; } +CopReg: tpidrro_el0 is Op0=3 & Op1_uimm3=3 & CRn=13 & CRm=0 & Op2_uimm3=3 & tpidrro_el0 { export tpidrro_el0; } +CopReg: tpidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=13 & CRm=0 & Op2_uimm3=4 & tpidr_el1 { export tpidr_el1; } +CopReg: tpidr_el2 is Op0=3 & Op1_uimm3=4 & CRn=13 & CRm=0 & Op2_uimm3=2 & tpidr_el2 { export tpidr_el2; } +CopReg: tpidr_el3 is Op0=3 & Op1_uimm3=6 & CRn=13 & CRm=0 & Op2_uimm3=2 & tpidr_el3 { export tpidr_el3; } +CopReg: teecr32_el1 is Op0=2 & Op1_uimm3=2 & CRn=0 & CRm=0 & Op2_uimm3=0 & teecr32_el1 { export teecr32_el1; } +CopReg: cntfrq_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=0 & Op2_uimm3=0 & cntfrq_el0 { export cntfrq_el0; } +CopReg: cntpct_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=0 & Op2_uimm3=1 & cntpct_el0 { export cntpct_el0; } +CopReg: cntvct_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=0 & Op2_uimm3=2 & cntvct_el0 { export cntvct_el0; } +CopReg: cntvoff_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=0 & Op2_uimm3=3 & cntvoff_el2 { export cntvoff_el2; } +CopReg: cntkctl_el1 is Op0=3 & Op1_uimm3=0 & CRn=14 & CRm=1 & Op2_uimm3=0 & cntkctl_el1 { export cntkctl_el1; } +CopReg: cnthctl_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=1 & Op2_uimm3=0 & cnthctl_el2 { export cnthctl_el2; } +CopReg: cntp_tval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=2 & Op2_uimm3=0 & cntp_tval_el0 { export cntp_tval_el0; } +CopReg: cntp_ctl_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=2 & Op2_uimm3=1 & cntp_ctl_el0 { export cntp_ctl_el0; } +CopReg: cntp_cval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=2 & Op2_uimm3=2 & cntp_cval_el0 { export cntp_cval_el0; } +CopReg: cntv_tval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=3 & Op2_uimm3=0 & cntv_tval_el0 { export cntv_tval_el0; } +CopReg: cntv_ctl_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=3 & Op2_uimm3=1 & cntv_ctl_el0 { export cntv_ctl_el0; } +CopReg: cntv_cval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=3 & Op2_uimm3=2 & cntv_cval_el0 { export cntv_cval_el0; } +CopReg: cnthp_tval_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=2 & Op2_uimm3=0 & cnthp_tval_el2 { export cnthp_tval_el2; } +CopReg: cnthp_ctl_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=2 & Op2_uimm3=1 & cnthp_ctl_el2 { export cnthp_ctl_el2; } +CopReg: cnthp_cval_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=2 & Op2_uimm3=2 & cnthp_cval_el2 { export cnthp_cval_el2; } +CopReg: cntps_tval_el1 is Op0=3 & Op1_uimm3=7 & CRn=14 & CRm=2 & Op2_uimm3=0 & cntps_tval_el1 { export cntps_tval_el1; } +CopReg: cntps_ctl_el1 is Op0=3 & Op1_uimm3=7 & CRn=14 & CRm=2 & Op2_uimm3=1 & cntps_ctl_el1 { export cntps_ctl_el1; } +CopReg: cntps_cval_el1 is Op0=3 & Op1_uimm3=7 & CRn=14 & CRm=2 & Op2_uimm3=2 & cntps_cval_el1 { export cntps_cval_el1; } +CopReg: dacr32_el2 is Op0=3 & Op1_uimm3=4 & CRn=3 & CRm=0 & Op2_uimm3=0 & dacr32_el2 { export dacr32_el2; } +CopReg: ifsr32_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=0 & Op2_uimm3=1 & ifsr32_el2 { export ifsr32_el2; } +CopReg: teehbr32_el1 is Op0=2 & Op1_uimm3=2 & CRn=1 & CRm=0 & Op2_uimm3=0 & teehbr32_el1 { export teehbr32_el1; } +CopReg: sder32_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=1 & Op2_uimm3=1 & sder32_el3 { export sder32_el3; } +CopReg: osdtrrx_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=2 & osdtrrx_el1 { export osdtrrx_el1; } + +CopReg: mdccint_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=0 & mdccint_el1 { export mdccint_el1; } +CopReg: mdscr_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=2 & mdscr_el1 { export mdscr_el1; } +CopReg: osdtrtx_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & 
CRm=3 & Op2_uimm3=2 & osdtrtx_el1 { export osdtrtx_el1; } +CopReg: oseccr_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=2 & oseccr_el1 { export oseccr_el1; } +CopReg: dbgbvr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=4 & dbgbvr0_el1 { export dbgbvr0_el1; } +CopReg: dbgbvr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=4 & dbgbvr1_el1 { export dbgbvr1_el1; } +CopReg: dbgbvr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=4 & dbgbvr2_el1 { export dbgbvr2_el1; } +CopReg: dbgbvr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=4 & dbgbvr3_el1 { export dbgbvr3_el1; } +CopReg: dbgbvr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=4 & dbgbvr4_el1 { export dbgbvr4_el1; } +CopReg: dbgbvr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=4 & dbgbvr5_el1 { export dbgbvr5_el1; } +CopReg: dbgbvr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=4 & dbgbvr6_el1 { export dbgbvr6_el1; } +CopReg: dbgbvr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=4 & dbgbvr7_el1 { export dbgbvr7_el1; } +CopReg: dbgbvr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=4 & dbgbvr8_el1 { export dbgbvr8_el1; } +CopReg: dbgbvr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=4 & dbgbvr9_el1 { export dbgbvr9_el1; } +CopReg: dbgbvr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=4 & dbgbvr10_el1 { export dbgbvr10_el1; } +CopReg: dbgbvr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=4 & dbgbvr11_el1 { export dbgbvr11_el1; } +CopReg: dbgbvr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=4 & dbgbvr12_el1 { export dbgbvr12_el1; } +CopReg: dbgbvr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=4 & dbgbvr13_el1 { export dbgbvr13_el1; } +CopReg: dbgbvr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=4 & dbgbvr14_el1 { export dbgbvr14_el1; } +CopReg: dbgbvr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=4 & dbgbvr15_el1 { export dbgbvr15_el1; } +CopReg: dbgbcr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=5 & dbgbcr0_el1 { export dbgbcr0_el1; } +CopReg: dbgbcr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=5 & dbgbcr1_el1 { export dbgbcr1_el1; } +CopReg: dbgbcr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=5 & dbgbcr2_el1 { export dbgbcr2_el1; } +CopReg: dbgbcr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=5 & dbgbcr3_el1 { export dbgbcr3_el1; } +CopReg: dbgbcr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=5 & dbgbcr4_el1 { export dbgbcr4_el1; } +CopReg: dbgbcr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=5 & dbgbcr5_el1 { export dbgbcr5_el1; } +CopReg: dbgbcr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=5 & dbgbcr6_el1 { export dbgbcr6_el1; } +CopReg: dbgbcr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=5 & dbgbcr7_el1 { export dbgbcr7_el1; } +CopReg: dbgbcr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=5 & dbgbcr8_el1 { export dbgbcr8_el1; } +CopReg: dbgbcr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=5 & dbgbcr9_el1 { export dbgbcr9_el1; } +CopReg: dbgbcr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=5 & dbgbcr10_el1 { export dbgbcr10_el1; } +CopReg: dbgbcr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=5 & dbgbcr11_el1 { export dbgbcr11_el1; } +CopReg: dbgbcr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=5 & dbgbcr12_el1 { export dbgbcr12_el1; } +CopReg: dbgbcr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=5 & dbgbcr13_el1 { export 
dbgbcr13_el1; } +CopReg: dbgbcr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=5 & dbgbcr14_el1 { export dbgbcr14_el1; } +CopReg: dbgbcr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=5 & dbgbcr15_el1 { export dbgbcr15_el1; } +CopReg: dbgwvr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=6 & dbgwvr0_el1 { export dbgwvr0_el1; } +CopReg: dbgwvr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=6 & dbgwvr1_el1 { export dbgwvr1_el1; } +CopReg: dbgwvr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=6 & dbgwvr2_el1 { export dbgwvr2_el1; } +CopReg: dbgwvr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=6 & dbgwvr3_el1 { export dbgwvr3_el1; } +CopReg: dbgwvr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=6 & dbgwvr4_el1 { export dbgwvr4_el1; } +CopReg: dbgwvr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=6 & dbgwvr5_el1 { export dbgwvr5_el1; } +CopReg: dbgwvr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=6 & dbgwvr6_el1 { export dbgwvr6_el1; } +CopReg: dbgwvr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=6 & dbgwvr7_el1 { export dbgwvr7_el1; } +CopReg: dbgwvr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=6 & dbgwvr8_el1 { export dbgwvr8_el1; } +CopReg: dbgwvr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=6 & dbgwvr9_el1 { export dbgwvr9_el1; } +CopReg: dbgwvr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=6 & dbgwvr10_el1 { export dbgwvr10_el1; } +CopReg: dbgwvr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=6 & dbgwvr11_el1 { export dbgwvr11_el1; } +CopReg: dbgwvr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=6 & dbgwvr12_el1 { export dbgwvr12_el1; } +CopReg: dbgwvr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=6 & dbgwvr13_el1 { export dbgwvr13_el1; } +CopReg: dbgwvr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=6 & dbgwvr14_el1 { export dbgwvr14_el1; } +CopReg: dbgwvr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=6 & dbgwvr15_el1 { export dbgwvr15_el1; } +CopReg: dbgwcr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=7 & dbgwcr0_el1 { export dbgwcr0_el1; } +CopReg: dbgwcr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=7 & dbgwcr1_el1 { export dbgwcr1_el1; } +CopReg: dbgwcr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=7 & dbgwcr2_el1 { export dbgwcr2_el1; } +CopReg: dbgwcr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=7 & dbgwcr3_el1 { export dbgwcr3_el1; } +CopReg: dbgwcr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=7 & dbgwcr4_el1 { export dbgwcr4_el1; } +CopReg: dbgwcr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=7 & dbgwcr5_el1 { export dbgwcr5_el1; } +CopReg: dbgwcr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=7 & dbgwcr6_el1 { export dbgwcr6_el1; } +CopReg: dbgwcr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=7 & dbgwcr7_el1 { export dbgwcr7_el1; } +CopReg: dbgwcr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=7 & dbgwcr8_el1 { export dbgwcr8_el1; } +CopReg: dbgwcr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=7 & dbgwcr9_el1 { export dbgwcr9_el1; } +CopReg: dbgwcr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=7 & dbgwcr10_el1 { export dbgwcr10_el1; } +CopReg: dbgwcr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=7 & dbgwcr11_el1 { export dbgwcr11_el1; } +CopReg: dbgwcr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=7 & dbgwcr12_el1 { export dbgwcr12_el1; } +CopReg: dbgwcr13_el1 
is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=7 & dbgwcr13_el1 { export dbgwcr13_el1; } +CopReg: dbgwcr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=7 & dbgwcr14_el1 { export dbgwcr14_el1; } +CopReg: dbgwcr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=7 & dbgwcr15_el1 { export dbgwcr15_el1; } +CopReg: mdrar_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=0 & mdrar_el1 { export mdrar_el1; } +CopReg: oslar_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=4 & oslar_el1 { export oslar_el1; } +CopReg: oslsr_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=1 & Op2_uimm3=4 & oslsr_el1 { export oslsr_el1; } +CopReg: osdlr_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=3 & Op2_uimm3=4 & osdlr_el1 { export osdlr_el1; } +CopReg: dbgprcr_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=4 & Op2_uimm3=4 & dbgprcr_el1 { export dbgprcr_el1; } +CopReg: dbgclaimset_el1 is Op0=2 & Op1_uimm3=0 & CRn=7 & CRm=8 & Op2_uimm3=6 & dbgclaimset_el1 { export dbgclaimset_el1; } +CopReg: dbgclaimclr_el1 is Op0=2 & Op1_uimm3=0 & CRn=7 & CRm=9 & Op2_uimm3=6 & dbgclaimclr_el1 { export dbgclaimclr_el1; } +CopReg: dbgauthstatus_el1 is Op0=2 & Op1_uimm3=0 & CRn=7 & CRm=14 & Op2_uimm3=6 & dbgauthstatus_el1 { export dbgauthstatus_el1; } +CopReg: mdccsr_el0 is Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=1 & Op2_uimm3=0 & mdccsr_el0 { export mdccsr_el0; } +CopReg: dbgdtr_el0 is Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=4 & Op2_uimm3=0 & dbgdtr_el0 { export dbgdtr_el0; } +CopReg: dbgvcr32_el2 is Op0=2 & Op1_uimm3=4 & CRn=0 & CRm=7 & Op2_uimm3=0 & dbgvcr32_el2 { export dbgvcr32_el2; } +# The SysReg document implies that GMID_EL1 can only be read - the doc only provides pseudocode for read access. +# However, the register is in this block (without a required value for 'l') because that might not be fully accurate. 
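The CopReg table above and the `mrs`/`msr` constructors further below share one encoding: each row pins a named system register to an (Op0, Op1, CRn, CRm, Op2) tuple, and the constructors constrain those same fields in bits 5-20 of the instruction word (b_2431=0xd5, `l` at bit 21). A minimal standalone sketch of that packing, assuming a hypothetical `encode_mrs` helper that is not part of this patch, using the tpidr_el0 row from the table as the worked example:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical helper (illustration only): packs the five CopReg fields into
// an MRS instruction word using the bit positions the constructors constrain
// (b_2431=0xd5, b_2223=0, l at bit 21, then Op0, Op1, CRn, CRm, Op2, and the
// destination register Rt in the low five bits).
static uint32_t encode_mrs(uint32_t op0, uint32_t op1, uint32_t crn,
                           uint32_t crm, uint32_t op2, uint32_t rt)
{
    return (0xd5u << 24)       // b_2431
         | (1u << 21)          // l=1: MRS (system register read)
         | ((op0 & 3u) << 19)  // Op0 (values 2 and 3 both occur in the table)
         | ((op1 & 7u) << 16)  // Op1_uimm3
         | ((crn & 15u) << 12) // CRn
         | ((crm & 15u) << 8)  // CRm
         | ((op2 & 7u) << 5)   // Op2_uimm3
         | (rt & 31u);         // Rt_GPR64
}

int main()
{
    // The tpidr_el0 row above reads Op0=3, Op1=3, CRn=13, CRm=0, Op2=2,
    // so "mrs x0, tpidr_el0" should pack to 0xd53bd040.
    std::printf("mrs x0, tpidr_el0 -> 0x%08x\n",
                (unsigned)encode_mrs(3, 3, 13, 0, 2, 0));
    return 0;
}
```

Encodings with no matching table row fall through to the generic `sreg(...)` constructors below, which dispatch to the UnkSytemRegRead/UnkSytemRegWrite pcodeops instead of a named register.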
+CopReg: gmid_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=4 & gmid_el1 { export gmid_el1; } +CopReg: ssbs is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=2 & Op2_uimm3=6 & ssbs { export ssbs; } +} # with : (l=0 | l=1) { + +CopReg: dbgdtrrx_el0 is l=0 & Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=5 & Op2_uimm3=0 & dbgdtrrx_el0 { export dbgdtrrx_el0; } +CopReg: dbgdtrtx_el0 is l=1 & Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=5 & Op2_uimm3=0 & dbgdtrtx_el0 { export dbgdtrtx_el0; } + +CopReg: "sreg("^Op0^", "^Op1_uimm3^", c"^CRn^", c"^CRm^", "^Op2_uimm3^")" is l=1 & Op0 & Op1_uimm3 & CRn & CRm & Op2_uimm3 { tmp:8 = UnkSytemRegRead(Op0:1, Op1_uimm3:1, CRn:1, CRm:1, Op2_uimm3:1); export tmp; } +CopReg: "sreg("^Op0^", "^Op1_uimm3^", c"^CRn^", c"^CRm^", "^Op2_uimm3^")" is l=0 & Op0 & Op1_uimm3 & CRn & CRm & Op2_uimm3 & Rt_GPR64 { tmp:8 = UnkSytemRegWrite(Op0:1, Op1_uimm3:1, CRn:1, CRm:1, Op2_uimm3:1, Rt_GPR64); export tmp; } + +PState_pstate_op: "DAIFSet" is Op1_uimm3=3 & Op2_uimm3=6 & CRm { daif = (CRm << 6) | daif; } +PState_pstate_op: "DAIFClr" is Op1_uimm3=3 & Op2_uimm3=7 & CRm { tmp:8 = CRm; daif = (~(tmp << 6)) & daif; } +PState_pstate_op: "PState.UAO" is Op1_uimm3=0 & Op2_uimm3=3 & CRm { tmp:8 = CRm; uao = tmp & 1; } +PState_pstate_op: "PState.PAN" is Op1_uimm3=0 & Op2_uimm3=4 & CRm { tmp:8 = CRm; pan = tmp & 1; } +PState_pstate_op: "PState.SP" is Op1_uimm3=0 & Op2_uimm3=5 & CRm { tmp:8 = CRm; spsel = tmp & 1; } +PState_pstate_op: "PState.TCO" is Op1_uimm3=3 & Op2_uimm3=4 & CRm { tmp:8 = CRm; tco = tmp & 1; } + +# C6.2.193 MRS page C6-1125 line 62819 MATCH xd5300000/mask=xfff00000 +# CONSTRUCT xd5200000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd5200000/mask=xffe00000 --status noqemu + +:mrs Rt_GPR64, CopReg +is b_2431=0xd5 & b_2223=0 & l=1 & CopReg & Rt_GPR64 +{ + Rt_GPR64 = CopReg; +} + +# C6.2.194 MSR (immediate) page C6-1126 line 62879 MATCH xd500401f/mask=xfff8f01f +# C6.2.50 CFINV page C6-860 line 48145 MATCH xd500401f/mask=xfffff0ff +# CONSTRUCT xd500401f/mask=xfff8f01f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd500401f/mask=xfff8f01f --status nodest + +:msr PState_pstate_op, CRm_uimm4 +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & PState_pstate_op & CRn=0x4 & CRm_uimm4 & Rt=0x1f +{ +} + +# C6.2.195 MSR (register) page C6-1129 line 63039 MATCH xd5100000/mask=xfff00000 +# CONSTRUCT xd5000000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd5000000/mask=xffe00000 --status noqemu + +:msr CopReg, Rt_GPR64 +is b_2431=0xd5 & b_2223=0 & l=0 & CopReg & Rt_GPR64 +{ + CopReg = Rt_GPR64; +} + +# C6.2.196 MSUB page C6-1130 line 63100 MATCH x1b008000/mask=x7fe08000 +# C6.2.184 MNEG page C6-1108 line 62017 MATCH x1b00fc00/mask=x7fe0fc00 +# CONSTRUCT x1b008000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x1b008000/mask=xffe08000 --status pass + +:msub Rd_GPR32, Rn_GPR32, Rm_GPR32, Ra_GPR32 +is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=1 & Ra_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = Rn_GPR32 * Rm_GPR32; + tmp_1:4 = Ra_GPR32 - tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.196 MSUB page C6-1130 line 63100 MATCH x1b008000/mask=x7fe08000 +# C6.2.184 MNEG page C6-1108 line 62017 MATCH x1b00fc00/mask=x7fe0fc00 +# CONSTRUCT x9b008000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9b008000/mask=xffe08000 --status pass + +:msub Rd_GPR64, Rn_GPR64, Rm_GPR64, Ra_GPR64 +is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=1 & Ra_GPR64 & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = Rn_GPR64 * Rm_GPR64; + tmp_1:8 
= Ra_GPR64 - tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.197 MUL page C6-1132 line 63209 MATCH x1b007c00/mask=x7fe0fc00 +# C6.2.183 MADD page C6-1106 line 61907 MATCH x1b000000/mask=x7fe08000 +# CONSTRUCT x1b007c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x1b007c00/mask=xffe0fc00 --status pass + +:mul Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = Rn_GPR32 * Rm_GPR32; + Rd_GPR64 = zext(tmp_2); +} + +# C6.2.197 MUL page C6-1132 line 63209 MATCH x1b007c00/mask=x7fe0fc00 +# C6.2.183 MADD page C6-1106 line 61907 MATCH x1b000000/mask=x7fe08000 +# CONSTRUCT x9b007c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9b007c00/mask=xffe0fc00 --status pass + +:mul Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = Rn_GPR64 * Rm_GPR64; + Rd_GPR64 = tmp_2; +} + +# C6.2.198 MVN page C6-1133 line 63282 MATCH x2a2003e0/mask=x7f2003e0 +# C6.2.204 ORN (shifted register) page C6-1144 line 63797 MATCH x2a200000/mask=x7f200000 +# CONSTRUCT x2a2003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x2a2003e0/mask=xff2003e0 --status pass + +:mvn Rd_GPR32, RegShift32Log +is sf=0 & opc=1 & b_2428=0xa & N=1 & RegShift32Log & Rn=0x1f & Rd_GPR32 & Rd_GPR64 +{ + tmp_1:4 = ~RegShift32Log; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.198 MVN page C6-1133 line 63282 MATCH x2a2003e0/mask=x7f2003e0 +# C6.2.204 ORN (shifted register) page C6-1144 line 63797 MATCH x2a200000/mask=x7f200000 +# CONSTRUCT xaa2003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xaa2003e0/mask=xff2003e0 --status pass + +:mvn Rd_GPR64, RegShift64Log +is sf=1 & opc=1 & b_2428=0xa & N=1 & Rm_GPR64 & RegShift64Log & Rn=0x1f & Rd_GPR64 +{ + tmp_1:8 = ~RegShift64Log; + Rd_GPR64 = tmp_1; +} + +# C6.2.199 NEG (shifted register) page C6-1135 line 63379 MATCH x4b0003e0/mask=x7f2003e0 +# C6.2.310 SUB (shifted register) page C6-1335 line 74131 MATCH x4b000000/mask=x7f200000 +# CONSTRUCT x4b0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x4b0003e0/mask=xff2003e0 --status pass + +:neg Rd_GPR32, RegShift32 +is sf=0 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift32 & Rn=0x1f & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32; + tmp_1:4 = - tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.199 NEG (shifted register) page C6-1135 line 63379 MATCH x4b0003e0/mask=x7f2003e0 +# C6.2.310 SUB (shifted register) page C6-1335 line 74131 MATCH x4b000000/mask=x7f200000 +# CONSTRUCT xcb0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xcb0003e0/mask=xff2003e0 --status pass + +:neg Rd_GPR64, RegShift64 +is sf=1 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift64 & Rn=0x1f & Rd_GPR64 +{ + tmp_2:8 = RegShift64; + tmp_1:8 = - tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0 +# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f +# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000 +# CONSTRUCT x6b0003e0/mask=xff2003e0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x6b0003e0/mask=xff2003e0 --status pass --comment "flags" + +:negs Rd_GPR32, RegShift32 +is sf=0 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift32 & Rn=0x1f & Rd_GPR32 & Rd & Rd_GPR64 +{ + tmp_2:4 = RegShift32; + subflags0(tmp_2); + tmp_1:4 = 0:4 - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + 
affectflags(); +} + +# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0 +# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f +# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000 +# CONSTRUCT xeb0003e0/mask=xff2003e0 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xeb0003e0/mask=xff2003e0 --status pass --comment "flags" + +:negs Rd_GPR64, RegShift64 +is sf=1 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift64 & Rn=0x1f & Rd_GPR64 & Rd +{ + tmp_2:8 = RegShift64; + subflags0(tmp_2); + tmp_1:8 = 0:8 - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectflags(); +} + +# C6.2.201 NGC page C6-1139 line 63573 MATCH x5a0003e0/mask=x7fe0ffe0 +# C6.2.230 SBC page C6-1186 line 66053 MATCH x5a000000/mask=x7fe0fc00 +# CONSTRUCT x5a0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x5a0003e0/mask=xffe0ffe0 --status pass --comment "flags" + +:ngc Rd_GPR32, Rm_GPR32 +is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR32 & opcode2=0x0 & Rn=0x1f & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rm_GPR32 + zext(!CY); + Rd_GPR64 = zext(-tmp); +} + +# C6.2.201 NGC page C6-1139 line 63573 MATCH x5a0003e0/mask=x7fe0ffe0 +# C6.2.230 SBC page C6-1186 line 66053 MATCH x5a000000/mask=x7fe0fc00 +# CONSTRUCT xda0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xda0003e0/mask=xffe0ffe0 --status pass --comment "flags" + +:ngc Rd_GPR64, Rm_GPR64 +is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR64 & opcode2=0x0 & Rn=0x1f & Rd_GPR64 +{ + tmp:8 = Rm_GPR64 + zext(!CY); + Rd_GPR64 = -tmp; +} + +# C6.2.202 NGCS page C6-1141 line 63660 MATCH x7a0003e0/mask=x7fe0ffe0 +# C6.2.231 SBCS page C6-1188 line 66152 MATCH x7a000000/mask=x7fe0fc00 +# CONSTRUCT x7a0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x7a0003e0/mask=xffe0ffe0 --status pass --comment "flags" + +:ngcs Rd_GPR32, Rm_GPR32 +is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rn=0x1f & opcode2=0x0 & Rm_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rm_GPR32 + zext(!CY); + add_with_carry_flags(0,~tmp); + Rd_GPR64 = zext(-tmp); + resultflags(Rd_GPR32); + affectflags(); +} + +# C6.2.202 NGCS page C6-1141 line 63660 MATCH x7a0003e0/mask=x7fe0ffe0 +# C6.2.231 SBCS page C6-1188 line 66152 MATCH x7a000000/mask=x7fe0fc00 +# CONSTRUCT xfa0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xfa0003e0/mask=xffe0ffe0 --status pass --comment "flags" + +:ngcs Rd_GPR64, Rm_GPR64 +is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rn=0x1f & opcode2=0x0 & Rm_GPR64 & Rd_GPR64 +{ + tmp:8 = Rm_GPR64 + zext(!CY); + add_with_carry_flags(0,~tmp); + Rd_GPR64 = -tmp; + resultflags(Rd_GPR64); + affectflags(); +} + +# C6.2.203 NOP page C6-1143 line 63747 MATCH xd503201f/mask=xffffffff +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503201f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503201f/mask=xffffffff --status nodest + +:nop +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=0 & Rt=0x1f +{ +} + +# C6.2.204 ORN (shifted register) page C6-1144 line 63797 MATCH x2a200000/mask=x7f200000 +# C6.2.198 MVN page C6-1133 line 63282 MATCH x2a2003e0/mask=x7f2003e0 +# CONSTRUCT x2a200000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x2a200000/mask=xff200000 --status pass + +:orn Rd_GPR32, Rn_GPR32, RegShift32Log +is sf=0 & opc=1 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_3:4 = RegShift32Log; + tmp_2:4 = tmp_3 ^ -1:4; + tmp_1:4 = Rn_GPR32 | tmp_2; + 
Rd_GPR64 = zext(tmp_1); +} + +# C6.2.204 ORN (shifted register) page C6-1144 line 63797 MATCH x2a200000/mask=x7f200000 +# C6.2.198 MVN page C6-1133 line 63282 MATCH x2a2003e0/mask=x7f2003e0 +# CONSTRUCT xaa200000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xaa200000/mask=xff200000 --status pass + +:orn Rd_GPR64, Rn_GPR64, RegShift64Log +is sf=1 & opc=1 & b_2428=0xa & N=1 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_3:8= RegShift64Log; + tmp_2:8 = tmp_3 ^ -1:8; + tmp_1:8 = Rn_GPR64 | tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.205 ORR (immediate) page C6-1146 line 63910 MATCH x32000000/mask=x7f800000 +# C6.2.188 MOV (bitmask immediate) page C6-1115 line 62360 MATCH x320003e0/mask=x7f8003e0 +# CONSTRUCT x32000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x32000000/mask=xff800000 --status pass + +:orr Rd_GPR32wsp, Rn_GPR32, DecodeWMask32 +is sf=0 & opc=1 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_1:4 = Rn_GPR32 | DecodeWMask32; + Rd_GPR64xsp = zext(tmp_1); +} + +# C6.2.205 ORR (immediate) page C6-1146 line 63910 MATCH x32000000/mask=x7f800000 +# C6.2.188 MOV (bitmask immediate) page C6-1115 line 62360 MATCH x320003e0/mask=x7f8003e0 +# CONSTRUCT xb2000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb2000000/mask=xff800000 --status pass + +:orr Rd_GPR64xsp, Rn_GPR64, DecodeWMask64 +is sf=1 & opc=1 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64xsp +{ + tmp_1:8 = Rn_GPR64 | DecodeWMask64; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.206 ORR (shifted register) page C6-1148 line 64011 MATCH x2a000000/mask=x7f200000 +# C6.2.189 MOV (register) page C6-1117 line 62449 MATCH x2a0003e0/mask=x7fe0ffe0 +# CONSTRUCT x2a000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x2a000000/mask=xff200000 --status pass + +:orr Rd_GPR32, Rn_GPR32, RegShift32Log +is b_31=0 & b_2430=0b0101010 & b_21=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32Log; + tmp_1:4 = Rn_GPR32 | tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.206 ORR (shifted register) page C6-1148 line 64011 MATCH x2a000000/mask=x7f200000 +# C6.2.189 MOV (register) page C6-1117 line 62449 MATCH x2a0003e0/mask=x7fe0ffe0 +# CONSTRUCT xaa000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xaa000000/mask=xff200000 --status pass + +:orr Rd_GPR64, Rn_GPR64, RegShift64Log +is b_31=1 & b_2430=0b0101010 & b_21=0 & RegShift64Log & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = RegShift64Log; + tmp_1:8 = Rn_GPR64 | tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.207 PACDA, PACDZA page C6-1150 line 64122 MATCH xdac10800/mask=xffffdc00 +# CONSTRUCT xdac10800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac10800/mask=xfffffc00 --status noqemu +# z == 0 pacda variant + +:pacda Rd_GPR64, Rn_GPR64xsp +is pacda__PACpart & b_1431=0b110110101100000100 & b_1012=0b010 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build pacda__PACpart; +} + +# C6.2.207 PACDA, PACDZA page C6-1150 line 64122 MATCH xdac10800/mask=xffffdc00 +# CONSTRUCT xdac12be0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac12be0/mask=xffffffe0 --status noqemu +# z == 1 pacdza variant + +:pacdza Rd_GPR64 +is pacdza__PACpart & b_1431=0b110110101100000100 & b_1012=0b010 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build pacdza__PACpart; +} + +# C6.2.208 PACDB, PACDZB page C6-1151 line 64193 MATCH xdac10c00/mask=xffffdc00 +# CONSTRUCT xdac10c00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac10c00/mask=xfffffc00 --status noqemu +# z == 0 pacdb 
variant + +:pacdb Rd_GPR64, Rn_GPR64xsp +is pacdb__PACpart & b_1431=0b110110101100000100 & b_1012=0b011 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build pacdb__PACpart; +} + +# C6.2.208 PACDB, PACDZB page C6-1151 line 64193 MATCH xdac10c00/mask=xffffdc00 +# CONSTRUCT xdac12fe0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac12fe0/mask=xffffffe0 --status noqemu +# z == 1 pacdzb variant + +:pacdzb Rd_GPR64 +is pacdzb__PACpart & b_1431=0b110110101100000100 & b_1012=0b011 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build pacdzb__PACpart; +} + +# C6.2.209 PACGA page C6-1152 line 64264 MATCH x9ac03000/mask=xffe0fc00 +# CONSTRUCT x9ac03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x9ac03000/mask=xffe0fc00 --status noqemu + +:pacga Rd_GPR64, Rn_GPR64, Rm_GPR64xsp +is b_2131=0b10011010110 & b_1015=0b001100 & Rm_GPR64xsp & Rn_GPR64 & Rd_GPR64 +{ + # This operation, unlike all other PAC operations, does not put its output in + # the same register as its first input. This means that putting a "noclobber" + # variant on this operation would violate the definition of PACGA. + Rd_GPR64 = pacga(Rn_GPR64, Rm_GPR64xsp); +} + +# C6.2.210 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1153 line 64322 MATCH xdac10000/mask=xffffdc00 +# CONSTRUCT xdac10000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac10000/mask=xfffffc00 --status noqemu +# Z == 0 PACIA variant + +:pacia Rd_GPR64, Rn_GPR64xsp +is pacia__PACpart & b_1431=0b110110101100000100 & b_1012=0b000 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build pacia__PACpart; +} + +# C6.2.210 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1153 line 64322 MATCH xdac10000/mask=xffffdc00 +# CONSTRUCT xdac123e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac123e0/mask=xffffffe0 --status noqemu +# Z == 1 && Rn == 11111 PACIZA variant + +:paciza Rd_GPR64 +is paciza__PACpart & b_1431=0b110110101100000100 & b_1012=0b000 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build paciza__PACpart; +} + +# C6.2.210 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1153 line 64322 MATCH xd503211f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503211f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503211f/mask=xffffffff --status nodest +# CRm == 0001 && op2 == 000 PACIA1716 variant + +:pacia1716 +is pacia1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b000 & b_0004=0b11111 +{ + build pacia1716__PACpart; +} + +# C6.2.210 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1153 line 64322 MATCH xd503211f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503233f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503233f/mask=xffffffff --status nodest +# CRm == 0011 && op2 == 001 PACIASP variant + +:paciasp +is paciasp__PACpart & PACIXSP_BTITARGETS & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b001 & b_0004=0b11111 +{ + build paciasp__PACpart; +} + +# C6.2.210 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1153 line 64322 MATCH xd503211f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503231f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503231f/mask=xffffffff --status nodest +# CRm == 0011 && op2 == 000 PACIAZ variant + +:paciaz +is paciaz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b000 & b_0004=0b11111 +{ + build paciaz__PACpart; +} + +# C6.2.211 PACIB, PACIB1716, PACIBSP,
PACIBZ, PACIZB page C6-1156 line 64481 MATCH xdac10400/mask=xffffdc00 +# CONSTRUCT xdac10400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac10400/mask=xfffffc00 --status noqemu +# Z == 0 PACIB variant + +:pacib Rd_GPR64, Rn_GPR64xsp +is pacib__PACpart & b_1431=0b110110101100000100 & b_1012=0b001 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 +{ + build pacib__PACpart; +} + +# C6.2.211 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1156 line 64481 MATCH xdac10400/mask=xffffdc00 +# CONSTRUCT xdac127e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac127e0/mask=xffffffe0 --status noqemu +# Z == 1 && Rn = 11111 PACIZB variant + +:pacizb Rd_GPR64 +is pacizb__PACpart & b_1431=0b110110101100000100 & b_1012=0b001 & b_13=1 & b_0509=0b11111 & Rd_GPR64 +{ + build pacizb__PACpart; +} + +# C6.2.211 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1156 line 64481 MATCH xd503215f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503215f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503215f/mask=xffffffff --status nodest +# CRm == 0001 && op2 == 010 PACIB1716 variant + +:pacib1716 +is pacib1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b010 & b_0004=0b11111 +{ + build pacib1716__PACpart; +} + +# C6.2.211 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1156 line 64481 MATCH xd503215f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503237f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503237f/mask=xffffffff --status nodest +# CRm == 0011 && op2 == 011 PACIBSP variant + +:pacibsp +is pacibsp__PACpart & PACIXSP_BTITARGETS & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b011 & b_0004=0b11111 +{ + build pacibsp__PACpart; +} + +# C6.2.211 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1156 line 64481 MATCH xd503215f/mask=xfffffddf +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503235f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503235f/mask=xffffffff --status nodest +# CRm == 0011 && op2 == 010 PACIBZ variant + +:pacibz +is pacibz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b010 & b_0004=0b11111 +{ + build pacibz__PACpart; +} + +# C6.2.212 PRFM (immediate) page C6-1158 line 64629 MATCH xf9800000/mask=xffc00000 +# CONSTRUCT xf9800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf9800000/mask=xffc00000 --status nomem + +:prfm aa_prefetch, addrIndexed +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrIndexed & b_0304 & b_0102 & b_00 & aa_prefetch +{ + addr:8 = addrIndexed; + hint:1 = b_0304; + target:1 = b_0102; + stream:1 = b_00; + Hint_Prefetch(addr, hint, target, stream); +} + +# C6.2.213 PRFM (literal) page C6-1160 line 64723 MATCH xd8000000/mask=xff000000 +# CONSTRUCT xd8000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd8000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" + +:prfm aa_prefetch, Addr19 +is size.ldstr=3 & b_2729=3 & v=0 & b_2425=0 & Addr19 & b_0304 & b_0102 & b_00 & aa_prefetch +{ + addr:8 = &Addr19; + hint:1 = b_0304; + target:1 = b_0102; + stream:1 = b_00; + Hint_Prefetch(addr, hint, target, stream); +} + +# C6.2.214 PRFM (register) page C6-1162 line 64806 MATCH xf8a00800/mask=xffe00c00 +# CONSTRUCT xf8a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8a00800/mask=xffe00c00 --status nomem + +:prfm aa_prefetch, 
addrIndexed +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & addrIndexed & b_1011=2 & b_0304 & b_0102 & b_00 & aa_prefetch +{ + addr:8 = addrIndexed; + hint:1 = b_0304; + target:1 = b_0102; + stream:1 = b_00; + Hint_Prefetch(addr, hint, target, stream); +} + +# C6.2.215 PRFUM page C6-1164 line 64920 MATCH xf8800000/mask=xffe00c00 +# CONSTRUCT xf8800000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8800000/mask=xfffffc00 --status nomem + +:prfum aa_prefetch, addr_SIMM9 +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & addr_SIMM9 & simm9=0 & b_1011=0 & b_0304 & b_0102 & b_00 & aa_prefetch +{ + addr:8 = addr_SIMM9; + hint:1 = b_0304; + target:1 = b_0102; + stream:1 = b_00; + Hint_Prefetch(addr, hint, target, stream); +} + +# C6.2.215 PRFUM page C6-1164 line 64920 MATCH xf8800000/mask=xffe00c00 +# CONSTRUCT xf8800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8800000/mask=xffe00c00 --status nomem + +:prfum aa_prefetch, addr_SIMM9 +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & addr_SIMM9 & b_1011=0 & b_0304 & b_0102 & b_00 & aa_prefetch +{ + addr:8 = addr_SIMM9; + hint:1 = b_0304; + target:1 = b_0102; + stream:1 = b_00; + Hint_Prefetch(addr, hint, target, stream); +} + +# C6.2.218 RBIT page C6-1168 line 65101 MATCH x5ac00000/mask=x7ffffc00 +# CONSTRUCT x5ac00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x5ac00000/mask=xfffffc00 --status pass + +:rbit Rd_GPR32, Rn_GPR32 +is sf=0 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + # The algorithm swaps 1, 2, 4, 8 bits, etc. + local tmp:4 = Rn_GPR32; + tmp = (((tmp & 0xaaaaaaaa) >> 1) | ((tmp & 0x55555555) << 1)); + tmp = (((tmp & 0xcccccccc) >> 2) | ((tmp & 0x33333333) << 2)); + tmp = (((tmp & 0xf0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f) << 4)); + tmp = (((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8)); + tmp = ((tmp >> 16) | (tmp << 16)); + Rd_GPR64 = zext(tmp); +} + +# C6.2.218 RBIT page C6-1168 line 65101 MATCH x5ac00000/mask=x7ffffc00 +# CONSTRUCT xdac00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac00000/mask=xfffffc00 --status pass + +:rbit Rd_GPR64, Rn_GPR64 +is sf=1 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x0 & Rn_GPR64 & Rd_GPR64 +{ + # The algorithm swaps 1, 2, 4, 8 bits, etc. + local tmp:8 = Rn_GPR64; + tmp = (((tmp & 0xaaaaaaaaaaaaaaaa) >> 1) | ((tmp & 0x5555555555555555) << 1)); + tmp = (((tmp & 0xcccccccccccccccc) >> 2) | ((tmp & 0x3333333333333333) << 2)); + tmp = (((tmp & 0xf0f0f0f0f0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f0f0f0f0f) << 4)); + tmp = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); + tmp = (((tmp & 0xffff0000ffff0000) >> 16) | ((tmp & 0x0000ffff0000ffff) << 16)); + Rd_GPR64 = ((tmp >> 32) | (tmp << 32)); +} + +# C6.2.219 RET page C6-1169 line 65173 MATCH xd65f0000/mask=xfffffc1f +# CONSTRUCT xd65f0000/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd65f0000/mask=xfffffc1f --status nodest + +:ret Rn_GPR64 +is b_2531=0x6b & b_2324=0 & b_2122=2 & b_1620=0x1f & b_1015=0 & Rn_GPR64 & b_0004=0 +{ + pc = Rn_GPR64; + return [pc]; +} + +# C6.2.219 RET page C6-1169 line 65173 MATCH xd65f0000/mask=xfffffc1f +# CONSTRUCT xd65f03c0/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd65f03c0/mask=xffffffff --status nodest + +:ret +is b_2531=0x6b & b_2324=0 & b_2122=2 & b_1620=0x1f & b_1015=0 & aa_Xn=30 & b_0004=0 +{ + pc = x30; + return [pc]; +} + +# C6.2.220 RETAA, RETAB
page C6-1170 line 65226 MATCH xd65f0bff/mask=xfffffbff +# CONSTRUCT xd65f0bff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd65f0bff/mask=xffffffff --status nodest +# M == 0 RETAA variant + +:retaa +is retaa__PACpart & b_1131=0b110101100101111100001 & b_0009=0b1111111111 & b_10=0 +{ + build retaa__PACpart; + pc = x30; + return [pc]; +} + +# C6.2.220 RETAA, RETAB page C6-1170 line 65226 MATCH xd65f0bff/mask=xfffffbff +# CONSTRUCT xd65f0fff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd65f0fff/mask=xffffffff --status nodest +# M == 1 RETAB variant + +:retab +is retab__PACpart & b_1131=0b110101100101111100001 & b_0009=0b1111111111 & b_10=1 +{ + build retab__PACpart; + pc = x30; + return [pc]; +} + +# C6.2.221 REV page C6-1171 line 65289 MATCH x5ac00800/mask=x7ffff800 +# CONSTRUCT x5ac00800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x5ac00800/mask=xfffffc00 --status pass +# sf == 0 && opc == 10 32-bit variant (3210 -> 0123) + +:rev Rd_GPR32, Rn_GPR32 +is b_1230=0b1011010110000000000 & b_31=0 & b_1011=0b10 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + local tmp:4 = Rn_GPR32; + tmp = (((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8)); + tmp = ((tmp >> 16) | (tmp << 16)); + Rd_GPR64 = zext(tmp); +} + +# C6.2.221 REV page C6-1171 line 65289 MATCH x5ac00800/mask=x7ffff800 +# C6.2.224 REV64 page C6-1177 line 65585 MATCH xdac00c00/mask=xfffffc00 +# CONSTRUCT xdac00c00/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xdac00c00/mask=xfffffc00 --status pass +# sf == 1 && opc == 11 64-bit variant (76543210 -> 01234567) +# NB equivalent to REV64, which is never the preferred disassembly + +:rev Rd_GPR64, Rn_GPR64 +is b_1230=0b1011010110000000000 & b_31=1 & b_1011=0b11 & Rn_GPR64 & Rd_GPR64 +{ + local tmp:8 = Rn_GPR64; + tmp = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); + tmp = (((tmp & 0xffff0000ffff0000) >> 16) | ((tmp & 0x0000ffff0000ffff) << 16)); + Rd_GPR64 = ((tmp >> 32) | (tmp << 32)); +} + +# C6.2.222 REV16 page C6-1173 line 65394 MATCH x5ac00400/mask=x7ffffc00 +# CONSTRUCT x5ac00400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x5ac00400/mask=xfffffc00 --status pass +# sf == 0 (and opc == 01) 32-bit variant (3210 -> 2301) + +:rev16 Rd_GPR32, Rn_GPR32 +is b_1230=0b1011010110000000000 & b_31=0 & b_1011=0b01 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + local tmp:4 = Rn_GPR32; + tmp = (((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8)); + Rd_GPR64 = zext(tmp); +} + +# C6.2.222 REV16 page C6-1173 line 65394 MATCH x5ac00400/mask=x7ffffc00 +# CONSTRUCT xdac00400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac00400/mask=xfffffc00 --status pass +# sf == 1 (and opc=01) 64-bit variant (76543210 -> 67452301) + +:rev16 Rd_GPR64, Rn_GPR64 +is b_1230=0b1011010110000000000 & b_31=1 & b_1011=0b01 & Rn_GPR64 & Rd_GPR64 +{ + local tmp:8 = Rn_GPR64; + Rd_GPR64 = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); +} + +# C6.2.223 REV32 page C6-1175 line 65496 MATCH xdac00800/mask=xfffffc00 +# C6.2.221 REV page C6-1171 line 65289 MATCH x5ac00800/mask=x7ffff800 +# CONSTRUCT xdac00800/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xdac00800/mask=xfffffc00 --status pass +# sf == 1 (and opc == 10) 64-bit variant (76543210 -> 45670123) + +:rev32 Rd_GPR64, Rn_GPR64 +is b_1230=0b1011010110000000000 & b_31=1 & b_1011=0b10 & Rn_GPR64 & Rd_GPR64 +{ + local tmp:8 = Rn_GPR64; + tmp = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); + Rd_GPR64 = (((tmp & 
0xffff0000ffff0000) >> 16) | ((tmp & 0x0000ffff0000ffff) << 16)); +} + +# C6.2.225 RMIF page C6-1178 line 65649 MATCH xba000400/mask=xffe07c10 +# CONSTRUCT xba000400/mask=xffe07c10 MATCHED 1 DOCUMENTED OPCODES + +:rmif Rn_GPR64, UImm6, NZCVImm_uimm4 +is b_2131=0b10111010000 & b_1014=0b00001 & b_04=0b0 & Rn_GPR64 & UImm6 & NZCVImm_uimm4 +{ + tmp:8 = Rn_GPR64 >> UImm6; + condMask:1 = NZCVImm_uimm4; + set_NZCV(tmp,condMask); +} + +# C6.2.226 ROR (immediate) page C6-1179 line 65715 MATCH x13800000/mask=x7fa00000 +# C6.2.90 EXTR page C6-923 line 51323 MATCH x13800000/mask=x7fa00000 +# CONSTRUCT x13800000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x13800000/mask=xffe00000 --status pass + +:ror Rd_GPR32, Rn_GPR32, LSB_bitfield32_imm +is sf=0 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=0 & b_21=0 & Rn=Rm & Rm_GPR32 & LSB_bitfield32_imm & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + result:4 = (Rn_GPR32 >> LSB_bitfield32_imm) | (Rn_GPR32 << (32 - LSB_bitfield32_imm)); + Rd_GPR64 = zext(result); +} + +# C6.2.226 ROR (immediate) page C6-1179 line 65715 MATCH x13800000/mask=x7fa00000 +# C6.2.90 EXTR page C6-923 line 51323 MATCH x13800000/mask=x7fa00000 +# CONSTRUCT x93c00000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x93c00000/mask=xffe00000 --status pass + +:ror Rd_GPR64, Rn_GPR64, LSB_bitfield64_imm +is sf=1 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=1 & b_21=0 & Rn=Rm & Rm_GPR64 & LSB_bitfield64_imm & Rn_GPR64 & Rd_GPR64 +{ + result:8 = (Rn_GPR64 >> LSB_bitfield64_imm) | (Rn_GPR64 << (64 - LSB_bitfield64_imm)); + Rd_GPR64 = result; +} + +# C6.2.227 ROR (register) page C6-1181 line 65808 MATCH x1ac02c00/mask=x7fe0fc00 +# C6.2.228 RORV page C6-1183 line 65903 MATCH x1ac02c00/mask=x7fe0fc00 +# CONSTRUCT x1ac02c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x1ac02c00/mask=xffe0fc00 --status pass + +:ror Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0xb & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + rval:4 = Rm_GPR32 & 0x1f; + tmp_1:4 = ( Rn_GPR32 >> rval) | ( Rn_GPR32 << ( 32 - rval ) ); + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.227 ROR (register) page C6-1181 line 65808 MATCH x1ac02c00/mask=x7fe0fc00 +# C6.2.228 RORV page C6-1183 line 65903 MATCH x1ac02c00/mask=x7fe0fc00 +# CONSTRUCT x9ac02c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9ac02c00/mask=xffe0fc00 --status pass + +:ror Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0xb & Rn_GPR64 & Rd_GPR64 +{ + rval:8 = Rm_GPR64 & 0x3f; + tmp_1:8 = ( Rn_GPR64 >> rval ) | ( Rn_GPR64 << ( 64 - rval ) ); + Rd_GPR64 = tmp_1; +} + +# C6.2.229 SB page C6-1185 line 65994 MATCH xd50330ff/mask=xfffff0ff +# CONSTRUCT xd50330ff/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES + +:sb +is b_1231=0xd5033 & b_0007=0xff +{ + SpeculationBarrier(); +} + +# C6.2.230 SBC page C6-1186 line 66053 MATCH x5a000000/mask=x7fe0fc00 +# C6.2.201 NGC page C6-1139 line 63573 MATCH x5a0003e0/mask=x7fe0ffe0 +# CONSTRUCT x5a000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x5a000000/mask=xffe0fc00 --status pass --comment "flags" + +:sbc Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR32 & opcode2=0x0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rm_GPR32 + zext(!CY); + Rd_GPR64 = zext(Rn_GPR32 - tmp); +} + +# C6.2.230 SBC page C6-1186 line 66053 MATCH x5a000000/mask=x7fe0fc00 +# C6.2.201 NGC page C6-1139 line 63573 MATCH x5a0003e0/mask=x7fe0ffe0 +# CONSTRUCT xda000000/mask=xffe0fc00 MATCHED 2 
DOCUMENTED OPCODES +# AUNIT --inst xda000000/mask=xffe0fc00 --status pass --comment "flags" + +:sbc Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR64 & opcode2=0x0 & Rn_GPR64 & Rd_GPR64 +{ + tmp:8 = Rm_GPR64 + zext(!CY); + Rd_GPR64 = Rn_GPR64 - tmp; +} + +# C6.2.231 SBCS page C6-1188 line 66152 MATCH x7a000000/mask=x7fe0fc00 +# C6.2.202 NGCS page C6-1141 line 63660 MATCH x7a0003e0/mask=x7fe0ffe0 +# CONSTRUCT x7a000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x7a000000/mask=xffe0fc00 --status pass --comment "flags" + +:sbcs Rd_GPR32, Rn_GPR32, Rm_GPR32 +is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rm_GPR32 & opcode2=0x0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rm_GPR32 + zext(!CY); + add_with_carry_flags(Rn_GPR32, ~Rm_GPR32); + Rd_GPR64 = zext(Rn_GPR32 - tmp); + resultflags(Rd_GPR32); + affectflags(); +} + +# C6.2.231 SBCS page C6-1188 line 66152 MATCH x7a000000/mask=x7fe0fc00 +# C6.2.202 NGCS page C6-1141 line 63660 MATCH x7a0003e0/mask=x7fe0ffe0 +# CONSTRUCT xfa000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xfa000000/mask=xffe0fc00 --status pass --comment "flags" + +:sbcs Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rm_GPR64 & opcode2=0x0 & Rn_GPR64 & Rd_GPR64 +{ + tmp:8 = Rm_GPR64 + zext(!CY); + add_with_carry_flags(Rn_GPR64, ~Rm_GPR64); + Rd_GPR64 = Rn_GPR64 - tmp; + resultflags(Rd_GPR64); + affectflags(); +} + +# C6.2.209 SBFIZ page C6-856 line 49751 KEEPWITH + +sbfiz_lsb: "#"^imm is ImmR [ imm = 32 - ImmR; ] { export *[const]:4 imm; } +sbfiz_width: "#"^imm is ImmS [ imm = ImmS + 1; ] { export *[const]:4 imm; } +sbfiz_lsb64: "#"^imm is ImmR [ imm = 64 - ImmR; ] { export *[const]:4 imm; } + +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00 +# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00 +# CONSTRUCT x13000002/mask=xffe08006 MATCHED 6 DOCUMENTED OPCODES +# AUNIT --inst x13000002/mask=xffe08006 --status pass +# Special alias case of sbfm for when ImmS < ImmR-1 +# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); + +:sbfiz Rd_GPR32, Rn_GPR32, sbfiz_lsb, sbfiz_width +is sbfiz_lsb & sbfiz_width & ImmS_LT_ImmR=1 & ImmS_EQ_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ImmSConst32 & DecodeWMask32 & DecodeTMask32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + local wmask:4 = DecodeWMask32; + local tmask:4 = DecodeTMask32; + local src:4 = Rn_GPR32; + local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; + local top:4 = (((src>>ImmSConst32)&0x1)*(-1))&0xffffffff; + Rd_GPR64 = zext((top & ~(tmask)) | (bot & tmask)); +} + +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00 +# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00 +# C6.2.323 SXTW page C6-1358 line 75401 MATCH x93407c00/mask=xfffffc00 +# CONSTRUCT x93400002/mask=xffc00006 
MATCHED 7 DOCUMENTED OPCODES
+# AUNIT --inst x93400002/mask=xffc00006 --status pass
+# Special alias case of sbfm for when ImmS < ImmR-1
+
+:sbfiz Rd_GPR64, Rn_GPR64, sbfiz_lsb64, sbfiz_width
+is sbfiz_lsb64 & sbfiz_width & ImmS_LT_ImmR=1 & ImmS_EQ_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & ImmSConst64 & DecodeWMask64 & DecodeTMask64 & Rn_GPR64 & Rd_GPR64
+{
+    local wmask:8 = DecodeWMask64;
+    local tmask:8 = DecodeTMask64;
+    local src:8 = Rn_GPR64;
+    local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask;
+    local top:8 = ((src>>ImmSConst64)&0x1)*(-1);
+    Rd_GPR64 = (top & ~(tmask)) | (bot & tmask);
+}
+
+# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000
+# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00
+# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000
+# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000
+# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00
+# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00
+# CONSTRUCT x13000000/mask=xffe08000 MATCHED 6 DOCUMENTED OPCODES
+# AUNIT --inst x13000000/mask=xffe08000 --status pass
+# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();
+
+:sbfm Rd_GPR32, Rn_GPR32, ImmRConst32, ImmSConst32
+is sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ImmSConst32 & DecodeWMask32 & DecodeTMask32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+    local wmask:4 = DecodeWMask32;
+    local tmask:4 = DecodeTMask32;
+    local src:4 = Rn_GPR32;
+    local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask;
+    local top:4 = (((src>>ImmSConst32)&0x1)*(-1))&0xffffffff;
+    Rd_GPR64 = zext((top & ~(tmask)) | (bot & tmask));
+}
+
+# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000
+# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00
+# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000
+# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000
+# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00
+# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00
+# C6.2.323 SXTW page C6-1358 line 75401 MATCH x93407c00/mask=xfffffc00
+# CONSTRUCT x93400000/mask=xffc00000 MATCHED 7 DOCUMENTED OPCODES
+# AUNIT --inst x93400000/mask=xffc00000 --status pass
+
+:sbfm Rd_GPR64, Rn_GPR64, ImmRConst64, ImmSConst64
+is sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & ImmSConst64 & DecodeWMask64 & DecodeTMask64 & Rn_GPR64 & Rd_GPR64
+{
+    local wmask:8 = DecodeWMask64;
+    local tmask:8 = DecodeTMask64;
+    local src:8 = Rn_GPR64;
+    local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask;
+    local top:8 = ((src>>ImmSConst64)&0x1)*(-1);
+    Rd_GPR64 = (top & ~(tmask)) | (bot & tmask);
+}
+
+# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000
+# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00
+# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000
+# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000
+# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00
+# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00
+# CONSTRUCT x13000004/mask=xffe08006 MATCHED 6 DOCUMENTED OPCODES
+# AUNIT --inst x13000004/mask=xffe08006 --status pass
+# Special cases when just getting the 0 bit
+# >> The old comment above is unclear; this constructor actually extracts a single bit from Rn
+# The SBFX alias of SBFM is used when ImmS >= ImmR
+# We split the '>=' into two separate cases
+# Here ImmS = ImmR (for 32-bit)
+# Alias for sbfm as determined by BFXPreferred()
+# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();
+
+:sbfx Rd_GPR32, Rn_GPR32, ImmRConst32, BFextractWidth32
+is ImmS_LT_ImmR=0 & ImmS_EQ_ImmR=1 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & BFextractWidth32 & ImmSConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+    tmp:4 = ((Rn_GPR32 >> ImmSConst32) & 0x1) * 0xffffffff;
+    Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000
+# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00
+# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000
+# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000
+# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00
+# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00
+# C6.2.323 SXTW page C6-1358 line 75401 MATCH x93407c00/mask=xfffffc00
+# CONSTRUCT x93400004/mask=xffc00006 MATCHED 7 DOCUMENTED OPCODES
+# AUNIT --inst x93400004/mask=xffc00006 --status pass
+# Now, the case where ImmS = ImmR (for 64-bit)
+
+:sbfx Rd_GPR64, Rn_GPR64, ImmRConst64, BFextractWidth64
+is ImmS_LT_ImmR=0 & ImmS_EQ_ImmR=1 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & BFextractWidth64 & ImmRConst64 & ImmSConst64 & Rn_GPR64 & Rd_GPR64
+{
+    tmp:8 = ((Rn_GPR64 >> ImmSConst64) & 0x1) * 0xffffffffffffffff;
+    Rd_GPR64 = tmp;
+}
+
+# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000
+# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00
+# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000
+# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000
+# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00
+# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00
+# CONSTRUCT x13000000/mask=xffe08006 MATCHED 6 DOCUMENTED OPCODES
+# AUNIT --inst x13000000/mask=xffe08006 --status pass
+# Now, the case where ImmS > ImmR (for 32-bit)
+# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();
+
+:sbfx Rd_GPR32, Rn_GPR32, ImmRConst32, BFextractWidth32
+is ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & BFextractWidth32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+    src:4 = Rn_GPR32;
+    tmp:4 = src << (31 - (ImmRConst32 + BFextractWidth32 - 1));
+    tmp = tmp s>> (32 - BFextractWidth32);
+    Rd_GPR64 = zext(tmp);
+}
+
+# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000
+# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00
+# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000
+# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000
+# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00
+# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00
+# C6.2.323 SXTW page C6-1358 line 75401 MATCH x93407c00/mask=xfffffc00
+# CONSTRUCT x93400000/mask=xffc00000 MATCHED 7 DOCUMENTED OPCODES
+# AUNIT --inst x93400000/mask=xffc00000 --status pass
+# Finally, the case where ImmS > ImmR (for 64-bit)
+
+:sbfx Rd_GPR64, Rn_GPR64, ImmRConst64, BFextractWidth64
+is sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & BFextractWidth64 & (ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0) & Rn_GPR64 & Rd_GPR64
+{
+    src:8 = Rn_GPR64;
+    tmp:8 = src << (63 - (ImmRConst64 + BFextractWidth64 - 1));
+    tmp = tmp s>> (64 - BFextractWidth64);
+    Rd_GPR64 = tmp;
+}
+
+# C6.2.235 SDIV page C6-1196 line 66577 MATCH x1ac00c00/mask=x7fe0fc00
+# CONSTRUCT x1ac00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x1ac00c00/mask=xffe0fc00 --status pass
+
+:sdiv Rd_GPR32, Rn_GPR32, Rm_GPR32
+is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x3 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+    local tmp_1:4 = 0;
+    if (Rm_GPR32 == 0) goto <done>;
+    tmp_1 = Rn_GPR32 s/ Rm_GPR32;
+    <done>
+    Rd_GPR64 = zext(tmp_1);
+}
+
+# C6.2.235 SDIV page C6-1196 line 66577 MATCH x1ac00c00/mask=x7fe0fc00
+# CONSTRUCT x9ac00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9ac00c00/mask=xffe0fc00 --status pass
+
+:sdiv Rd_GPR64, Rn_GPR64, Rm_GPR64
+is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x3 & Rn_GPR64 & Rd_GPR64
+{
+    local tmp_1:8 = 0;
+    if (Rm_GPR64 == 0) goto <done>;
+    tmp_1 = Rn_GPR64 s/ Rm_GPR64;
+    <done>
+    Rd_GPR64 = tmp_1;
+}
+
+# C6.2.236 SETF8, SETF16 page C6-1197 line 66645 MATCH x3a00080d/mask=xffffbc1f
+# CONSTRUCT x3a00080d/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES
+
+:setf8 aa_Wn
+is b_1531=0b00111010000000000 & b_14=0 & b_1013=0b0010 & b_0004=0b01101 & aa_Wn
+{
+    NG = ((aa_Wn:1 >> 7) & 1) == 1;
+    ZR = (aa_Wn:1 == 0);
+    OV = (((aa_Wn >> 7) & 1) ^ ((aa_Wn >> 8) & 1)) == 1;
+}
+
+# C6.2.236 SETF8, SETF16 page C6-1197 line 66645 MATCH x3a00080d/mask=xffffbc1f
+# CONSTRUCT x3a00480d/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES
+
+:setf16 aa_Wn
+is b_1531=0b00111010000000000 & b_14=1 & b_1013=0b0010 & b_0004=0b01101 & aa_Wn
+{
+    NG = ((aa_Wn:2 >> 15) & 1) == 1;
+    ZR = (aa_Wn:2 == 0);
+    OV = (((aa_Wn >> 15) & 1) ^ ((aa_Wn >> 16) & 1)) == 1;
+}
+
+# C6.2.237 SEV page C6-1198 line 66712 MATCH xd503209f/mask=xffffffff
+# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f
+# CONSTRUCT xd503209f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xd503209f/mask=xffffffff --status nodest
+
+:sev
+is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=4 & Rt=0x1f
+{
+    SendEvent();
+}
+
+# C6.2.238 SEVL page C6-1199 line 66746 MATCH xd50320bf/mask=xffffffff
+# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f
+# CONSTRUCT xd50320bf/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst xd50320bf/mask=xffffffff --status nodest
+
+:sevl
+is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=5 & Rt=0x1f
+{
+    SendEventLocally();
+}
+
+# C6.2.239 SMADDL page C6-1200 line 66780 MATCH x9b200000/mask=xffe08000
+# CONSTRUCT x9b200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9b200000/mask=xffe08000 --status pass
+
+:smaddl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=0 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64
+{
+    tmp_3:8 = sext(Rn_GPR32);
+    tmp_4:8 = sext(Rm_GPR32);
+    tmp_2:8 = tmp_3 * tmp_4;
+    tmp_1:8 = Ra_GPR64 + tmp_2;
+    Rd_GPR64 = tmp_1;
+}
+
+# C6.2.240 SMC page C6-1202 line 66869 MATCH xd4000003/mask=xffe0001f
+# CONSTRUCT xd4000003/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xd4000003/mask=xffe0001f --status nodest
+
+:smc imm16
+is b_2431=0xd4 & excCode=0 & imm16 & excCode2=0 & ll=3
+{
+    CallSecureMonitor(imm16:2);
+}
+
+# C6.2.241 SMNEGL page C6-1203 line 66918 MATCH x9b20fc00/mask=xffe0fc00
+# C6.2.242 SMSUBL page C6-1204 line 66982 MATCH x9b208000/mask=xffe08000
+# CONSTRUCT x9b20fc00/mask=xffe0fc00
MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9b20fc00/mask=xffe0fc00 --status pass + +:smnegl Rd_GPR64, Rn_GPR32, Rm_GPR32 +is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR32 & Rd_GPR64 +{ + tmp_3:8 = sext(Rn_GPR32); + tmp_4:8 = sext(Rm_GPR32); + tmp_2:8 = tmp_3 * tmp_4; + subflags0(tmp_2); + tmp_1:8 = -tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.242 SMSUBL page C6-1204 line 66982 MATCH x9b208000/mask=xffe08000 +# CONSTRUCT x9b208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x9b208000/mask=xffe08000 --status pass + +:smsubl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64 +is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=1 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64 +{ + tmp_3:8 = sext(Rn_GPR32); + tmp_4:8 = sext(Rm_GPR32); + tmp_2:8 = tmp_3 * tmp_4; + tmp_1:8 = Ra_GPR64 - tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.243 SMULH page C6-1206 line 67070 MATCH x9b400000/mask=xffe08000 +# CONSTRUCT x9b400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x9b400000/mask=xffe08000 --status pass +# To enforce SHOULD BE ONE fields add: b_1014=0b11111 + +:smulh Rd_GPR64, Rn_GPR64, Rm_GPR64 +is sf=1 & op.dp3=0 & b_2428=0x1b & op.dp3_op31=2 & Rm_GPR64 & op.dp3_o0=0 & Ra & Rn_GPR64 & Rd_GPR64 +{ + local tmpq:16 = sext(Rn_GPR64) * sext(Rm_GPR64); + Rd_GPR64 = tmpq(8); +} + +# C6.2.244 SMULL page C6-1207 line 67135 MATCH x9b207c00/mask=xffe0fc00 +# C6.2.239 SMADDL page C6-1200 line 66780 MATCH x9b200000/mask=xffe08000 +# CONSTRUCT x9b207c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x9b207c00/mask=xffe0fc00 --status pass + +:smull Rd_GPR64, Rn_GPR32, Rm_GPR32 +is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR32 & Rd_GPR64 +{ + tmp_3:8 = sext(Rn_GPR32); + tmp_4:8 = sext(Rm_GPR32); + tmp_2:8 = tmp_3 * tmp_4; + tmp_1:8 = tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.247 STADDB, STADDLB page C6-1211 line 67378 MATCH x3820001f/mask=xffa0fc1f +# C6.2.250 STCLRB, STCLRLB page C6-1217 line 67668 MATCH x3820101f/mask=xffa0fc1f +# C6.2.253 STEORB, STEORLB page C6-1223 line 67957 MATCH x3820201f/mask=xffa0fc1f +# C6.2.280 STSETB, STSETLB page C6-1276 line 70956 MATCH x3820301f/mask=xffa0fc1f +# C6.2.283 STSMAXB, STSMAXLB page C6-1282 line 71245 MATCH x3820401f/mask=xffa0fc1f +# C6.2.286 STSMINB, STSMINLB page C6-1288 line 71541 MATCH x3820501f/mask=xffa0fc1f +# C6.2.292 STUMAXB, STUMAXLB page C6-1300 line 72144 MATCH x3820601f/mask=xffa0fc1f +# C6.2.295 STUMINB, STUMINLB page C6-1306 line 72441 MATCH x3820701f/mask=xffa0fc1f +# C6.2.98 LDADDB, LDADDAB, LDADDALB, LDADDLB page C6-934 line 51959 MATCH x38200000/mask=xff20fc00 +# C6.2.117 LDCLRB, LDCLRAB, LDCLRALB, LDCLRLB page C6-969 line 53884 MATCH x38201000/mask=xff20fc00 +# C6.2.120 LDEORB, LDEORAB, LDEORALB, LDEORLB page C6-976 line 54306 MATCH x38202000/mask=xff20fc00 +# C6.2.146 LDSETB, LDSETAB, LDSETALB, LDSETLB page C6-1032 line 57673 MATCH x38203000/mask=xff20fc00 +# C6.2.149 LDSMAXB, LDSMAXAB, LDSMAXALB, LDSMAXLB page C6-1039 line 58095 MATCH x38204000/mask=xff20fc00 +# C6.2.152 LDSMINB, LDSMINAB, LDSMINALB, LDSMINLB page C6-1046 line 58517 MATCH x38205000/mask=xff20fc00 +# C6.2.161 LDUMAXB, LDUMAXAB, LDUMAXALB, LDUMAXLB page C6-1065 line 59617 MATCH x38206000/mask=xff20fc00 +# C6.2.164 LDUMINB, LDUMINAB, LDUMINALB, LDUMINLB page C6-1072 line 60039 MATCH x38207000/mask=xff20fc00 +# CONSTRUCT x3820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES +# AUNIT --inst x3820001f/mask=xffa08c1f --status nomem + +# size=0b00 (3031) 
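+
+# Note: the ST* atomic-store forms in this group (STADDB/STADDLB, STCLRB/STCLRLB,
+# STEORB, STSETB, STSMAXB, STSMINB, STUMAXB, STUMINB) are architectural aliases of
+# the corresponding LD*B instructions with the destination register forced to
+# WZR, hence the b_0004=0b11111 term in the pattern below and the single
+# constructor matching 16 documented opcodes. Illustrative encoding, derived
+# from the CONSTRUCT mask above rather than taken from the Arm ARM text:
+#   staddb w1, [x2]  ==  ldaddb w1, wzr, [x2]  ==  x3820001f | (Rs << 16) | (Rn << 5)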
+ +:st^ls_opc1^ls_lor^"b" aa_Ws, [Rn_GPR64xsp] +is b_3031=0b00 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc1 & ls_lor & aa_Ws & Rn_GPR64xsp +{ build ls_opc1; build ls_lor; } + +# C6.2.248 STADDH, STADDLH page C6-1213 line 67465 MATCH x7820001f/mask=xffa0fc1f +# C6.2.251 STCLRH, STCLRLH page C6-1219 line 67755 MATCH x7820101f/mask=xffa0fc1f +# C6.2.254 STEORH, STEORLH page C6-1225 line 68044 MATCH x7820201f/mask=xffa0fc1f +# C6.2.281 STSETH, STSETLH page C6-1278 line 71043 MATCH x7820301f/mask=xffa0fc1f +# C6.2.284 STSMAXH, STSMAXLH page C6-1284 line 71335 MATCH x7820401f/mask=xffa0fc1f +# C6.2.287 STSMINH, STSMINLH page C6-1290 line 71631 MATCH x7820501f/mask=xffa0fc1f +# C6.2.293 STUMAXH, STUMAXLH page C6-1302 line 72234 MATCH x7820601f/mask=xffa0fc1f +# C6.2.296 STUMINH, STUMINLH page C6-1308 line 72531 MATCH x7820701f/mask=xffa0fc1f +# C6.2.99 LDADDH, LDADDAH, LDADDALH, LDADDLH page C6-936 line 52084 MATCH x78200000/mask=xff20fc00 +# C6.2.118 LDCLRH, LDCLRAH, LDCLRALH, LDCLRLH page C6-971 line 54010 MATCH x78201000/mask=xff20fc00 +# C6.2.121 LDEORH, LDEORAH, LDEORALH, LDEORLH page C6-978 line 54432 MATCH x78202000/mask=xff20fc00 +# C6.2.147 LDSETH, LDSETAH, LDSETALH, LDSETLH page C6-1034 line 57799 MATCH x78203000/mask=xff20fc00 +# C6.2.150 LDSMAXH, LDSMAXAH, LDSMAXALH, LDSMAXLH page C6-1041 line 58221 MATCH x78204000/mask=xff20fc00 +# C6.2.153 LDSMINH, LDSMINAH, LDSMINALH, LDSMINLH page C6-1048 line 58643 MATCH x78205000/mask=xff20fc00 +# C6.2.162 LDUMAXH, LDUMAXAH, LDUMAXALH, LDUMAXLH page C6-1067 line 59743 MATCH x78206000/mask=xff20fc00 +# C6.2.165 LDUMINH, LDUMINAH, LDUMINALH, LDUMINLH page C6-1074 line 60165 MATCH x78207000/mask=xff20fc00 +# CONSTRUCT x7820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES +# AUNIT --inst x7820001f/mask=xffa08c1f --status nomem + +# size=0b01 (3031) + +:st^ls_opc2^ls_lor^"h" aa_Ws, [Rn_GPR64xsp] +is b_3031=0b01 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc2 & ls_lor & aa_Ws & Rn_GPR64xsp +{ build ls_opc2; build ls_lor; } + +# C6.2.249 STADD, STADDL page C6-1215 line 67552 MATCH xb820001f/mask=xbfa0fc1f +# C6.2.252 STCLR, STCLRL page C6-1221 line 67842 MATCH xb820101f/mask=xbfa0fc1f +# C6.2.255 STEOR, STEORL page C6-1227 line 68131 MATCH xb820201f/mask=xbfa0fc1f +# C6.2.282 STSET, STSETL page C6-1280 line 71130 MATCH xb820301f/mask=xbfa0fc1f +# C6.2.285 STSMAX, STSMAXL page C6-1286 line 71425 MATCH xb820401f/mask=xbfa0fc1f +# C6.2.288 STSMIN, STSMINL page C6-1292 line 71721 MATCH xb820501f/mask=xbfa0fc1f +# C6.2.294 STUMAX, STUMAXL page C6-1304 line 72324 MATCH xb820601f/mask=xbfa0fc1f +# C6.2.297 STUMIN, STUMINL page C6-1310 line 72621 MATCH xb820701f/mask=xbfa0fc1f +# C6.2.100 LDADD, LDADDA, LDADDAL, LDADDL page C6-938 line 52210 MATCH xb8200000/mask=xbf20fc00 +# C6.2.119 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-973 line 54136 MATCH xb8201000/mask=xbf20fc00 +# C6.2.122 LDEOR, LDEORA, LDEORAL, LDEORL page C6-980 line 54558 MATCH xb8202000/mask=xbf20fc00 +# C6.2.148 LDSET, LDSETA, LDSETAL, LDSETL page C6-1036 line 57925 MATCH xb8203000/mask=xbf20fc00 +# C6.2.151 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1043 line 58347 MATCH xb8204000/mask=xbf20fc00 +# C6.2.154 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1050 line 58769 MATCH xb8205000/mask=xbf20fc00 +# C6.2.163 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1069 line 59869 MATCH xb8206000/mask=xbf20fc00 +# C6.2.166 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1076 line 60291 MATCH xb8207000/mask=xbf20fc00 +# CONSTRUCT 
xb820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES +# AUNIT --inst xb820001f/mask=xffa08c1f --status nomem + +# size=0b10 (3031) + +:st^ls_opc4^ls_lor aa_Ws, [Rn_GPR64xsp] +is b_3031=0b10 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc4 & ls_lor & aa_Ws & Rn_GPR64xsp +{ build ls_opc4; build ls_lor; } + +# C6.2.249 STADD, STADDL page C6-1215 line 67552 MATCH xb820001f/mask=xbfa0fc1f +# C6.2.252 STCLR, STCLRL page C6-1221 line 67842 MATCH xb820101f/mask=xbfa0fc1f +# C6.2.255 STEOR, STEORL page C6-1227 line 68131 MATCH xb820201f/mask=xbfa0fc1f +# C6.2.282 STSET, STSETL page C6-1280 line 71130 MATCH xb820301f/mask=xbfa0fc1f +# C6.2.285 STSMAX, STSMAXL page C6-1286 line 71425 MATCH xb820401f/mask=xbfa0fc1f +# C6.2.288 STSMIN, STSMINL page C6-1292 line 71721 MATCH xb820501f/mask=xbfa0fc1f +# C6.2.294 STUMAX, STUMAXL page C6-1304 line 72324 MATCH xb820601f/mask=xbfa0fc1f +# C6.2.297 STUMIN, STUMINL page C6-1310 line 72621 MATCH xb820701f/mask=xbfa0fc1f +# C6.2.100 LDADD, LDADDA, LDADDAL, LDADDL page C6-938 line 52210 MATCH xb8200000/mask=xbf20fc00 +# C6.2.119 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-973 line 54136 MATCH xb8201000/mask=xbf20fc00 +# C6.2.122 LDEOR, LDEORA, LDEORAL, LDEORL page C6-980 line 54558 MATCH xb8202000/mask=xbf20fc00 +# C6.2.148 LDSET, LDSETA, LDSETAL, LDSETL page C6-1036 line 57925 MATCH xb8203000/mask=xbf20fc00 +# C6.2.151 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1043 line 58347 MATCH xb8204000/mask=xbf20fc00 +# C6.2.154 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1050 line 58769 MATCH xb8205000/mask=xbf20fc00 +# C6.2.163 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1069 line 59869 MATCH xb8206000/mask=xbf20fc00 +# C6.2.166 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1076 line 60291 MATCH xb8207000/mask=xbf20fc00 +# CONSTRUCT xf820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES +# AUNIT --inst xf820001f/mask=xffa08c1f --status nomem +# size=0b11 (3031) + +:st^ls_opc8^ls_lor aa_Xs, [Rn_GPR64xsp] +is b_3031=0b11 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc8 & ls_lor & aa_Xs & Rn_GPR64xsp +{ build ls_opc8; build ls_lor; } + +# C6.2.259 STLLRB page C6-1234 line 68590 MATCH x08800000/mask=xffe08000 +# CONSTRUCT x08800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x08800000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 +# size=0b00 (3031) + +:stllrb aa_Wt, [Rn_GPR64xsp] +is b_3031=0b00 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp +{ *:1 Rn_GPR64xsp = aa_Wt:1; LORelease(); } + +# C6.2.260 STLLRH page C6-1235 line 68653 MATCH x48800000/mask=xffe08000 +# CONSTRUCT x48800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x48800000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 +# size=0b01 (3031) + +:stllrh aa_Wt, [Rn_GPR64xsp] +is b_3031=0b01 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp +{ *:2 Rn_GPR64xsp = aa_Wt:2; LORelease(); } + +# C6.2.261 STLLR page C6-1236 line 68716 MATCH x88800000/mask=xbfe08000 +# CONSTRUCT x88800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x88800000/mask=xffe08000 --status nomem +# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 +# size=0b10 (3031) + +:stllr aa_Wt, [Rn_GPR64xsp] +is b_3031=0b10 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp +{ *:4 Rn_GPR64xsp = aa_Wt; LORelease(); } + +# C6.2.261 STLLR page C6-1236 line 68716 
MATCH x88800000/mask=xbfe08000
+# CONSTRUCT xc8800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8800000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+# size=0b11 (3031)
+
+:stllr aa_Xt, [Rn_GPR64xsp]
+is b_3031=0b11 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Xt & Rn_GPR64xsp
+{ *:8 Rn_GPR64xsp = aa_Xt; LORelease(); }
+
+# C6.2.262 STLR page C6-1238 line 68800 MATCH x88808000/mask=xbfe08000
+# CONSTRUCT xc8808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8808000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:stlr Rt_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR64
+{
+    *addrReg = Rt_GPR64;
+}
+
+# C6.2.262 STLR page C6-1238 line 68800 MATCH x88808000/mask=xbfe08000
+# CONSTRUCT x88808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88808000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:stlr Rt_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR32
+{
+    *addrReg = Rt_GPR32;
+}
+
+# C6.2.263 STLRB page C6-1239 line 68872 MATCH x08808000/mask=xffe08000
+# CONSTRUCT x08808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x08808000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:stlrb Rt_GPR32, addrReg
+is size.ldstr=0 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR32
+{
+    local tmp:4 = Rt_GPR32;
+    *addrReg = tmp:1;
+}
+
+# C6.2.264 STLRH page C6-1240 line 68933 MATCH x48808000/mask=xffe08000
+# CONSTRUCT x48808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x48808000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111
+
+:stlrh Rt_GPR32, addrReg
+is size.ldstr=1 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR32
+{
+    local tmp:4 = Rt_GPR32;
+    *addrReg = tmp:2;
+}
+
+# C6.2.265 STLUR page C6-1241 line 68994 MATCH x99000000/mask=xbfe00c00
+# CONSTRUCT x99000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+
+:stlur aa_Wt, addr_SIMM9
+is b_3031=0b10 & b_2129=0b011001000 & b_1011=0b00 & addr_SIMM9 & aa_Wt
+{
+    *addr_SIMM9 = aa_Wt;
+}
+
+# C6.2.265 STLUR page C6-1241 line 68994 MATCH x99000000/mask=xbfe00c00
+# CONSTRUCT xd9000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+
+:stlur aa_Xt, addr_SIMM9
+is b_3031=0b11 & b_2129=0b011001000 & b_1011=0b00 & addr_SIMM9 & aa_Xt
+{
+    *addr_SIMM9 = aa_Xt;
+}
+
+# C6.2.266 STLURB page C6-1243 line 69091 MATCH x19000000/mask=xffe00c00
+# CONSTRUCT x19000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# x19000000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR
+
+:stlurb aa_Wt, addr_SIMM9
+is b_2131=0b00011001000 & b_1011=0b00 & addr_SIMM9 & aa_Wt
+{
+    *addr_SIMM9 = aa_Wt:1;
+}
+
+# C6.2.267 STLURH page C6-1245 line 69176 MATCH x59000000/mask=xffe00c00
+# CONSTRUCT x59000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# x59000000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR
+
+:stlurh aa_Wt, addr_SIMM9
+is b_2131=0b01011001000 & b_1011=0b00 & addr_SIMM9 & aa_Wt
+{
+    *addr_SIMM9 = aa_Wt:2;
+}
+
+# C6.2.268 STLXP page C6-1247 line 69261 MATCH x88208000/mask=xbfe08000
+# CONSTRUCT xc8208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8208000/mask=xffe08000 --status nomem
+
+:stlxp Rs_GPR32, Rt_GPR64, Rt2_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=1 & Rt2_GPR64 & addrReg & Rt_GPR64 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR64;
+    *(addrReg + 8) = Rt2_GPR64;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.268 STLXP page C6-1247 line 69261 MATCH x88208000/mask=xbfe08000
+# CONSTRUCT x88208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88208000/mask=xffe08000 --status nomem
+
+:stlxp Rs_GPR32, Rt_GPR32, Rt2_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=1 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR32;
+    *(addrReg + 4) = Rt2_GPR32;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.269 STLXR page C6-1250 line 69429 MATCH x88008000/mask=xbfe08000
+# CONSTRUCT xc8008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8008000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stlxr Rs_GPR32, Rt_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR64 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR64;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.269 STLXR page C6-1250 line 69429 MATCH x88008000/mask=xbfe08000
+# CONSTRUCT x88008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88008000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stlxr Rs_GPR32, Rt_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR32;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.270 STLXRB page C6-1252 line 69575 MATCH x08008000/mask=xffe08000
+# CONSTRUCT x08008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x08008000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stlxrb Rs_GPR32, Rt_GPR32, addrReg
+is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    local tmp:4 = Rt_GPR32;
+    *addrReg = tmp:1;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.271 STLXRH page C6-1254 line 69703 MATCH x48008000/mask=xffe08000
+# CONSTRUCT x48008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x48008000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stlxrh Rs_GPR32, Rt_GPR32, addrReg
+is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    local tmp:4 = Rt_GPR32;
+    *addrReg = tmp:2;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.272 STNP page C6-1256 line 69837 MATCH x28000000/mask=x7fc00000
+# CONSTRUCT x28000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x28000000/mask=xffc00000 --status nomem
+
+:stnp Rt_GPR32, Rt2_GPR32, addrPairIndexed
+is
b_3031=0b00 & b_2229=0b10100000 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32 +{ + data1:4 = Rt_GPR32; + data2:4 = Rt2_GPR32; + build addrPairIndexed; + *addrPairIndexed = data1; + *(addrPairIndexed + 4) = data2; +} + +# C6.2.272 STNP page C6-1256 line 69837 MATCH x28000000/mask=x7fc00000 +# CONSTRUCT xa8000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xa8000000/mask=xffc00000 --status nomem + +:stnp Rt_GPR64, Rt2_GPR64, addrPairIndexed +is b_3031=0b10 & b_2229=0b10100000 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64 +{ + data1:8 = Rt_GPR64; + data2:8 = Rt2_GPR64; + build addrPairIndexed; + *addrPairIndexed = data1; + *(addrPairIndexed + 8) = data2; +} + +# C6.2.273 STP page C6-1258 line 69943 MATCH x28800000/mask=x7fc00000 +# C6.2.273 STP page C6-1258 line 69943 MATCH x29800000/mask=x7fc00000 +# C6.2.273 STP page C6-1258 line 69943 MATCH x29000000/mask=x7fc00000 +# C6.2.272 STNP page C6-1256 line 69837 MATCH x28000000/mask=x7fc00000 +# CONSTRUCT x28000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x28000000/mask=xfe400000 --status nomem + +:stp Rt_GPR32, Rt2_GPR32, addrPairIndexed +is b_3031=0b00 & b_2529=0b10100 & b_22=0b0 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32 +{ + data1:4 = Rt_GPR32; + data2:4 = Rt2_GPR32; + build addrPairIndexed; + *addrPairIndexed = data1; + *(addrPairIndexed + 4) = data2; +} + +# C6.2.273 STP page C6-1258 line 69943 MATCH x28800000/mask=x7fc00000 +# C6.2.273 STP page C6-1258 line 69943 MATCH x29800000/mask=x7fc00000 +# C6.2.273 STP page C6-1258 line 69943 MATCH x29000000/mask=x7fc00000 +# C6.2.272 STNP page C6-1256 line 69837 MATCH x28000000/mask=x7fc00000 +# CONSTRUCT xa8000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xa8000000/mask=xfe400000 --status nomem + +:stp Rt_GPR64, Rt2_GPR64, addrPairIndexed +is b_3031=0b10 & b_2529=0b10100 & b_22=0b0 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64 +{ + data1:8 = Rt_GPR64; + data2:8 = Rt2_GPR64; + build addrPairIndexed; + *addrPairIndexed = data1; + *(addrPairIndexed + 8) = data2; +} + +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb9000000/mask=xbfc00000 +# CONSTRUCT xb9000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb9000000/mask=xffc00000 --status nomem + +:str Rt_GPR32, addrUIMM +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 +{ + *addrUIMM = Rt_GPR32; +} + +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb8000400/mask=xbfe00c00 +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb8000c00/mask=xbfe00c00 +# C6.2.289 STTR page C6-1294 line 71837 MATCH xb8000800/mask=xbfe00c00 +# C6.2.298 STUR page C6-1312 line 72737 MATCH xb8000000/mask=xbfe00c00 +# CONSTRUCT xb8000000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst xb8000000/mask=xffe00000 --status nomem + +:st^UnscPriv^"r" Rt_GPR32, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 +{ + data1:4 = Rt_GPR32; + build addrIndexed; + *addrIndexed = data1; +} + +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb8000400/mask=xbfe00c00 +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb8000c00/mask=xbfe00c00 +# CONSTRUCT xb8000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb8000400/mask=xffe00400 --status nomem + +:str Rt_GPR32, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 +{ + data1:4 = Rt_GPR32; + build addrIndexed; + *addrIndexed = 
data1; +} + +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb9000000/mask=xbfc00000 +# CONSTRUCT xf9000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf9000000/mask=xffc00000 --status nomem + +:str Rt_GPR64, addrUIMM +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrUIMM & Rn_GPR64xsp & Rt_GPR64 +{ + *addrUIMM = Rt_GPR64; +} + +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb8000400/mask=xbfe00c00 +# C6.2.274 STR (immediate) page C6-1261 line 70143 MATCH xb8000c00/mask=xbfe00c00 +# CONSTRUCT xf8000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf8000400/mask=xffe00400 --status nomem + +:str Rt_GPR64, addrIndexed +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64 +{ + data1:8 = Rt_GPR64; + build addrIndexed; + *addrIndexed = data1; +} + +# C6.2.275 STR (register) page C6-1264 line 70326 MATCH xb8200800/mask=xbfe00c00 +# CONSTRUCT xb8200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb8200800/mask=xffe00c00 --status nomem + +:str Rt_GPR32, addrIndexed +is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 +{ + data1:4 = Rt_GPR32; + build addrIndexed; + *addrIndexed = data1; +} + +# C6.2.275 STR (register) page C6-1264 line 70326 MATCH xb8200800/mask=xbfe00c00 +# CONSTRUCT xf8200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8200800/mask=xffe00c00 --status nomem + +:str Rt_GPR64, addrIndexed +is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64 +{ + data1:8 = Rt_GPR64; + build addrIndexed; + *addrIndexed = data1; +} + +# C6.2.276 STRB (immediate) page C6-1266 line 70444 MATCH x39000000/mask=xffc00000 +# CONSTRUCT x39000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x39000000/mask=xffc00000 --status nomem + +:strb Rt_GPR32, addrIndexed +is size.ldstr=0 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrIndexed & Rt_GPR32 +{ + tmp:4 = Rt_GPR32; + build addrIndexed; + *addrIndexed = tmp:1; +} + +# C6.2.276 STRB (immediate) page C6-1266 line 70444 MATCH x38000400/mask=xffe00c00 +# C6.2.276 STRB (immediate) page C6-1266 line 70444 MATCH x38000c00/mask=xffe00c00 +# CONSTRUCT x38000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x38000400/mask=xffe00400 --status nomem + +:strb Rt_GPR32, addrIndexed +is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 +{ + tmp:4 = Rt_GPR32; + build addrIndexed; + *addrIndexed = tmp:1; +} + +# C6.2.277 STRB (register) page C6-1269 line 70600 MATCH x38200800/mask=xffe00c00 +# CONSTRUCT x38200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x38200800/mask=xffe00c00 --status nomem + +:strb Rt_GPR32, addrIndexed +is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 +{ + tmp:4 = Rt_GPR32; + build addrIndexed; + *addrIndexed = tmp:1; +} + +# C6.2.278 STRH (immediate) page C6-1271 line 70701 MATCH x79000000/mask=xffc00000 +# CONSTRUCT x79000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x79000000/mask=xffc00000 --status nomem + +:strh Rt_GPR32, addrUIMM +is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 +{ + tmp:4 = Rt_GPR32; + *addrUIMM = tmp:2; +} + +# C6.2.278 STRH (immediate) page C6-1271 line 70701 MATCH x78000400/mask=xffe00c00 +# C6.2.278 STRH 
(immediate) page C6-1271 line 70701 MATCH x78000c00/mask=xffe00c00
+# CONSTRUCT x78000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x78000400/mask=xffe00400 --status nomem
+
+:strh Rt_GPR32, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32
+{
+    tmp:4 = Rt_GPR32;
+    build addrIndexed;
+    *addrIndexed = tmp:2;
+}
+
+# C6.2.279 STRH (register) page C6-1274 line 70857 MATCH x78200800/mask=xffe00c00
+# CONSTRUCT x78200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x78200800/mask=xffe00c00 --status nomem
+
+:strh Rt_GPR32, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32
+{
+    tmp:4 = Rt_GPR32;
+    build addrIndexed;
+    *addrIndexed = tmp:2;
+}
+
+# C6.2.289 STTR page C6-1294 line 71837 MATCH xb8000800/mask=xbfe00c00
+# CONSTRUCT xf8000800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xf8000800/mask=xffe00c00 --status nomem
+
+:st^UnscPriv^"r" Rt_GPR64, addrIndexed
+is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=0 & b_2121=0 & b_1011=2 & UnscPriv & addrIndexed & Rt_GPR64
+{
+    data1:8 = Rt_GPR64;
+    build addrIndexed;
+    *addrIndexed = data1;
+}
+
+# C6.2.290 STTRB page C6-1296 line 71948 MATCH x38000800/mask=xffe00c00
+# C6.2.299 STURB page C6-1314 line 72829 MATCH x38000000/mask=xffe00c00
+# CONSTRUCT x38000000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x38000000/mask=xffe00000 --status nomem
+
+:st^UnscPriv^"rb" Rt_GPR32, addrIndexed
+is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32
+{
+    local tmp:4 = Rt_GPR32;
+    build addrIndexed;
+    *addrIndexed = tmp:1;
+}
+
+# C6.2.291 STTRH page C6-1298 line 72046 MATCH x78000800/mask=xffe00c00
+# C6.2.300 STURH page C6-1315 line 72899 MATCH x78000000/mask=xffe00c00
+# CONSTRUCT x78000000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x78000000/mask=xffe00000 --status nomem
+
+:st^UnscPriv^"rh" Rt_GPR32, addrIndexed
+is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32
+{
+    local tmp:4 = Rt_GPR32;
+    build addrIndexed;
+    *addrIndexed = tmp:2;
+}
+
+# C6.2.298 STUR page C6-1312 line 72737 MATCH xb8000000/mask=xbfe00c00
+# CONSTRUCT xf8000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xf8000000/mask=xffe00c00 --status nomem
+
+:st^UnscPriv^"r" Rt_GPR64, addrIndexed
+is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2122=0 & b_1011=0 & UnscPriv & addrIndexed & Rt_GPR64
+{
+    data1:8 = Rt_GPR64;
+    build addrIndexed;
+    *addrIndexed = data1;
+}
+
+# C6.2.301 STXP page C6-1316 line 72969 MATCH x88200000/mask=xbfe08000
+# CONSTRUCT xc8200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8200000/mask=xffe08000 --status nomem
+
+:stxp Rs_GPR32, Rt_GPR64, Rt2_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=0 & Rt2_GPR64 & addrReg & Rt_GPR64 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR64;
+    *(addrReg + 8) = Rt2_GPR64;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.301 STXP page C6-1316 line 72969 MATCH x88200000/mask=xbfe08000
+# CONSTRUCT x88200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88200000/mask=xffe08000 --status nomem
+
+:stxp Rs_GPR32, Rt_GPR32, Rt2_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=0 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR32;
+    *(addrReg + 4) = Rt2_GPR32;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.302 STXR page C6-1319 line 73137 MATCH x88000000/mask=xbfe08000
+# CONSTRUCT xc8000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xc8000000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stxr Rs_GPR32, Rt_GPR64, addrReg
+is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR64 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR64;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.302 STXR page C6-1319 line 73137 MATCH x88000000/mask=xbfe08000
+# CONSTRUCT x88000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x88000000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stxr Rs_GPR32, Rt_GPR32, addrReg
+is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    *addrReg = Rt_GPR32;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.303 STXRB page C6-1321 line 73282 MATCH x08000000/mask=xffe08000
+# CONSTRUCT x08000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x08000000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stxrb Rs_GPR32, Rt_GPR32, addrReg
+is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    local tmp:4 = Rt_GPR32;
+    *addrReg = tmp:1;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.304 STXRH page C6-1323 line 73411 MATCH x48000000/mask=xffe08000
+# CONSTRUCT x48000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x48000000/mask=xffe08000 --status nomem
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:stxrh Rs_GPR32, Rt_GPR32, addrReg
+is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR32 & Rs_GPR64
+{
+    status:1 = 1;
+    rsize:1 = 16;
+    check:1 = ExclusiveMonitorPass(addrReg, rsize);
+    if (!check) goto <done>;
+    local tmp:4 = Rt_GPR32;
+    *addrReg = tmp:2;
+    status = ExclusiveMonitorsStatus();
+    <done>
+    Rs_GPR64 = zext(status);
+}
+
+# C6.2.308 SUB (extended register) page C6-1330 line 73884 MATCH x4b200000/mask=x7fe00000
+# CONSTRUCT x4b200000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x4b200000/mask=xffe00000 --status pass
+
+:sub Rd_GPR32wsp, Rn_GPR32wsp, ExtendRegShift32
+is sf=0 & op=1 & S=0 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp
+{
+    tmp_2:4 = ExtendRegShift32;
+    tmp_1:4 = Rn_GPR32wsp - tmp_2;
+    Rd_GPR64xsp = zext(tmp_1);
+}
+
+# C6.2.308 SUB (extended register) page C6-1330 line 73884 MATCH x4b200000/mask=x7fe00000
+# CONSTRUCT xcb200000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst xcb200000/mask=xffe00000 --status pass
+
+:sub Rd_GPR64xsp, Rn_GPR64xsp, ExtendRegShift64
+is sf=1 & op=1 & S=0 & b_2428=0xb & opt=0 & b_2121=1 &
ExtendRegShift64 & Rn_GPR64xsp & Rd_GPR64xsp +{ + tmp_2:8 = ExtendRegShift64; + tmp_1:8 = Rn_GPR64xsp - tmp_2; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.309 SUB (immediate) page C6-1333 line 74031 MATCH x51000000/mask=x7f800000 +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# CONSTRUCT x51000000/mask=xdf000000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x51000000/mask=xdf000000 --status pass --comment "flags" + +:sub^SBIT_CZNO Rd_GPR32xsp, Rn_GPR32xsp, ImmShift32 +is sf=0 & b_30=1 & S & SBIT_CZNO & b_2428=0x11 & ImmShift32 & Rn_GPR32xsp & Rd_GPR32xsp & Rd_GPR64xsp +{ + subflags(Rn_GPR32xsp, ImmShift32); + tmp:4 = Rn_GPR32xsp - ImmShift32; + resultflags(tmp); + build SBIT_CZNO; + Rd_GPR64xsp = zext(tmp); +} + +# C6.2.309 SUB (immediate) page C6-1333 line 74031 MATCH x51000000/mask=x7f800000 +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# CONSTRUCT xd1000000/mask=xdf000000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xd1000000/mask=xdf000000 --status pass --comment "flags" + +:sub^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, ImmShift64 +is sf=1 & b_30=1 & S & SBIT_CZNO & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp & Rd_GPR64xsp +{ + subflags(Rn_GPR64xsp, ImmShift64); + Rd_GPR64xsp = Rn_GPR64xsp - ImmShift64; + resultflags(Rd_GPR64xsp); + build SBIT_CZNO; +} + +# C6.2.309 SUB (immediate) page C6-1333 line 74031 MATCH x51000000/mask=x7f800000 +# CONSTRUCT x51000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x51000000/mask=xffc00000 --status pass + +:sub Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0 +is sf=0 & op=1 & S=0 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_negimm_lsl0 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl0; + tmp_1:4 = Rn_GPR32wsp - tmp_2; + Rd_GPR64xsp = zext(tmp_1); +} + +# C6.2.309 SUB (immediate) page C6-1333 line 74031 MATCH x51000000/mask=x7f800000 +# CONSTRUCT x51400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x51400000/mask=xffc00000 --status pass + +:sub Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl12 +is sf=0 & op=1 & S=0 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_negimm_lsl12 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl12; + tmp_1:4 = Rn_GPR32wsp - tmp_2; + Rd_GPR64xsp = zext(tmp_1); +} + +# C6.2.309 SUB (immediate) page C6-1333 line 74031 MATCH x51000000/mask=x7f800000 +# CONSTRUCT xd1000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd1000000/mask=xffc00000 --status pass + +:sub Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl0 +is sf=1 & op=1 & S=0 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_negimm_lsl0 & Rn_GPR64xsp & Rd_GPR64xsp +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl0; + tmp_1:8 = Rn_GPR64xsp - tmp_2; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.309 SUB (immediate) page C6-1333 line 74031 MATCH x51000000/mask=x7f800000 +# CONSTRUCT xd1400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd1400000/mask=xffc00000 --status pass + +:sub Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl12 +is sf=1 & op=1 & S=0 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_negimm_lsl12 & Rn_GPR64xsp & Rd_GPR64xsp +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl12; + tmp_1:8 = Rn_GPR64xsp 
- tmp_2; + Rd_GPR64xsp = tmp_1; +} + +# C6.2.310 SUB (shifted register) page C6-1335 line 74131 MATCH x4b000000/mask=x7f200000 +# C6.2.199 NEG (shifted register) page C6-1135 line 63379 MATCH x4b0003e0/mask=x7f2003e0 +# CONSTRUCT x4b000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x4b000000/mask=xff200000 --status pass + +:sub Rd_GPR32, Rn_GPR32, RegShift32 +is sf=0 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32; + tmp_1:4 = Rn_GPR32 - tmp_2; + Rd_GPR64 = zext(tmp_1); +} + +# C6.2.310 SUB (shifted register) page C6-1335 line 74131 MATCH x4b000000/mask=x7f200000 +# C6.2.199 NEG (shifted register) page C6-1135 line 63379 MATCH x4b0003e0/mask=x7f2003e0 +# CONSTRUCT xcb000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xcb000000/mask=xff200000 --status pass + +:sub Rd_GPR64, Rn_GPR64, RegShift64 +is sf=1 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd_GPR64 +{ + tmp_2:8 = RegShift64; + tmp_1:8 = Rn_GPR64 - tmp_2; + Rd_GPR64 = tmp_1; +} + +# C6.2.314 SUBS (extended register) page C6-1340 line 74449 MATCH x6b200000/mask=x7fe00000 +# C6.2.60 CMP (extended register) page C6-875 line 48916 MATCH x6b20001f/mask=x7fe0001f +# CONSTRUCT x6b200000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x6b200000/mask=xffe00000 --status pass --comment "flags" + +:subs Rd_GPR32, Rn_GPR32wsp, ExtendRegShift32 +is sf=0 & op=1 & S=1 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = ExtendRegShift32; + subflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectflags(); +} + +# C6.2.314 SUBS (extended register) page C6-1340 line 74449 MATCH x6b200000/mask=x7fe00000 +# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000 +# C6.2.60 CMP (extended register) page C6-875 line 48916 MATCH x6b20001f/mask=x7fe0001f +# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f +# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0 +# CONSTRUCT xeb000000/mask=xffc00000 MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst xeb000000/mask=xffc00000 --status pass --comment "flags" + +:subs Rd_GPR64, Rn_GPR64xsp, ExtendRegShift64 +is sf=1 & op=1 & S=1 & b_2428=0xb & opt=0 & ExtendRegShift64 & Rn_GPR64xsp & Rd_GPR64 +{ + tmp_2:8 = ExtendRegShift64; + subflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectflags(); +} + +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# CONSTRUCT x71000000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x71000000/mask=xffc00000 --status pass --comment "flags" + +:subs Rd_GPR32, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0 +is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_negimm_lsl0 & Rn_GPR32wsp & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl0; + subflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectflags(); +} + +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# CONSTRUCT x71400000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x71400000/mask=xffc00000 --status 
pass --comment "flags" + +:subs Rd_GPR32, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl12 +is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_negimm_lsl12 & Rn_GPR32wsp & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl12; + subflags(Rn_GPR32wsp, tmp_2); + tmp_1:4 = Rn_GPR32wsp - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectflags(); +} + +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# CONSTRUCT xf1000000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf1000000/mask=xffc00000 --status pass --comment "flags" + +:subs Rd_GPR64, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl0 +is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_negimm_lsl0 & Rn_GPR64xsp & Rd_GPR64 +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl0; + subflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectflags(); +} + +# C6.2.315 SUBS (immediate) page C6-1343 line 74604 MATCH x71000000/mask=x7f800000 +# C6.2.61 CMP (immediate) page C6-877 line 49043 MATCH x7100001f/mask=x7f80001f +# CONSTRUCT xf1400000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf1400000/mask=xffc00000 --status pass --comment "flags" + +:subs Rd_GPR64, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl12 +is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_negimm_lsl12 & Rn_GPR64xsp & Rd_GPR64 +{ + tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl12; + subflags(Rn_GPR64xsp, tmp_2); + tmp_1:8 = Rn_GPR64xsp - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectflags(); +} + +# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000 +# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f +# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0 +# CONSTRUCT x6b000000/mask=xff200000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst x6b000000/mask=xff200000 --status pass + +:subs Rd_GPR32, Rn_GPR32, RegShift32 +is sf=0 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp_2:4 = RegShift32; + subflags(Rn_GPR32, tmp_2); + tmp_1:4 = Rn_GPR32 - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = zext(tmp_1); + affectflags(); +} + +# C6.2.316 SUBS (shifted register) page C6-1345 line 74711 MATCH x6b000000/mask=x7f200000 +# C6.2.62 CMP (shifted register) page C6-879 line 49133 MATCH x6b00001f/mask=x7f20001f +# C6.2.200 NEGS page C6-1137 line 63476 MATCH x6b0003e0/mask=x7f2003e0 +# CONSTRUCT xeb000000/mask=xff200000 MATCHED 3 DOCUMENTED OPCODES +# AUNIT --inst xeb000000/mask=xff200000 --status pass + +:subs Rd_GPR64, Rn_GPR64, RegShift64 +is sf=1 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd_GPR64 & Rd +{ + tmp_2:8 = RegShift64; + subflags(Rn_GPR64, tmp_2); + tmp_1:8 = Rn_GPR64 - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + affectflags(); +} + +# C6.2.317 SVC page C6-1347 line 74833 MATCH xd4000001/mask=xffe0001f +# CONSTRUCT xd4000001/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd4000001/mask=xffe0001f --status nodest + +:svc imm16 +is b_2431=0xd4 & excCode=0 & imm16 & excCode2=0 & ll=1 +{ + CallSupervisor(imm16:2); +} + +# C6.2.318 SWPB, SWPAB, SWPALB, SWPLB page C6-1348 line 74875 MATCH x38208000/mask=xff20fc00 +# CONSTRUCT x38208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst 
x38208000/mask=xff20fc00 --status nomem + +# size=0b00 (3031) + +:swp^ls_lor^"b" aa_Ws, aa_Wt, [Rn_GPR64xsp] +is b_3031=0b00 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Wt & ls_data1 & ls_mem1 & aa_Ws & Rn_GPR64xsp +{ build ls_loa; build ls_data1; ls_opc_swp(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; aa_Wt = tmp_ldWn; build ls_lor; } + +# C6.2.319 SWPH, SWPAH, SWPALH, SWPLH page C6-1350 line 74982 MATCH x78208000/mask=xff20fc00 +# CONSTRUCT x78208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst x78208000/mask=xff20fc00 --status nomem + +# size=0b01 (3031) + +:swp^ls_lor^"h" aa_Ws, aa_Wt, [Rn_GPR64xsp] +is b_3031=0b01 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Wt & ls_data2 & ls_mem2 & aa_Ws & Rn_GPR64xsp +{ build ls_loa; build ls_data2; ls_opc_swp(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; aa_Wt = tmp_ldWn; build ls_lor; } + +# C6.2.320 SWP, SWPA, SWPAL, SWPL page C6-1352 line 75089 MATCH xb8208000/mask=xbf20fc00 +# CONSTRUCT xb8208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xb8208000/mask=xff20fc00 --status nomem + +# size=0b10 (3031) + +:swp^ls_lor aa_Ws, aa_Wt, [Rn_GPR64xsp] +is b_3031=0b10 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Wt & ls_data4 & ls_mem4 & aa_Ws & Rn_GPR64xsp +{ build ls_loa; build ls_data4; ls_opc_swp(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; aa_Wt = tmp_ldWn; build ls_lor; } + +# C6.2.320 SWP, SWPA, SWPAL, SWPL page C6-1352 line 75089 MATCH xb8208000/mask=xbf20fc00 +# CONSTRUCT xf8208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xf8208000/mask=xff20fc00 --status nomem + +# size=0b11 (3031) + +:swp^ls_lor aa_Xs, aa_Xt, [Rn_GPR64xsp] +is b_3031=0b11 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Xt & ls_data8 & ls_mem8 & aa_Xs & Rn_GPR64xsp +{ build ls_loa; build ls_data8; ls_opc_swp(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; aa_Xt = tmp_ldXn; build ls_lor; } + +# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x93401c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x93401c00/mask=xfffffc06 --status pass + +# Special case of sbfm where imms='000111' and immr='000000' + +:sxtb Rd_GPR64, Rn_GPR32 +is ImmR=0x0 & ImmS=0x7 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & Rn_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + tmp_byte:1 = tmp:1; + result:8 = sext(tmp_byte); + Rd_GPR64 = result; +} + +# C6.2.321 SXTB page C6-1354 line 75227 MATCH x13001c00/mask=x7fbffc00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x13001c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x13001c00/mask=xfffffc06 --status pass + +# Special case of sbfm when ImmS=7 and ImmR=0. 
Note that this implies ImmS > ImmR-1 +# Otherwise, this might appear to conflict with sbfiz + +:sxtb Rd_GPR32, Rn_GPR32 +is ImmR=0x0 & ImmS=0x7 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + tmp_byte:1 = tmp:1; + result:4 = sext(tmp_byte); + Rd_GPR64 = zext(result); +} + +# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x93403c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x93403c00/mask=xfffffc06 --status pass + +# Special case of sbfm where imms='001111' and immr='000000' + +:sxth Rd_GPR64, Rn_GPR32 +is ImmR=0x0 & ImmS=0xf & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & Rn_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + tmp_1:2 = tmp:2; + tmp_2:8 = sext(tmp_1); + Rd_GPR64 = tmp_2; +} + +# C6.2.322 SXTH page C6-1356 line 75314 MATCH x13003c00/mask=x7fbffc00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x13003c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES +# AUNIT --inst x13003c00/mask=xfffffc06 --status pass + +# Special case of sbfm where imms='001111' and immr='000000' + +:sxth Rd_GPR32, Rn_GPR32 +is ImmR=0x0 & ImmS=0xf & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + tmp_1:2 = tmp:2; + tmp_2:4 = sext(tmp_1); + Rd_GPR64 = zext(tmp_2); +} + +# C6.2.323 SXTW page C6-1358 line 75401 MATCH x93407c00/mask=xfffffc00 +# C6.2.17 ASR (immediate) page C6-803 line 45139 MATCH x13007c00/mask=x7f807c00 +# C6.2.232 SBFIZ page C6-1190 line 66254 MATCH x13000000/mask=x7f800000 +# C6.2.233 SBFM page C6-1192 line 66348 MATCH x13000000/mask=x7f800000 +# C6.2.234 SBFX page C6-1194 line 66483 MATCH x13000000/mask=x7f800000 +# CONSTRUCT x93407c00/mask=xfffffc06 MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst x93407c00/mask=xfffffc06 --status pass + +# Special case of sbfm where imms='011111' and immr='000000' + +:sxtw Rd_GPR64, Rn_GPR32 +is ImmR=0x0 & ImmS=0x1f & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & Rn_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + Rd_GPR64 = sext(tmp); +} + +# C6.2.286 SYS page C6-979 line 56782 KEEPWITH + +SysArgs: Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3, Rt_GPR64 is Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt & Rt_GPR64 { export Rt_GPR64; } +SysArgs: Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3 is Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt=31 & Rt_GPR64 { export 0:8; } + +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# C6.2.19 AT page C6-807 line 45319 MATCH xd5087800/mask=xfff8fe00 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# CONSTRUCT xd5080000/mask=xfff80000 MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst xd5080000/mask=xfff80000 --status nodest + +:sys SysArgs +is b_1931=0b1101010100001 & Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & SysArgs +{ + tmp1:4 = Op1_uimm3; + tmp2:4 = CRn_CRx; + tmp3:4 = CRm_CRx; + tmp4:4 
= Op2_uimm3; + SysOp_W(tmp1, tmp2, tmp3, tmp4, SysArgs); +} + +# C6.2.325 SYSL page C6-1361 line 75548 MATCH xd5280000/mask=xfff80000 +# CONSTRUCT xd5280000/mask=xfff80000 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd5280000/mask=xfff80000 --status nodest + +:sysl Rt_GPR64, Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3 +is b_2431=0xd5 & b_2223=0 & l=1 & Op0=1 & Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt & Rt_GPR64 +{ + tmp1:4 = Op1_uimm3; + tmp2:4 = CRn_CRx; + tmp3:4 = CRm_CRx; + tmp4:4 = Op2_uimm3; + Rt_GPR64 = SysOp_R(tmp1, tmp2, tmp3, tmp4); +} + +# C6.2.325 SYSL page C6-1361 line 75548 MATCH xd5280000/mask=xfff80000 +# CONSTRUCT xd528001f/mask=xfff8001f MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xd528001f/mask=xfff8001f --status nodest + +:sysl Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3 +is b_2431=0xd5 & b_2223=0 & l=1 & Op0=1 & Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt=31 & Rt_GPR64 +{ + tmp1:4 = Op1_uimm3; + tmp2:4 = CRn_CRx; + tmp3:4 = CRm_CRx; + tmp4:4 = Op2_uimm3; + SysOp_R(tmp1, tmp2, tmp3, tmp4); +} + +# C6.2.326 TBNZ page C6-1362 line 75602 MATCH x37000000/mask=x7f000000 +# C6.2.327 TBZ page C6-1363 line 75660 MATCH x36000000/mask=x7f000000 +# CONSTRUCT xb6000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xb6000000/mask=xfe000000 --status nodest + +:tb^ZeroOp Rd_GPR64, BitPos, Addr14 +is sf=1 & b_2530=0x1b & BitPos & ZeroOp & Addr14 & Rd_GPR64 +{ + tmp:1 = BitPos; + if (tmp == ZeroOp) goto Addr14; +} + +# C6.2.326 TBNZ page C6-1362 line 75602 MATCH x37000000/mask=x7f000000 +# C6.2.327 TBZ page C6-1363 line 75660 MATCH x36000000/mask=x7f000000 +# CONSTRUCT x36000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x36000000/mask=xfe000000 --status nodest + +:tb^ZeroOp Rd_GPR32, BitPos, Addr14 +is sf=0 & b_2530=0x1b & BitPos & ZeroOp & Addr14 & Rd_GPR32 +{ + tmp:1 = BitPos; + if (tmp == ZeroOp) goto Addr14; +} + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8020/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8020/mask=xffffffe0 --status nodest + +:tlbi "IPAS2E1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0000 & b_0507=0b001 & Rt_GPR64 +{ TLBI_IPAS2E1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c80a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c80a0/mask=xffffffe0 --status nodest + +:tlbi "IPAS2LE1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0000 & b_0507=0b101 & Rt_GPR64 +{ TLBI_IPAS2LE1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088300/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088300/mask=xffffffe0 --status nodest + +:tlbi "VMALLE1IS" +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b000 +{ TLBI_VMALLE1IS(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8300/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8300/mask=xffffffe0 --status nodest + +:tlbi "ALLE2IS" +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b000 +{ TLBI_ALLE2IS(); } + 
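+# The TLBI constructors in this group all share the SYS encoding (the d508xxxx /
+# d50cxxxx / d50exxxx values in the CONSTRUCT lines) and are distinguished only by
+# op1 (b_1618), CRm (b_0811) and op2 (b_0507); each variant is spelled out so that
+# every operation maps to its own TLBI_* pseudo-op. A worked decode, taken from the
+# CONSTRUCT masks in the comments: 0xd5088320 has op1=0b000, CRm=0b0011, op2=0b001
+# and Rt=x0, which disassembles as "tlbi VAE1IS, x0".
+#
+# A hypothetical alternative (only a sketch, and not what this spec does) would be
+# a sub-table that attaches the operation name, at the cost of losing the
+# per-operation pseudo-ops:
+#
+#   tlbi_op: "VAE1IS" is b_1618=0b000 & b_0811=0b0011 & b_0507=0b001 {}
+#   tlbi_op: "VAE2IS" is b_1618=0b100 & b_0811=0b0011 & b_0507=0b001 {}
+#   :tlbi tlbi_op, Rt_GPR64
+#   is b_1931=0b1101010100001 & b_1215=0b1000 & tlbi_op & Rt_GPR64 { ... }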
+# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e8300/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50e8300/mask=xffffffe0 --status nodest + +:tlbi "ALLE3IS" +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b000 +{ TLBI_ALLE3IS(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088320/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088320/mask=xffffffe0 --status nodest + +:tlbi "VAE1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b001 & Rt_GPR64 +{ TLBI_VAE1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8320/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8320/mask=xffffffe0 --status nodest + +:tlbi "VAE2IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b001 & Rt_GPR64 +{ TLBI_VAE2IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e8320/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50e8320/mask=xffffffe0 --status nodest + +:tlbi "VAE3IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b001 & Rt_GPR64 +{ TLBI_VAE3IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088340/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088340/mask=xffffffe0 --status nodest + +:tlbi "ASIDE1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b010 & Rt_GPR64 +{ TLBI_ASIDE1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088360/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088360/mask=xffffffe0 --status nodest + +:tlbi "VAAE1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b011 & Rt_GPR64 +{ TLBI_VAAE1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8380/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8380/mask=xffffffe0 --status nodest + +:tlbi "ALLE1IS" +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b100 +{ TLBI_ALLE1IS(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50883a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50883a0/mask=xffffffe0 --status nodest + +:tlbi "VALE1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b101 & Rt_GPR64 +{ TLBI_VALE1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c83a0/mask=xffffffe0 MATCHED 2 
DOCUMENTED OPCODES +# AUNIT --inst xd50c83a0/mask=xffffffe0 --status nodest + +:tlbi "VALE2IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b101 & Rt_GPR64 +{ TLBI_VALE2IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e83a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50e83a0/mask=xffffffe0 --status nodest + +:tlbi "VALE3IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b101 & Rt_GPR64 +{ TLBI_VALE3IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c83c0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c83c0/mask=xffffffe0 --status nodest + +:tlbi "VMALLS12E1IS" +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b110 +{ TLBI_VMALLS12E1IS(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50883e0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50883e0/mask=xffffffe0 --status nodest + +:tlbi "VAALE1IS", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b111 & Rt_GPR64 +{ TLBI_VAALE1IS(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8420/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8420/mask=xffffffe0 --status nodest + +:tlbi "IPAS2E1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0100 & b_0507=0b001 & Rt_GPR64 +{ TLBI_IPAS2E1(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c84a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c84a0/mask=xffffffe0 --status nodest + +:tlbi "IPAS2LE1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0100 & b_0507=0b101 & Rt_GPR64 +{ TLBI_IPAS2LE1(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088700/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088700/mask=xffffffe0 --status nodest + +:tlbi "VMALLE1" +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b000 +{ TLBI_VMALLE1(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8700/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8700/mask=xffffffe0 --status nodest + +:tlbi "ALLE2" +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b000 +{ TLBI_ALLE2(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e8700/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50e8700/mask=xffffffe0 --status nodest + +:tlbi "ALLE3" +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b000 +{ TLBI_ALLE3(); } + +# C6.2.328 
TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088720/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088720/mask=xffffffe0 --status nodest + +:tlbi "VAE1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b001 & Rt_GPR64 +{ TLBI_VAE1(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8720/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8720/mask=xffffffe0 --status nodest + +:tlbi "VAE2", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b001 & Rt_GPR64 +{ TLBI_VAE2(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e8720/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50e8720/mask=xffffffe0 --status nodest + +:tlbi "VAE3", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b001 & Rt_GPR64 +{ TLBI_VAE3(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088740/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088740/mask=xffffffe0 --status nodest + +:tlbi "ASIDE1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b010 & Rt_GPR64 +{ TLBI_ASIDE1(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5088760/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd5088760/mask=xffffffe0 --status nodest + +:tlbi "VAAE1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b011 & Rt_GPR64 +{ TLBI_VAAE1(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c8780/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c8780/mask=xffffffe0 --status nodest + +:tlbi "ALLE1" +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b100 +{ TLBI_ALLE1(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50887a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50887a0/mask=xffffffe0 --status nodest + +:tlbi "VALE1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b101 & Rt_GPR64 +{ TLBI_VALE1(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c87a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c87a0/mask=xffffffe0 --status nodest + +:tlbi "VALE2", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b101 & Rt_GPR64 +{ TLBI_VALE2(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50e87a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# 
AUNIT --inst xd50e87a0/mask=xffffffe0 --status nodest + +:tlbi "VALE3", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b101 & Rt_GPR64 +{ TLBI_VALE3(Rt_GPR64); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50c87c0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50c87c0/mask=xffffffe0 --status nodest + +:tlbi "VMALLS12E1" +is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b110 +{ TLBI_VMALLS12E1(); } + +# C6.2.328 TLBI page C6-1364 line 75718 MATCH xd5088000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50887e0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50887e0/mask=xffffffe0 --status nodest + +:tlbi "VAALE1", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b111 & Rt_GPR64 +{ TLBI_VAALE1(Rt_GPR64); } + +# C6.2.330 TST (immediate) page C6-1368 line 75910 MATCH x7200001f/mask=x7f80001f +# C6.2.14 ANDS (immediate) page C6-797 line 44831 MATCH x72000000/mask=x7f800000 +# CONSTRUCT x7200001f/mask=xff80001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x7200001f/mask=xff80001f --status pass --comment "flags" + +:tst Rn_GPR32, DecodeWMask32 +is sf=0 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd=0x1f +{ + tmp_2:4 = DecodeWMask32; + tmp_1:4 = Rn_GPR32 & tmp_2; + resultflags(tmp_1); + affectLflags(); +} + +# C6.2.330 TST (immediate) page C6-1368 line 75910 MATCH x7200001f/mask=x7f80001f +# C6.2.14 ANDS (immediate) page C6-797 line 44831 MATCH x72000000/mask=x7f800000 +# CONSTRUCT xf200001f/mask=xff80001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xf200001f/mask=xff80001f --status pass --comment "flags" + +:tst Rn_GPR64, DecodeWMask64 +is sf=1 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd=0x1f +{ + tmp_2:8 = DecodeWMask64; + tmp_1:8 = Rn_GPR64 & tmp_2; + resultflags(tmp_1); + affectLflags(); +} + +# C6.2.331 TST (shifted register) page C6-1369 line 75974 MATCH x6a00001f/mask=x7f20001f +# C6.2.15 ANDS (shifted register) page C6-799 line 44931 MATCH x6a000000/mask=x7f200000 +# CONSTRUCT x6a00001f/mask=xff20001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst x6a00001f/mask=xff20001f --status pass --comment "flags" + +:tst Rn_GPR32, RegShift32Log +is sf=0 & opc=3 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd=0x1f +{ + tmp_2:4 = RegShift32Log; + tmp_1:4 = Rn_GPR32 & tmp_2; + resultflags(tmp_1); + affectLflags(); +} + +# C6.2.331 TST (shifted register) page C6-1369 line 75974 MATCH x6a00001f/mask=x7f20001f +# C6.2.15 ANDS (shifted register) page C6-799 line 44931 MATCH x6a000000/mask=x7f200000 +# CONSTRUCT xea00001f/mask=xff20001f MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xea00001f/mask=xff20001f --status pass --comment "flags" + +:tst Rn_GPR64, RegShift64Log +is sf=1 & opc=3 & b_2428=0xa & N=0 & RegShift64Log & Rn_GPR64 & Rd=0x1f +{ + tmp_2:8 = RegShift64Log; + tmp_1:8 = Rn_GPR64 & tmp_2; + resultflags(tmp_1); + affectLflags(); +} + +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# C6.2.342 UXTB page C6-1386 
line 76865 MATCH x53001c00/mask=xfffffc00 +# C6.2.343 UXTH page C6-1387 line 76925 MATCH x53003c00/mask=xfffffc00 +# CONSTRUCT x53000008/mask=xffe0800c MATCHED 7 DOCUMENTED OPCODES +# AUNIT --inst x53000008/mask=xffe0800c --status pass +# Special case of ubfm where UInt(imms) < UInt(immr). +# Note because LSL is preferred where imms + 1 == immr, we use ImmS_LT_ImmR_minus_1 +# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); + +:ubfiz Rd_GPR32, Rn_GPR32, ubfiz_lsb, ubfiz_width +is ImmS_LT_ImmR_minus_1=1 & ImmS_EQ_ImmR=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ubfiz_lsb & ubfiz_width & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 +{ + local wmask:4 = DecodeWMask32; + local tmask:4 = DecodeTMask32; + local src:4 = Rn_GPR32; + local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; + Rd_GPR64 = zext(bot & tmask); +} + +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# CONSTRUCT xd340000a/mask=xffc0000a MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst xd340000a/mask=xffc0000a --status pass +# Special case of ubfm where UInt(imms) < UInt(immr). +# Note because LSL is preferred where imms + 1 == immr, we use ImmS_LT_ImmR_minus_1 + +:ubfiz Rd_GPR64, Rn_GPR64, ubfiz_lsb64, ubfiz_width +is ImmS_LT_ImmR_minus_1=1 & ImmS_LT_ImmR=1 & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmS_bitfield64_imm & ImmRConst64 & ubfiz_lsb64 & ubfiz_width & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64 +{ + local wmask:8 = DecodeWMask64; + local tmask:8 = DecodeTMask64; + local src:8 = Rn_GPR64; + local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask; + Rd_GPR64 = bot & tmask; +} + +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# C6.2.342 UXTB page C6-1386 line 76865 MATCH x53001c00/mask=xfffffc00 +# C6.2.343 UXTH page C6-1387 line 76925 MATCH x53003c00/mask=xfffffc00 +# CONSTRUCT x53000000/mask=xffe08000 MATCHED 7 DOCUMENTED OPCODES +# AUNIT --inst x53000000/mask=xffe08000 --status pass +# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); + +:ubfm Rd_GPR32, Rn_GPR32, ImmRConst32, ImmSConst32 +is sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ImmSConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 +{ + local wmask:4 = DecodeWMask32; + local tmask:4 = DecodeTMask32; + local src:4 = Rn_GPR32; + local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; + Rd_GPR64 = zext(bot & tmask); +} + +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 
line 76294 MATCH x53000000/mask=x7f800000
+# CONSTRUCT xd3400000/mask=xffc00000 MATCHED 5 DOCUMENTED OPCODES
+# AUNIT --inst xd3400000/mask=xffc00000 --status pass
+
+:ubfm Rd_GPR64, Rn_GPR64, ImmRConst64, ImmSConst64
+is sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmS_bitfield64_imm & ImmRConst64 & ImmSConst64 & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64
+{
+    local wmask:8 = DecodeWMask64;
+    local tmask:8 = DecodeTMask64;
+    local src:8 = Rn_GPR64;
+    local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask;
+    Rd_GPR64 = bot & tmask;
+}
+
+# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000
+# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000
+# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00
+# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000
+# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000
+# C6.2.342 UXTB page C6-1386 line 76865 MATCH x53001c00/mask=xfffffc00
+# C6.2.343 UXTH page C6-1387 line 76925 MATCH x53003c00/mask=xfffffc00
+# CONSTRUCT x53000010/mask=xffe0801a MATCHED 7 DOCUMENTED OPCODES
+# AUNIT --inst x53000010/mask=xffe0801a --status pass
+# Special case of ubfm as determined by BFXPreferred()
+# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();
+
+:ubfx Rd_GPR32, Rn_GPR32, ImmRConst32, ubfx_width
+is ImmS_ne_1f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmRConst32 & ubfx_width & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32
+{
+    local wmask:4 = DecodeWMask32;
+    local tmask:4 = DecodeTMask32;
+    local src:4 = Rn_GPR32;
+    local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask;
+    Rd_GPR64 = zext(bot & tmask);
+}
+
+# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000
+# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000
+# C6.2.181 LSR (immediate) page C6-1102 line 61727 MATCH x53007c00/mask=x7f807c00
+# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000
+# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000
+# CONSTRUCT xd3400020/mask=xffc0002a MATCHED 5 DOCUMENTED OPCODES
+# AUNIT --inst xd3400020/mask=xffc0002a --status pass
+
+# Special case of ubfm as determined by BFXPreferred()
+
+:ubfx Rd_GPR64, Rn_GPR64, ImmRConst64, ubfx_width
+is ImmS_ne_3f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmRConst64 & ubfx_width & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmS_bitfield64_imm & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64
+{
+    local wmask:8 = DecodeWMask64;
+    local tmask:8 = DecodeTMask64;
+    local src:8 = Rn_GPR64;
+    local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask;
+    Rd_GPR64 = bot & tmask;
+}
+
+# C6.2.336 UDIV page C6-1378 line 76428 MATCH x1ac00800/mask=x7fe0fc00
+# CONSTRUCT x1ac00800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x1ac00800/mask=xffe0fc00 --status pass
+
+:udiv Rd_GPR32, Rn_GPR32, Rm_GPR32
+is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x2 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
+{
+    local tmp_1:4 = 0;
+    if (Rm_GPR32 == 0) goto <next>;
+    tmp_1 = Rn_GPR32 / Rm_GPR32;
+    <next>
+    Rd_GPR64 = zext(tmp_1);
+}
+
+# C6.2.336 UDIV page C6-1378 line 76428 MATCH x1ac00800/mask=x7fe0fc00
+# CONSTRUCT x9ac00800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9ac00800/mask=xffe0fc00 --status pass
+
+:udiv Rd_GPR64, Rn_GPR64, Rm_GPR64
+is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x2 & Rn_GPR64 & Rd_GPR64
+{
+    local tmp_1:8 = 0;
+    if (Rm_GPR64 == 0) goto <next>;
+    tmp_1 = Rn_GPR64 / Rm_GPR64;
+    <next>
+    Rd_GPR64 = tmp_1;
+}
+
+# C6.2.337 UMADDL page C6-1379 line 76496 MATCH x9ba00000/mask=xffe08000
+# CONSTRUCT x9ba00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9ba00000/mask=xffe08000 --status pass
+
+:umaddl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=0 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64
+{
+    tmp_3:8 = zext(Rn_GPR32);
+    tmp_4:8 = zext(Rm_GPR32);
+    tmp_2:8 = tmp_3 * tmp_4;
+    tmp_1:8 = Ra_GPR64 + tmp_2;
+    Rd_GPR64 = tmp_1;
+}
+
+# C6.2.338 UMNEGL page C6-1381 line 76585 MATCH x9ba0fc00/mask=xffe0fc00
+# C6.2.339 UMSUBL page C6-1382 line 76649 MATCH x9ba08000/mask=xffe08000
+# CONSTRUCT x9ba0fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x9ba0fc00/mask=xffe0fc00 --status pass
+
+:umnegl Rd_GPR64, Rn_GPR32, Rm_GPR32
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR32 & Rd_GPR64
+{
+    tmp_3:8 = zext(Rn_GPR32);
+    tmp_4:8 = zext(Rm_GPR32);
+    tmp_2:8 = tmp_3 * tmp_4;
+    tmp_1:8 = - tmp_2;
+    Rd_GPR64 = tmp_1;
+}
+
+# C6.2.339 UMSUBL page C6-1382 line 76649 MATCH x9ba08000/mask=xffe08000
+# CONSTRUCT x9ba08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9ba08000/mask=xffe08000 --status pass
+
+:umsubl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=1 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64
+{
+    tmp_3:8 = zext(Rn_GPR32);
+    tmp_4:8 = zext(Rm_GPR32);
+    tmp_2:8 = tmp_3 * tmp_4;
+    tmp_1:8 = Ra_GPR64 - tmp_2;
+    Rd_GPR64 = tmp_1;
+}
+
+# C6.2.340 UMULH page C6-1384 line 76737 MATCH x9bc00000/mask=xffe08000
+# CONSTRUCT x9bc00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# AUNIT --inst x9bc00000/mask=xffe08000 --status pass
+# To enforce SHOULD BE ONE fields add: b_1014=0b11111
+
+:umulh Rd_GPR64, Rn_GPR64, Rm_GPR64
+is sf=1 & op.dp3=0 & b_2428=0x1b & op.dp3_op31=6 & Rm_GPR64 & op.dp3_o0=0 & Ra & Rn_GPR64 & Rd_GPR64
+{
+    local tmpq:16 = zext(Rn_GPR64) * zext(Rm_GPR64);
+    Rd_GPR64 = tmpq(8);
+}
+
+# C6.2.341 UMULL page C6-1385 line 76802 MATCH x9ba07c00/mask=xffe0fc00
+# C6.2.337 UMADDL page C6-1379 line 76496 MATCH x9ba00000/mask=xffe08000
+# CONSTRUCT x9ba07c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# AUNIT --inst x9ba07c00/mask=xffe0fc00 --status pass
+
+:umull Rd_GPR64, Rn_GPR32, Rm_GPR32
+is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR32 & Rd_GPR64
+{
+    tmp_3:8 = zext(Rn_GPR32);
+    tmp_4:8 = zext(Rm_GPR32);
+    tmp_2:8 = tmp_3 * tmp_4;
+    Rd_GPR64 = tmp_2;
+}
+
+# C6.2.342 UXTB page C6-1386 line 76865 MATCH x53001c00/mask=xfffffc00
+# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000
+# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000
+# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000
+# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000
+# CONSTRUCT x53001c10/mask=xfffffc1e MATCHED 5 DOCUMENTED OPCODES
+# AUNIT --inst x53001c10/mask=xfffffc1e --status pass
+
+# Alias for ubfm where immr=='000000' and imms='000111'
+# These imply things about the inequalities
+
+:uxtb Rd_GPR32, Rn_GPR32
+is ImmR=0x0 & ImmS=0x7 & ImmS_ne_1f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & sf=0 & opc=2 & b_2428=0x13 &
b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + Rd_GPR64 = zext(tmp:1); +} + +# C6.2.343 UXTH page C6-1387 line 76925 MATCH x53003c00/mask=xfffffc00 +# C6.2.178 LSL (immediate) page C6-1096 line 61453 MATCH x53000000/mask=x7f800000 +# C6.2.332 UBFIZ page C6-1371 line 76071 MATCH x53000000/mask=x7f800000 +# C6.2.333 UBFM page C6-1373 line 76162 MATCH x53000000/mask=x7f800000 +# C6.2.334 UBFX page C6-1375 line 76294 MATCH x53000000/mask=x7f800000 +# CONSTRUCT x53003c10/mask=xfffffc1e MATCHED 5 DOCUMENTED OPCODES +# AUNIT --inst x53003c10/mask=xfffffc1e --status pass + +# Alias for ubfm where immr=='000000' and imms='001111' +# These imply things about the inequalities + +:uxth Rd_GPR32, Rn_GPR32 +is ImmR=0x0 & ImmS=0x0f & ImmS_ne_1f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 +{ + tmp:4 = Rn_GPR32; + Rd_GPR64 = zext(tmp:2); +} + +# C6.2.344 WFE page C6-1388 line 76985 MATCH xd503205f/mask=xffffffff +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503205f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503205f/mask=xffffffff --status nodest + +:wfe +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=2 & Rt=0x1f +{ + WaitForEvent(); +} + +# C6.2.345 WFI page C6-1390 line 77074 MATCH xd503207f/mask=xffffffff +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503207f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503207f/mask=xffffffff --status nodest + +:wfi +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=3 & Rt=0x1f +{ + WaitForInterrupt(); +} + +# C6.2.347 XPACD, XPACI, XPACLRI page C6-1392 line 77172 MATCH xdac143e0/mask=xfffffbe0 +# CONSTRUCT xdac147e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac147e0/mask=xffffffe0 --status noqemu +# D == 1 XPACD variant + +:xpacd Rd_GPR64 +is xpacd__PACpart & b_1131=0b110110101100000101000 & b_0509=0b11111 & b_10=1 & Rd_GPR64 +{ + build xpacd__PACpart; +} + +# C6.2.347 XPACD, XPACI, XPACLRI page C6-1392 line 77172 MATCH xdac143e0/mask=xfffffbe0 +# CONSTRUCT xdac143e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES +# AUNIT --inst xdac143e0/mask=xffffffe0 --status noqemu +# D == 0 XPACI variant + +:xpaci Rd_GPR64 +is xpaci__PACpart & b_1131=0b110110101100000101000 & b_0509=0b11111 & b_10=0 & Rd_GPR64 +{ + build xpaci__PACpart; +} + +# C6.2.347 XPACD, XPACI, XPACLRI page C6-1392 line 77172 MATCH xd50320ff/mask=xffffffff +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd50320ff/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd50320ff/mask=xffffffff --status nodest +# System variant + +:xpaclri +is xpaclri__PACpart & b_0031=0b11010101000000110010000011111111 +{ + build xpaclri__PACpart; +} + +# C6.2.348 YIELD page C6-1393 line 77243 MATCH xd503203f/mask=xffffffff +# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f +# CONSTRUCT xd503203f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES +# AUNIT --inst xd503203f/mask=xffffffff --status nodest + +:yield +is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=1 & Rt=0x1f +{ + Yield(); +} + +# C6.2.6 ADDG page C6-787 line 46877 MATCH KEEPWITH + +with : ShowMemTag=1 { + +# C6.2.6 ADDG page C6-783 line 44104 MATCH x91800000/mask=xffc00000 +# CONSTRUCT x91800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES + +:addg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013 +is sf=1 
& op=0 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp
+# " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used)
+[ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ]
+{
+    # we don't actually modify the target register, so Ghidra understands the pointer target is still the same.
+    # pseudo-ops let us do that, but it means that the decompiler can put an unintuitive value in the
+    # "CopyPtrTag_AddToPtrTag_Exclude" argument, e.g. "param_2 + 0x20".
+    uimm4:1 = b_1013;
+    exclude:2 = 0;
+    Or2BytesWithExcludedTags(exclude);
+    Rd_GPR64xsp = Rn_GPR64xsp + shifted_imm;
+    CopyPtrTag_AddToPtrTag_Exclude(Rd_GPR64xsp, Rn_GPR64xsp, uimm4, exclude);
+}
+
+}
+
+# C6.2.6 ADDG page C6-787 line 44223 KEEPWITH
+with : ShowMemTag=0 {
+
+# C6.2.6 ADDG page C6-783 line 44104 MATCH x91800000/mask=xffc00000
+# CONSTRUCT x91800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+
+:addg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013
+is sf=1 & op=0 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp
+# " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used)
+[ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ]
+{
+    Rd_GPR64xsp = Rn_GPR64xsp + shifted_imm;
+}
+
+}
+
+
+# C6.2.24 AXFLAG page C6-815 line 45842 MATCH xd500405f/mask=xfffff0ff
+# C6.2.194 MSR (immediate) page C6-1126 line 62879 MATCH xd500401f/mask=xfff8f01f
+# CONSTRUCT xd500405f/mask=xfffff0ff MATCHED 2 DOCUMENTED OPCODES
+# To enforce SHOULD BE ZERO fields add: b_0811=0b0000
+:axflag
+is b_1231=0b11010101000000000100 & b_0007=0b01011111
+{
+    tmpZR = ZR | OV;
+    tmpCY = CY & !OV;
+
+    NG = 0;
+    ZR = tmpZR;
+    CY = tmpCY;
+    OV = 0;
+}
+
+# C6.2.39 BTI page C6-838 line 46944 MATCH xd503241f/mask=xffffff3f
+# C6.2.92 HINT page C6-926 line 51483 MATCH xd503201f/mask=xfffff01f
+# CONSTRUCT xd503241f/mask=xffffff1f MATCHED 2 DOCUMENTED OPCODES
+
+:bti BTI_BTITARGETS
+is BTI_BTITARGETS & b_1231=0xd5032 & b_0811=4 & b_0004=0x1f
+{
+    # This instruction is a valid target for jumps, calls, or both; see the BTI_BTITARGETS table.
+}
+
+# C6.2.51 CFP page C6-861 line 48192 MATCH xd50b7380/mask=xffffffe0
+# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000
+# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000
+# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000
+# CONSTRUCT xd50b7380/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES
+
+:cfp "RCTX", Rt_GPR64
+is b_1931=0b1101010100001 & Op1_uimm3=3 & b_1215=7 & b_0811=3 & Op2_uimm3=4 & Rt_GPR64
+{
+    ControlFlowPredictionRestrictionByContext(Rt_GPR64);
+}
+
+
+# C6.2.63 CMPP page C6-881 line 49230 MATCH xbac0001f/mask=xffe0fc1f
+# C6.2.313 SUBPS page C6-1339 line 74377 MATCH xbac00000/mask=xffe0fc00
+# CONSTRUCT xbac0001f/mask=xffe0fc1f MATCHED 2 DOCUMENTED OPCODES
+# CMPP: Compare Pointers
+# Compare two 56-bit pointer values and set flags
+:cmpp Rn_GPR64xsp, Rm_GPR64xsp
+is sf=1 & b_30=0 & S=1 & b_2128=0b11010110 & Rm_GPR64xsp & b_1015=0b000000 & Rd=0b11111 & Rn_GPR64xsp
+{
+    # out of a 64-bit value, keep the lowest 56 bits, which is 7 bytes.
+    # sign-extend a 7-byte value to an 8-byte value. If the boundary weren't byte-aligned,
+    # sext() wouldn't work so well.
+    tmp_2:8 = Rm_GPR64xsp;
+    tmp_2 = sext(tmp_2:7); # if Rm:7 is used here, the decompiler considers the Rm register an int7 for the whole function.
+ tmp_1:8 = Rn_GPR64xsp; + tmp_1 = sext(tmp_1:7); + subflags(tmp_1, tmp_2); + tmp_1 = tmp_1 - tmp_2; + resultflags(tmp_1); + affectflags(); +} + + +# C6.2.65 CPP page C6-884 line 49372 MATCH xd50b73e0/mask=xffffffe0 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b73e0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES + +:cpp "RCTX", Rt_GPR64 +is b_1931=0b1101010100001 & Op1_uimm3=3 & b_1215=7 & b_0811=3 & Op2_uimm3=7 & Rt_GPR64 +{ + CachePrefetchPredictionRestrictionByContext(Rt_GPR64); +} + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087660/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES +# the new DC instruction types from ARMv8.5 + +:dc "IGVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b011 & Rt_GPR64 +{ DC_IGVAC(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087680/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "IGSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b100 & Rt_GPR64 +{ DC_IGSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50876a0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "IGDVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b101 & Rt_GPR64 +{ DC_IGDVAC(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50876c0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "IGDSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b110 & Rt_GPR64 +{ DC_IGDSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087a80/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b100 & Rt_GPR64 +{ DC_CGSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087ac0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGDSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b110 & Rt_GPR64 +{ DC_CGDSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087e80/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc 
"CIGSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b100 & Rt_GPR64 +{ DC_CIGSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd5087ec0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CIGDSW", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b110 & Rt_GPR64 +{ DC_CIGDSW(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7460/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "GVA", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0100 & b_0507=0b011 & Rt_GPR64 +{ DC_GVA(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7480/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "GZVA", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0100 & b_0507=0b100 & Rt_GPR64 +{ DC_GZVA(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7a60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b011 & Rt_GPR64 +{ DC_CGVAC(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7aa0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGDVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b101 & Rt_GPR64 +{ DC_CGDVAC(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7c60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGVAP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1100 & b_0507=0b011 & Rt_GPR64 +{ DC_CGVAP(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7ca0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGDVAP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1100 & b_0507=0b101 & Rt_GPR64 +{ DC_CGDVAP(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7d60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGVADP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1101 & b_0507=0b011 & Rt_GPR64 +{ DC_CGVADP(Rt_GPR64); } + 
+# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7da0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CGDVADP", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1101 & b_0507=0b101 & Rt_GPR64 +{ DC_CGDVADP(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7e60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CIGVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b011 & Rt_GPR64 +{ DC_CIGVAC(Rt_GPR64); } + +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b7ea0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES + +:dc "CIGDVAC", Rt_GPR64 +is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b101 & Rt_GPR64 +{ DC_CIGDVAC(Rt_GPR64); } + +# C6.2.83 DVP page C6-913 line 50823 MATCH xd50b73a0/mask=xffffffe0 +# C6.2.75 DC page C6-902 line 50267 MATCH xd5087000/mask=xfff8f000 +# C6.2.95 IC page C6-931 line 51782 MATCH xd5087000/mask=xfff8f000 +# C6.2.324 SYS page C6-1359 line 75462 MATCH xd5080000/mask=xfff80000 +# CONSTRUCT xd50b73a0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES + +:dvp "RCTX", Rt_GPR64 +is b_1931=0b1101010100001 & Op1_uimm3=3 & b_1215=7 & b_0811=3 & Op2_uimm3=5 & Rt_GPR64 +{ + DataValuePredictionRestrictionByContext(Rt_GPR64); +} + + +# GMI: Tag Mask Insert +# Extracts tag from first source register (Xn) and adds as an excluded tag to list of excluded +# tags in second source register, writing the updated exclusion set to the destination register + +with : ShowMemTag=1 { + +# C6.2.91 GMI page C6-925 line 51429 MATCH x9ac01400/mask=xffe0fc00 +# CONSTRUCT x9ac01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES + +:gmi Rd_GPR64, Rn_GPR64xsp, Rm_GPR64 +is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & Rm_GPR64 & b_1015=0b000101 & Rn_GPR64xsp & Rd_GPR64 +{ + # get tag from address + #tag:8 = (Rn_GPR64xsp >> 56) & 0xf; + tag:8 = 0; + AllocationTagFromAddress(tag, Rn_GPR64xsp); + Rd_GPR64 = Rm_GPR64 | (1 << tag); +} + +} +with : ShowMemTag=0 { + +# C6.2.91 GMI page C6-925 line 51429 MATCH x9ac01400/mask=xffe0fc00 +# CONSTRUCT x9ac01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES + +:gmi Rd_GPR64, Rn_GPR64xsp, Rm_GPR64 +is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & Rm_GPR64 & b_1015=0b000101 & Rn_GPR64xsp & Rd_GPR64 +{ + # The only expected use of the output of this instruction is in "exclude" arguments, which will be totally ignored + # with ShowMemTag off anyway, so for the sake of more concise code don't set any mask bits at all. + Rd_GPR64 = Rm_GPR64; +} + +} + +# IRG: Insert Random Tag +# Generates random tag (honoring excluded tags specified in optional second source register +# and GCR_EL1.Exclude) into the address from first source register, writing the result to the +# destination register. 
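+# A rough software model of those semantics, for illustration only (the spec
+# deliberately hides the tag choice behind the RandomizePtrTag_Exclude
+# pseudo-op below so that decompiled output stays readable): with the logical
+# tag held in address bits 59:56 -- cf. the commented-out line in GMI above --
+# and a 16-bit exclusion mask holding one bit per tag value,
+#
+#   exclude = Xm<15:0> OR GCR_EL1.Exclude
+#   choose some tag t in 0..15 with exclude<t> == 0   (the choice itself is
+#                                                      IMPLEMENTATION DEFINED)
+#   Xd = (Xn AND NOT(0xf << 56)) OR (t << 56)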
+with : ShowMemTag=1 {
+
+# C6.2.96 IRG page C6-932 line 51841 MATCH x9ac01000/mask=xffe0fc00
+# CONSTRUCT x9ac01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+
+:irg Rd_GPR64xsp, Rn_GPR64xsp^OPTIONAL_XM
+is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & OPTIONAL_XM & b_1015=0b000100 & Rn_GPR64xsp & Rd_GPR64xsp
+{
+    tmp:8 = OPTIONAL_XM;
+    exclude:2 = tmp:2;
+    Or2BytesWithExcludedTags(exclude);
+    Rd_GPR64xsp = Rn_GPR64xsp;
+    RandomizePtrTag_Exclude(Rd_GPR64xsp, exclude);
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.96 IRG page C6-932 line 51841 MATCH x9ac01000/mask=xffe0fc00
+# CONSTRUCT x9ac01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+
+:irg Rd_GPR64xsp, Rn_GPR64xsp^OPTIONAL_XM
+is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & OPTIONAL_XM & b_1015=0b000100 & Rn_GPR64xsp & Rd_GPR64xsp
+{
+    Rd_GPR64xsp = Rn_GPR64xsp;
+}
+
+}
+
+
+with : ShowMemTag=1 {
+
+# C6.2.123 LDG page C6-983 line 54728 MATCH xd9600000/mask=xffe00c00
+# CONSTRUCT xd9600000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+
+:ldg Rt_GPR64, addr_granuleSIMM
+is b_2131=0b11011001011 & addr_granuleSIMM & b_1011=0b00 & Rt_GPR64
+{
+    tmp:8 = addr_granuleSIMM;
+    Align(tmp, $(TAG_GRANULE));
+    tag:8 = LoadMemTag(tmp);
+    SetPtrTag(Rt_GPR64, tag);
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.123 LDG page C6-983 line 54728 MATCH xd9600000/mask=xffe00c00
+# CONSTRUCT xd9600000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
+
+:ldg Rt_GPR64, addr_granuleSIMM
+is b_2131=0b11011001011 & addr_granuleSIMM & b_1011=0b00 & Rt_GPR64
+{
+}
+
+}
+
+
+with : ShowMemTag=1 {
+
+# C6.2.124 LDGM page C6-984 line 54791 MATCH xd9e00000/mask=xfffffc00
+# CONSTRUCT xd9e00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+
+:ldgm Rt_GPR64, "["^Rn_GPR64xsp^"]"
+is b_1031=0b1101100111100000000000 & Rt_GPR64 & Rn_GPR64xsp
+{
+    sze:8 = 4 << (gmid_el1 & 0xf); # The value in parentheses (GMID_EL1.BS) varies between 2 and 6.
+    address:8 = Rn_GPR64xsp;
+    Align(address, sze); # this ensures that address will be granule-aligned, so we don't need to check it
+    count:8 = sze >> $(LOG2_TAG_GRANULE);
+    data:8 = 0:8; # output value
+    index:8 = (address >> $(LOG2_TAG_GRANULE)) & 0xf;
+    # for tmp = 0 to count-1
+    tmp:8 = 0;
+    <loop>
+    tag:8 = LoadMemTag(address) & 0xf;
+    # The 0xf doesn't do anything to streamline the representation of this
+    # instruction in the decompiler, but it shows the size of a tag.
+    data = data | (tag << (index * 4));
+    address = address + $(TAG_GRANULE);
+    index = index + 1;
+    tmp = tmp + 1;
+    # next tmp
+    if (tmp < count) goto <loop>;
+    Rt_GPR64 = data;
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.124 LDGM page C6-984 line 54791 MATCH xd9e00000/mask=xfffffc00
+# CONSTRUCT xd9e00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+
+:ldgm Rt_GPR64, "["^Rn_GPR64xsp^"]"
+is b_1031=0b1101100111100000000000 & Rt_GPR64 & Rn_GPR64xsp
+{
+    data:8 = 0:8; # output value
+    Rt_GPR64 = data;
+}
+
+}
+
+addrGranuleIndexed_checkAlignment: addrGranuleIndexed is Rn=0b11111 & addrGranuleIndexed { export addrGranuleIndexed; } # don't check alignment if we're working with the stack; it's assumed to be 16-byte-aligned, though that is technically optional
+addrGranuleIndexed_checkAlignment: addrGranuleIndexed is Rn & addrGranuleIndexed { tmp:8 = addrGranuleIndexed; RequireGranuleAlignment(tmp); export tmp; } # if the address in tmp is derived from sp, the error condition in RequireGranuleAlignment can still be an unreachable block; it doesn't seem possible to avoid the decompiler message in that case
+
+addrPairGranuleIndexed_checkAlignment: addrPairGranuleIndexed is Rn=0b11111 & addrPairGranuleIndexed { export addrPairGranuleIndexed; } # don't check alignment if we're working with the stack; it's assumed to be 16-byte-aligned, though that is technically optional
+addrPairGranuleIndexed_checkAlignment: addrPairGranuleIndexed is Rn & addrPairGranuleIndexed { tmp:8 = addrPairGranuleIndexed; RequireGranuleAlignment(tmp); export tmp; } # if the address in tmp is derived from sp, the error condition in RequireGranuleAlignment can still be an unreachable block; it doesn't seem possible to avoid the decompiler message in that case
+
+
+with : ShowMemTag=1 {
+
+# C6.2.246 ST2G page C6-1209 line 67247 MATCH xd9a00400/mask=xffe00c00
+# C6.2.246 ST2G page C6-1209 line 67247 MATCH xd9a00c00/mask=xffe00c00
+# C6.2.246 ST2G page C6-1209 line 67247 MATCH xd9a00800/mask=xffe00c00
+# CONSTRUCT xd9a00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:st2g Rt_GPR64xsp, addrGranuleIndexed_checkAlignment
+is b_2131=0b11011001101 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed_checkAlignment
+{
+    # in case Rt == Rn, get the tag first so any updates in addrGranuleIndexed_checkAlignment don't affect it
+    tag:8 = 0;
+    AllocationTagFromAddress(tag, Rt_GPR64xsp);
+
+    build addrGranuleIndexed_checkAlignment;
+
+    # this instruction throws an alignment fault if address is not granule-aligned
+    address:8 = addrGranuleIndexed_checkAlignment;
+
+    StoreMemTag(address, tag);
+    StoreMemTag(address + $(TAG_GRANULE), tag);
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.246 ST2G page C6-1209 line 67247 MATCH xd9a00400/mask=xffe00c00
+# C6.2.246 ST2G page C6-1209 line 67247 MATCH xd9a00c00/mask=xffe00c00
+# C6.2.246 ST2G page C6-1209 line 67247 MATCH xd9a00800/mask=xffe00c00
+# CONSTRUCT xd9a00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:st2g Rt_GPR64xsp, addrGranuleIndexed
+is b_2131=0b11011001101 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed
+{
+    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
+}
+
+}
+
+
+with : ShowMemTag=1 {
+
+# C6.2.256 STG page C6-1229 line 68246 MATCH xd9200400/mask=xffe00c00
+# C6.2.256 STG page C6-1229 line 68246 MATCH xd9200c00/mask=xffe00c00
+# C6.2.256 STG page C6-1229 line 68246 MATCH xd9200800/mask=xffe00c00
+# CONSTRUCT xd9200000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:stg Rt_GPR64xsp, addrGranuleIndexed_checkAlignment
+is
b_2131=0b11011001001 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed_checkAlignment
+{
+    # in case Rt == Rn, get the tag first so any updates in addrGranuleIndexed_checkAlignment don't affect it
+    tag:8 = 0;
+    AllocationTagFromAddress(tag, Rt_GPR64xsp);
+
+    build addrGranuleIndexed_checkAlignment;
+
+    # this instruction throws an alignment fault if address is not granule-aligned
+    address:8 = addrGranuleIndexed_checkAlignment;
+
+    StoreMemTag(address, tag);
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.256 STG page C6-1229 line 68246 MATCH xd9200400/mask=xffe00c00
+# C6.2.256 STG page C6-1229 line 68246 MATCH xd9200c00/mask=xffe00c00
+# C6.2.256 STG page C6-1229 line 68246 MATCH xd9200800/mask=xffe00c00
+# CONSTRUCT xd9200000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:stg Rt_GPR64xsp, addrGranuleIndexed
+is b_2131=0b11011001001 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed
+{
+    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
+}
+
+}
+
+
+with : ShowMemTag=1 {
+
+# C6.2.257 STGM page C6-1231 line 68376 MATCH xd9a00000/mask=xfffffc00
+# CONSTRUCT xd9a00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+
+:stgm Rt_GPR64, "["^Rn_GPR64xsp^"]"
+is b_1031=0b1101100110100000000000 & Rt_GPR64 & Rn_GPR64xsp
+{
+    sze:8 = 4 << (gmid_el1 & 0xf); # The value of GMID_EL1.BS varies between 2 and 6. Can that be asserted somehow?
+    address:8 = Rn_GPR64xsp;
+    Align(address, sze); # this ensures that address will be granule-aligned, so we don't need to check it
+    count:8 = sze >> $(LOG2_TAG_GRANULE);
+    data:8 = Rt_GPR64;
+    index:8 = (address >> $(LOG2_TAG_GRANULE)) & 0xf;
+    # for tmp = 0 to count-1
+    tmp:8 = 0;
+    <loop>
+    # This could also be done by leaving index and address constant and adding a tmp-based
+    # offset to them both, but that crams everything together into the StoreMemTag line in
+    # the decompiler and makes it harder to assign names and figure out what's going on.
+    # (Or at least, my opinion is that it's harder that way.)
+    # Also in favor of this design is that the ARM spec pseudocode describes it this way,
+    # so it's easier to see that this code matches the pseudocode.
+    tag:8 = (data >> (index * 4)) & 0xf;
+    StoreMemTag(address, tag);
+    address = address + $(TAG_GRANULE);
+    index = index + 1;
+    tmp = tmp + 1;
+    # next tmp
+    if (tmp < count) goto <loop>;
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.257 STGM page C6-1231 line 68376 MATCH xd9a00000/mask=xfffffc00
+# CONSTRUCT xd9a00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+
+:stgm Rt_GPR64, "["^Rn_GPR64xsp^"]"
+is b_1031=0b1101100110100000000000 & Rt_GPR64 & Rn_GPR64xsp
+{
+}
+
+}
+
+
+with : ShowMemTag=1 {
+
+# C6.2.258 STGP page C6-1232 line 68448 MATCH x68800000/mask=xffc00000
+# C6.2.258 STGP page C6-1232 line 68448 MATCH x69800000/mask=xffc00000
+# C6.2.258 STGP page C6-1232 line 68448 MATCH x69000000/mask=xffc00000
+# CONSTRUCT x68000000/mask=xfe400000 MATCHED 3 DOCUMENTED OPCODES
+
+:stgp Rt_GPR64, Rt2_GPR64, addrPairGranuleIndexed_checkAlignment
+is b_3031=0b01 & b_2529=0b10100 & (b_23=1 | b_24=1) & b_22=0 & Rt2_GPR64 & addrPairGranuleIndexed_checkAlignment & Rt_GPR64
+{
+    # Read all registers before addrPairGranuleIndexed_checkAlignment takes effect, or pre-index writeback could modify their values
+    # (unusually, this instruction does not have unpredictable behavior in that case).
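+    # A tag granule is 16 bytes, so the two 8-byte stores below land in a single granule
+    # and one StoreMemTag call covers them both.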
+ data1:8 = Rt_GPR64; + data2:8 = Rt2_GPR64; + + build addrPairGranuleIndexed_checkAlignment; + address:8 = addrPairGranuleIndexed_checkAlignment; # StoreMemTag requires granule alignment + + tag:8 = 0; + AllocationTagFromAddress(tag, address); + + # The decompiler apparently doesn't show changes to [sp+X] unless the new values + # are used in the function. However, the changes really are happening. + *address = data1; + *(address + 8) = data2; + StoreMemTag(address, tag); +} + +} +with : ShowMemTag=0 { + +# C6.2.258 STGP page C6-1232 line 68448 MATCH x68800000/mask=xffc00000 +# C6.2.258 STGP page C6-1232 line 68448 MATCH x69800000/mask=xffc00000 +# C6.2.258 STGP page C6-1232 line 68448 MATCH x69000000/mask=xffc00000 +# CONSTRUCT x68000000/mask=xfe400000 MATCHED 3 DOCUMENTED OPCODES + +:stgp Rt_GPR64, Rt2_GPR64, addrPairGranuleIndexed +is b_3031=0b01 & b_2529=0b10100 & (b_23=1 | b_24=1) & b_22=0 & Rt2_GPR64 & addrPairGranuleIndexed & Rt_GPR64 +{ + # Read all registers before addrPairGranuleIndexed takes effect, or pre-index writeback could modify their values + # (unusually, this instruction does not have unpredictable behavior in this case). + data1:8 = Rt_GPR64; + data2:8 = Rt2_GPR64; + + # for the sake of simplified output, omit the alignment check when ShowMemTag is off + build addrPairGranuleIndexed; + address:8 = addrPairGranuleIndexed; + + # The decompiler apparently doesn't show changes to [sp+X] unless the new values + # are used in the function. However, the changes really are happening. + *address = data1; + *(address + 8) = data2; +} + +} + + +with : ShowMemTag=1 { + +# C6.2.305 STZ2G page C6-1325 line 73542 MATCH xd9e00400/mask=xffe00c00 +# C6.2.305 STZ2G page C6-1325 line 73542 MATCH xd9e00c00/mask=xffe00c00 +# C6.2.305 STZ2G page C6-1325 line 73542 MATCH xd9e00800/mask=xffe00c00 +# CONSTRUCT xd9e00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES + +:stz2g Rt_GPR64xsp, addrGranuleIndexed_checkAlignment +is b_2131=0b11011001111 & (b_10=1 | b_11=1) & addrGranuleIndexed_checkAlignment & Rt_GPR64xsp +{ + tag:8 = 0; + AllocationTagFromAddress(tag, Rt_GPR64xsp); + + # Although the zero-storage is not required to be granule-aligned, the tag-updating is, + # so effectively the entire operation must be at a granule-aligned address. 
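+ # (Worked example, using the $(TAG_GRANULE) = 16 assumed throughout this file:
+ # an address such as 0x1008 would take the alignment fault modeled by
+ # RequireGranuleAlignment, while 0x1000 and 0x1010 pass the check.)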
+ build addrGranuleIndexed_checkAlignment;
+ address:8 = addrGranuleIndexed_checkAlignment;
+
+ # store two granules' worth of zeros and tag them from Rt
+ tmp:8 = 0;
+ addr:8 = 0;
+ count:8 = $(TAG_GRANULE) * 2;
+<loop>
+ addr = address + tmp;
+ *addr = 0:8;
+ tmp = tmp + 8;
+ if (tmp < count) goto <loop>;
+
+ StoreMemTag(address, tag);
+ StoreMemTag(address + $(TAG_GRANULE), tag);
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.305 STZ2G page C6-1325 line 73542 MATCH xd9e00400/mask=xffe00c00
+# C6.2.305 STZ2G page C6-1325 line 73542 MATCH xd9e00c00/mask=xffe00c00
+# C6.2.305 STZ2G page C6-1325 line 73542 MATCH xd9e00800/mask=xffe00c00
+# CONSTRUCT xd9e00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:stz2g Rt_GPR64xsp, addrGranuleIndexed
+is b_2131=0b11011001111 & (b_10=1 | b_11=1) & addrGranuleIndexed & Rt_GPR64xsp
+{
+ # for the sake of simplified output, omit the alignment check when ShowMemTag is off
+ build addrGranuleIndexed;
+ address:8 = addrGranuleIndexed;
+
+ # store two granules' worth of zeros
+ tmp:8 = 0;
+ addr:8 = 0;
+ count:8 = $(TAG_GRANULE) * 2;
+<loop>
+ addr = address + tmp;
+ *addr = 0:8;
+ tmp = tmp + 8;
+ if (tmp < count) goto <loop>;
+}
+
+}
+
+
+with : ShowMemTag=1 {
+
+# C6.2.306 STZG page C6-1327 line 73679 MATCH xd9600400/mask=xffe00c00
+# C6.2.306 STZG page C6-1327 line 73679 MATCH xd9600c00/mask=xffe00c00
+# C6.2.306 STZG page C6-1327 line 73679 MATCH xd9600800/mask=xffe00c00
+# CONSTRUCT xd9600000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:stzg Rt_GPR64xsp, addrGranuleIndexed_checkAlignment
+is b_2131=0b11011001011 & (b_10=1 | b_11=1) & addrGranuleIndexed_checkAlignment & Rt_GPR64xsp
+{
+ tag:8 = 0;
+ AllocationTagFromAddress(tag, Rt_GPR64xsp);
+
+ # Although the zero-storage is not required to be granule-aligned, the tag-updating is,
+ # so effectively the entire operation must be at a granule-aligned address.
+ build addrGranuleIndexed_checkAlignment;
+ address:8 = addrGranuleIndexed_checkAlignment;
+
+ # store one granule's worth of zeros and tag it from Rt
+ tmp:8 = 0;
+ addr:8 = 0;
+ count:8 = $(TAG_GRANULE);
+<loop>
+ addr = address + tmp;
+ *addr = 0:8;
+ tmp = tmp + 8;
+ if (tmp < count) goto <loop>;
+
+ StoreMemTag(address, tag);
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.306 STZG page C6-1327 line 73679 MATCH xd9600400/mask=xffe00c00
+# C6.2.306 STZG page C6-1327 line 73679 MATCH xd9600c00/mask=xffe00c00
+# C6.2.306 STZG page C6-1327 line 73679 MATCH xd9600800/mask=xffe00c00
+# CONSTRUCT xd9600000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
+
+:stzg Rt_GPR64xsp, addrGranuleIndexed
+is b_2131=0b11011001011 & (b_10=1 | b_11=1) & addrGranuleIndexed & Rt_GPR64xsp
+{
+ # for the sake of simplified output, omit the alignment check when ShowMemTag is off
+ build addrGranuleIndexed;
+ address:8 = addrGranuleIndexed;
+
+ # store one granule's worth of zeros
+ tmp:8 = 0;
+ addr:8 = 0;
+ count:8 = $(TAG_GRANULE);
+<loop>
+ addr = address + tmp;
+ *addr = 0:8;
+ tmp = tmp + 8;
+ if (tmp < count) goto <loop>;
+}
+
+}
+
+
+
+with : ShowMemTag=1 {
+
+# C6.2.307 STZGM page C6-1329 line 73814 MATCH xd9200000/mask=xfffffc00
+# CONSTRUCT xd9200000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+
+:stzgm Rt_GPR64, "["^Rn_GPR64xsp^"]"
+is b_1031=0b1101100100100000000000 & Rt_GPR64 & Rn_GPR64xsp
+{
+ sze:8 = 4 << (dczid_el0 & 0xf); # the last value (DCZID_EL0.BS) can be up to 9 (for a size of 2KB) and seems
+ # to be hardware-dependent and unwriteable (sysreg spec doesn't show how to write it).
+ # the minimum is probably 2, which would make the size equal to a tag granule
+ address:8 = Rn_GPR64xsp;
+ Align(address, sze); # based on the educated guess above, address is probably granule-aligned by this, so we won't check it explicitly (compare to LDGM or STGM)
+ count:8 = sze >> $(LOG2_TAG_GRANULE);
+ data:8 = Rt_GPR64;
+ tag:8 = data & 0xf;
+ # for tmp = 0 to count-1
+ tmp:8 = 0;
+<loop>
+ StoreMemTag(address, tag);
+
+ # store zeros to the entire granule
+ tmp_zero:8 = 0;
+ addr_zero:8 = address;
+ count_zero:8 = $(TAG_GRANULE);
+<loop_zero>
+ addr_zero = address + tmp_zero;
+ *addr_zero = 0:8;
+ tmp_zero = tmp_zero + 8;
+ if (tmp_zero < count_zero) goto <loop_zero>;
+
+ address = address + $(TAG_GRANULE);
+ # next tmp
+ tmp = tmp + 1;
+ if (tmp < count) goto <loop>;
+}
+
+}
+with : ShowMemTag=0 {
+
+# C6.2.307 STZGM page C6-1329 line 73814 MATCH xd9200000/mask=xfffffc00
+# CONSTRUCT xd9200000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+
+:stzgm Rt_GPR64, "["^Rn_GPR64xsp^"]"
+is b_1031=0b1101100100100000000000 & Rt_GPR64 & Rn_GPR64xsp
+{
+ sze:8 = 4 << (dczid_el0 & 0xf); # the last value (DCZID_EL0.BS) can be up to 9 (for a size of 2KB) and seems
+ # to be hardware-dependent and unwriteable (sysreg spec doesn't show how to write it).
+ # the minimum is probably 2, which would make the size equal to a tag granule
+ address:8 = Rn_GPR64xsp;
+ Align(address, sze); # based on the educated guess above, address is probably granule-aligned by this, so we won't check it explicitly (compare to LDGM or STGM)
+ count:8 = sze >> $(LOG2_TAG_GRANULE);
+ # for tmp = 0 to count-1
+ tmp:8 = 0;
+<loop>
+
+ # store zeros to the entire granule
+ tmp_zero:8 = 0;
+ addr_zero:8 = address;
+ count_zero:8 = $(TAG_GRANULE);
+<loop_zero>
+ addr_zero = address + tmp_zero;
+ *addr_zero = 0:8;
+ tmp_zero = tmp_zero + 8;
+ if (tmp_zero < count_zero) goto <loop_zero>;
+
+ address = address + $(TAG_GRANULE);
+ # next tmp
+ tmp = tmp + 1;
+ if (tmp < count) goto <loop>;
+}
+
+}
+
+
+# To enforce SHOULD BE ZERO fields add: b_1415=0b00
+
+with : ShowMemTag=1 {
+
+# C6.2.311 SUBG page C6-1337 line 74248 MATCH xd1800000/mask=xffc00000
+# CONSTRUCT xd1800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
+
+:subg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013
+is sf=1 & op=1 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp
+# " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used)
+[ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ]
+{
+ # we don't actually modify the tag bits of the target register, so Ghidra understands the pointer target is still the same.
+ # pseudo-ops let us express the tag update that way, but it means that the decompiler can put an unintuitive value in the
+ # "CopyPtrTag_AddToPtrTag_Exclude" argument, e.g. "param_2 - 0x20".
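+ # As an informal sketch (not live p-code), the pseudo-op below stands in for
+ # roughly this tag arithmetic, with ChooseNonExcludedTag as a hypothetical helper:
+ #   tag = (Rn_GPR64xsp >> 56) & 0xf;                  # MTE allocation tag, bits 59:56
+ #   tag = ChooseNonExcludedTag(tag + uimm4, exclude); # skip tags excluded via GCR_EL1
+ #   Rd_GPR64xsp = (Rd_GPR64xsp & ~(0xf << 56)) | (tag << 56);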
+ uimm4:1 = b_1013; + exclude:2 = 0; + Or2BytesWithExcludedTags(exclude); + Rd_GPR64xsp = Rn_GPR64xsp - shifted_imm; + CopyPtrTag_AddToPtrTag_Exclude(Rd_GPR64xsp, Rn_GPR64xsp, uimm4, exclude); +} + +} +with : ShowMemTag=0 { + +# C6.2.311 SUBG page C6-1337 line 74248 MATCH xd1800000/mask=xffc00000 +# CONSTRUCT xd1800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES + +:subg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013 +is sf=1 & op=1 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp +# " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used) +[ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ] +{ + Rd_GPR64xsp = Rn_GPR64xsp - shifted_imm; +} + +} + + +# Subtract Pointer [setting Flags]: +# Subtract the 56-bit address held in the second operand from the first and store the result +# in the destination register. If the destination register is XZR, then just use as a side- +# effect of being a pointer comparison (CMPP). +# C6.2.312 SUBP page C6-1338 line 74318 MATCH x9ac00000/mask=xffe0fc00 +# C6.2.313 SUBPS page C6-1339 line 74377 MATCH xbac00000/mask=xffe0fc00 +# CONSTRUCT x9ac00000/mask=xdfe0fc00 MATCHED 2 DOCUMENTED OPCODES + +:subp^SBIT_CZNO Rd_GPR64, Rn_GPR64xsp, Rm_GPR64xsp +is sf=1 & b_30=0 & S & SBIT_CZNO & b_2128=0b11010110 & b_1015=0b000000 & Rd_GPR64 & Rn_GPR64xsp & Rm_GPR64xsp +{ + # out of a 64-bit value, keep the lowest 56 bits, which is 7 bytes. + # sign-extend a 7-byte value to an 8-byte value. If the boundary weren't byte-aligned, + # sext() wouldn't work so well. + tmp_2:8 = Rm_GPR64xsp; + tmp_2 = sext(tmp_2:7); # if Rm:7 is used here, the decompiler considers the Rm register an int7 for the whole function. + tmp_1:8 = Rn_GPR64xsp; + tmp_1 = sext(tmp_1:7); + subflags(tmp_1, tmp_2); + tmp_1 = tmp_1 - tmp_2; + resultflags(tmp_1); + Rd_GPR64 = tmp_1; + build SBIT_CZNO; +} + + +# C6.2.335 UDF page C6-1377 line 76387 MATCH x00000000/mask=xffff0000 +# CONSTRUCT x00000000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# Undefined instruction +:udf b_0015 +is b_1631=0b0000000000000000 & b_0015 +{ + local excaddr:8 = inst_start; + local id:2 = b_0015; + local target:8 = UndefinedInstructionException(id, excaddr); + goto [target]; +} + + +# C6.2.346 XAFLAG page C6-1391 line 77127 MATCH xd500403f/mask=xfffff0ff +# C6.2.194 MSR (immediate) page C6-1126 line 62879 MATCH xd500401f/mask=xfff8f01f +# CONSTRUCT xd500403f/mask=xfffff0ff MATCHED 2 DOCUMENTED OPCODES + +:xaflag +is b_1231=0b11010101000000000100 & b_0007=0b00111111 +{ + tmpNG = !CY & !ZR; + tmpZR = ZR & CY; + tmpCY = CY | ZR; + tmpOV = !CY & ZR; + + NG = tmpNG; + ZR = tmpZR; + CY = tmpCY; + OV = tmpOV; +} diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64instructions.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64instructions.sinc new file mode 100644 index 00000000..fa11662c --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64instructions.sinc @@ -0,0 +1,4000 @@ +# Specification for the AARCH64 64-bit ARM instruction set +# +# See "ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile" +# opcodes are always Little endian, although the data can be Big/Little Endian. + +# TODO Collapse SUB/SUBS forms +# TODO MSR/MRS's need to be specified with special registers, coproc +# TODO Many special case opcodes for UBFM and BFM, For example BFI + +# TODO? Floating point numbers don't display correctly as IEEE floats +# TODO? 
Many special case opcodes like + +# TODO When writing to 32-bit Rd32, the upper bits of the bigger 64-bit Rd64 are zero'ed +# Most pcode does this, but this needs to be carefully checked. There may be some +# that do not zero extend into Rd64, and some that do extend into Rd64 but shouldn't. +# If it's not done right (or naively) the decompiler gets confused. So +# the accepted pattern for doing this is: +# +# ... calculate and set destination register ... +# local tmps:SIZE = destination_register; +# big_register = zext(tmps); +# destination_register = big_register; + +# Note Implemented 2/2016 +# +# UBFM/SBFM/BFM is implemented +# +# When the destination is a 32-bit register, the upper 32 bits of the register must be set to 0. +# This includes the wsp stack pointer, which might clobber the upper part of an address. +# +# And when the destination is a Rd_VPR vector register but the operand size is less than 128 bits, +# and the destination is not the upper half of the register (ie, bit 30 q=0) +# then the unused remaining upper bits must be set to 0. + +@if DATA_ENDIAN == "little" +define endian=little; +@else +define endian=big; +@endif +define alignment=4; + +# Unlike the above, these are preprocessor macros. Use them with e.g. $(TAG_GRANULE) in SLEIGH statements. +@define LOG2_TAG_GRANULE "4" +@define TAG_GRANULE "16" + +# SECTION registers + +define space ram type=ram_space size=8 default; +define space register type=register_space size=4; + +# See "ABOUT THE ENDIAN IFDEFS" below for an explanation of the endian +# ifdefs + +@if DATA_ENDIAN == "little" +define register offset=0x0000 size=8 [ pc sp ]; +define register offset=0x0000 size=4 [ _ _ wsp _ ]; +@else +define register offset=0x0000 size=8 [ pc sp ]; +define register offset=0x0000 size=4 [ _ _ _ wsp ]; +@endif + +define register offset=0x0100 size=1 [ NG ZR CY OV shift_carry tmpCY tmpOV tmpNG tmpZR ]; + +define register offset=0x0200 size=4 [ glob_mask32 ]; +define register offset=0x0204 size=8 [ glob_mask64 ]; + +# address set to load/store a value from memory in/out of vectors +define register offset=0x0300 size=8 [ VecMemAddr VectorSelem ]; + +# register address to load/store a value from memory in/out of registers +define register offset=0x0310 size=4 [ VecRegAddr ]; + +# Special Purpose Registers - most of these are really 1 bit and part +# of a status register, however they all need to be consistent + +# 26 registers 0xd0 bytes + +define register offset=0x1000 size=8 +[ + spsr_el1 + elr_el1 + sp_el0 + spsel + daif + currentel + nzcv + fpcr + fpsr + dspsr_el0 + dlr_el0 + spsr_el2 + elr_el2 + sp_el1 + spsr_irq + spsr_abt + spsr_und + spsr_fiq + spsr_el3 + elr_el3 + sp_el2 + spsr_svc + spsr_hyp + uao + pan + tco +]; + +# System Registers + +# 202 registers 0x330 bytes +define register offset=0x1100 size=8 +[ + midr_el1 + mpidr_el1 + revidr_el1 + id_dfr0_el1 + id_pfr0_el1 + id_pfr1_el1 + id_afr0_el1 + id_mmfr0_el1 + id_mmfr1_el1 + id_mmfr2_el1 + id_mmfr3_el1 + id_isar0_el1 + id_isar1_el1 + id_isar2_el1 + id_isar3_el1 + id_isar4_el1 + id_isar5_el1 + mvfr0_el1 + mvfr1_el1 + mvfr2_el1 + ccsidr_el1 + id_aa64pfr0_el1 + id_aa64pfr1_el1 + id_aa64dfr0_el1 + id_aa64dfr1_el1 + id_aa64isar0_el1 + id_aa64isar1_el1 + id_aa64mmfr0_el1 + id_aa64mmfr1_el1 + id_aa64afr0_el1 + id_aa64afr1_el1 + clidr_el1 + aidr_el1 + csselr_el1 + ctr_el0 + dczid_el0 + vpidr_el2 + vmpidr_el2 + sctlr_el1 + actlr_el1 + cpacr_el1 + sctlr_el2 + actlr_el2 + hcr_el2 + mdcr_el2 + cptr_el2 + hstr_el2 + hacr_el2 + sctlr_el3 + actlr_el3 + scr_el3 + cptr_el3 + mdcr_el3 + 
ttbr0_el1 + ttbr1_el1 + ttbr0_el2 + ttbr0_el3 + vttbr_el2 + tcr_el1 + tcr_el2 + tcr_el3 + vtcr_el2 + afsr0_el1 + afsr1_el1 + afsr0_el2 + afsr1_el2 + afsr0_el3 + afsr1_el3 + esr_el1 + esr_el2 + esr_el3 + fpexc32_el2 + far_el1 + far_el2 + far_el3 + hpfar_el2 + par_el1 + pmintenset_el1 + pmintenclr_el1 + pmcr_el0 + pmcntenset_el0 + pmcntenclr_el0 + pmovsclr_el0 + pmswinc_el0 + pmselr_el0 + pmceid0_el0 + pmceid1_el0 + pmccntr_el0 + pmxevtyper_el0 + pmxevcntr_el0 + pmuserenr_el0 + pmovsset_el0 + pmevcntr0_el0 + pmevcntr1_el0 + pmevcntr2_el0 + pmevcntr3_el0 + pmevcntr4_el0 + pmevcntr5_el0 + pmevcntr6_el0 + pmevcntr7_el0 + pmevcntr8_el0 + pmevcntr9_el0 + pmevcntr10_el0 + pmevcntr11_el0 + pmevcntr12_el0 + pmevcntr13_el0 + pmevcntr14_el0 + pmevcntr15_el0 + pmevcntr16_el0 + pmevcntr17_el0 + pmevcntr18_el0 + pmevcntr19_el0 + pmevcntr20_el0 + pmevcntr21_el0 + pmevcntr22_el0 + pmevcntr23_el0 + pmevcntr24_el0 + pmevcntr25_el0 + pmevcntr26_el0 + pmevcntr27_el0 + pmevcntr28_el0 + pmevcntr29_el0 + pmevcntr30_el0 + pmevtyper0_el0 + pmevtyper1_el0 + pmevtyper2_el0 + pmevtyper3_el0 + pmevtyper4_el0 + pmevtyper5_el0 + pmevtyper6_el0 + pmevtyper7_el0 + pmevtyper8_el0 + pmevtyper9_el0 + pmevtyper10_el0 + pmevtyper11_el0 + pmevtyper12_el0 + pmevtyper13_el0 + pmevtyper14_el0 + pmevtyper15_el0 + pmevtyper16_el0 + pmevtyper17_el0 + pmevtyper18_el0 + pmevtyper19_el0 + pmevtyper20_el0 + pmevtyper21_el0 + pmevtyper22_el0 + pmevtyper23_el0 + pmevtyper24_el0 + pmevtyper25_el0 + pmevtyper26_el0 + pmevtyper27_el0 + pmevtyper28_el0 + pmevtyper29_el0 + pmevtyper30_el0 + pmccfiltr_el0 + mair_el1 + mair_el2 + mair_el3 + amair_el1 + amair_el2 + amair_el3 + vbar_el1 + vbar_el2 + vbar_el3 + rvbar_el1 + rvbar_el2 + rvbar_el3 + rmr_el1 + rmr_el2 + rmr_el3 + isr_el1 + contextidr_el1 + tpidr_el0 + tpidrro_el0 + tpidr_el1 + tpidr_el2 + tpidr_el3 + teecr32_el1 + cntfrq_el0 + cntpct_el0 + cntvct_el0 + cntvoff_el2 + cntkctl_el1 + cnthctl_el2 + cntp_tval_el0 + cntp_ctl_el0 + cntp_cval_el0 + cntv_tval_el0 + cntv_ctl_el0 + cntv_cval_el0 + cnthp_tval_el2 + cnthp_ctl_el2 + cnthp_cval_el2 + cntps_tval_el1 + cntps_ctl_el1 + cntps_cval_el1 + dacr32_el2 + ifsr32_el2 + teehbr32_el1 + sder32_el3 + gmid_el1 + gcr_el1 + ssbs +]; + +# bitrange definitions are [,] + +define bitrange gcr_el1.exclude=gcr_el1[0,16]; + +# Debug Registers +# 82 registers 0x290 bytes + +define register offset=0x1800 size=8 +[ + osdtrrx_el1 + mdccint_el1 + mdscr_el1 + osdtrtx_el1 + oseccr_el1 + dbgbvr0_el1 + dbgbvr1_el1 + dbgbvr2_el1 + dbgbvr3_el1 + dbgbvr4_el1 + dbgbvr5_el1 + dbgbvr6_el1 + dbgbvr7_el1 + dbgbvr8_el1 + dbgbvr9_el1 + dbgbvr10_el1 + dbgbvr11_el1 + dbgbvr12_el1 + dbgbvr13_el1 + dbgbvr14_el1 + dbgbvr15_el1 + dbgbcr0_el1 + dbgbcr1_el1 + dbgbcr2_el1 + dbgbcr3_el1 + dbgbcr4_el1 + dbgbcr5_el1 + dbgbcr6_el1 + dbgbcr7_el1 + dbgbcr8_el1 + dbgbcr9_el1 + dbgbcr10_el1 + dbgbcr11_el1 + dbgbcr12_el1 + dbgbcr13_el1 + dbgbcr14_el1 + dbgbcr15_el1 + dbgwvr0_el1 + dbgwvr1_el1 + dbgwvr2_el1 + dbgwvr3_el1 + dbgwvr4_el1 + dbgwvr5_el1 + dbgwvr6_el1 + dbgwvr7_el1 + dbgwvr8_el1 + dbgwvr9_el1 + dbgwvr10_el1 + dbgwvr11_el1 + dbgwvr12_el1 + dbgwvr13_el1 + dbgwvr14_el1 + dbgwvr15_el1 + dbgwcr0_el1 + dbgwcr1_el1 + dbgwcr2_el1 + dbgwcr3_el1 + dbgwcr4_el1 + dbgwcr5_el1 + dbgwcr6_el1 + dbgwcr7_el1 + dbgwcr8_el1 + dbgwcr9_el1 + dbgwcr10_el1 + dbgwcr11_el1 + dbgwcr12_el1 + dbgwcr13_el1 + dbgwcr14_el1 + dbgwcr15_el1 + mdrar_el1 + oslar_el1 + oslsr_el1 + osdlr_el1 + dbgprcr_el1 + dbgclaimset_el1 + dbgclaimclr_el1 + dbgauthstatus_el1 + mdccsr_el0 + dbgdtr_el0 + dbgdtrrx_el0 + dbgdtrtx_el0 + 
dbgvcr32_el2 +]; + +define register offset=0x3000 size=4 contextreg; + +# value loaded from memory to store in register +# or computed to store in memory +define register offset=0x3100 size=4 tmp_ldWn; +define register offset=0x3104 size=8 tmp_ldXn; +define register offset=0x310c size=4 tmp_stWn; +define register offset=0x3110 size=8 tmp_stXn; + +# General purpose and SIMD registers +# +# These will start at 0x3800 and there should be no defined registers +# after this address (this is because the size of the registers is +# potentially variable). +# +# ABOUT THE ENDIAN IFDEFS +# the *address* of the overlain registers depends on if the underlying +# memory is in big or little endian order. In little endian order, the +# LSB is byte 0, so (for example) w0 and x0 have the same address *in +# register memory*. But in big endian order, the LSB of x0 is byte 7, +# and so w0 starts at byte 4. All of that just gets at the address in +# register memory. Any time a value is loaded into a varnode and +# manipulated in sleigh code, it is always in big endian order. It is +# only byte reversed when read or written to little endian memory. All +# that means is that there are endian ifdefs for the overlain +# registers here, but that can and should be ignored when writing +# semantics. + +# General purpose registers R0-R30 (R31=zero register ZR) +# They are accessed as +# 64-bit register named X0-X30 +# 32-bit registers named W0-W30 + +define register offset=0x4000 size=8 +[ + x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 + x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 xzr +]; + +@if DATA_ENDIAN == "little" +define register offset=0x4000 size=4 +[ + w0 _ + w1 _ + w2 _ + w3 _ + w4 _ + w5 _ + w6 _ + w7 _ + w8 _ + w9 _ + w10 _ + w11 _ + w12 _ + w13 _ + w14 _ + w15 _ + w16 _ + w17 _ + w18 _ + w19 _ + w20 _ + w21 _ + w22 _ + w23 _ + w24 _ + w25 _ + w26 _ + w27 _ + w28 _ + w29 _ + w30 _ + wzr _ +]; +@else +define register offset=0x4000 size=4 +[ + _ w0 + _ w1 + _ w2 + _ w3 + _ w4 + _ w5 + _ w6 + _ w7 + _ w8 + _ w9 + _ w10 + _ w11 + _ w12 + _ w13 + _ w14 + _ w15 + _ w16 + _ w17 + _ w18 + _ w19 + _ w20 + _ w21 + _ w22 + _ w23 + _ w24 + _ w25 + _ w26 + _ w27 + _ w28 + _ w29 + _ w30 + _ wzr +]; +@endif + +# SIMD&FP registers V0-V31 at 0x5000 +# They are accessed as: +# 128-bit registers named Q0-Q31 +# 64-bit registers named D0-D31 +# 32-bit registers named S0-S31 +# 16-bit registers named H0-H31 +# 8-bit registers named B0-B31 +# a 128-bit vector of elements +# a 64-bit vector of elements +# The packing is endian dependent +# For SVE, registers Z0-Z31 can be any size that is a multiple of 128 +# up to 2048 bits, and they overlap the V0-V31 registers + +# temporary SIMD registers, needed for calculations in SIMD semantics + +define register offset=0x4800 size=32 [ TMPZ1 TMPZ2 TMPZ3 TMPZ4 TMPZ5 TMPZ6 ]; + +@if DATA_ENDIAN == "little" + +define register offset=0x4800 size=16 +[ + TMPQ1 _ + TMPQ2 _ + TMPQ3 _ + TMPQ4 _ + TMPQ5 _ + TMPQ6 _ +]; + +define register offset=0x4800 size=8 +[ + TMPD1 _ _ _ + TMPD2 _ _ _ + TMPD3 _ _ _ + TMPD4 _ _ _ + TMPD5 _ _ _ + TMPD6 _ _ _ +]; + +define register offset=0x4800 size=4 +[ + TMPS1 _ _ _ _ _ _ _ + TMPS2 _ _ _ _ _ _ _ + TMPS3 _ _ _ _ _ _ _ + TMPS4 _ _ _ _ _ _ _ + TMPS5 _ _ _ _ _ _ _ + TMPS6 _ _ _ _ _ _ _ +]; + +@else # this is DATA_ENDIAN == "big" + +define register offset=0x4800 size=16 +[ + _ TMPQ1 + _ TMPQ2 + _ TMPQ3 + _ TMPQ4 + _ TMPQ5 + _ TMPQ6 +]; + +define register offset=0x4800 size=8 +[ + _ _ _ TMPD1 + _ _ _ TMPD2 + _ _ _ TMPD3 + _ _ _ TMPD4 + _ _ 
_ TMPD5 + _ _ _ TMPD6 +]; + +define register offset=0x4800 size=4 +[ + _ _ _ _ _ _ _ TMPS1 + _ _ _ _ _ _ _ TMPS2 + _ _ _ _ _ _ _ TMPS3 + _ _ _ _ _ _ _ TMPS4 + _ _ _ _ _ _ _ TMPS5 + _ _ _ _ _ _ _ TMPS6 +]; + +@endif + +# The size of the simd (z) register (in bytes) can be any multiple of +# 16 from 32 to 256 bytes. There are also 16 predicate registers are +# 1/8 the size of the corresponding simd registers. + +@define SIMD_SIZE "32" +@define PRED_SIZE "4" + +# In order to "move" the overlain registers to the right place, use +# these defines to locate within the z register. The __128 is for an +# 128-bit vector overlaid in a z-register, etc. For this to work +# SIMD_SIZE must be at least 32. + +define register offset=0x5000 size=$(SIMD_SIZE) +[ + z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 +]; + +define register offset=0x6000 size=$(PRED_SIZE) +[ + p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 +]; + +# define the overlaid simd registers + +@if DATA_ENDIAN == "little" + +define register offset=0x5000 size=16 +[ + q0 _ + q1 _ + q2 _ + q3 _ + q4 _ + q5 _ + q6 _ + q7 _ + q8 _ + q9 _ + q10 _ + q11 _ + q12 _ + q13 _ + q14 _ + q15 _ + q16 _ + q17 _ + q18 _ + q19 _ + q20 _ + q21 _ + q22 _ + q23 _ + q24 _ + q25 _ + q26 _ + q27 _ + q28 _ + q29 _ + q30 _ + q31 _ +]; + +define register offset=0x5000 size=8 +[ + d0 _ _ _ + d1 _ _ _ + d2 _ _ _ + d3 _ _ _ + d4 _ _ _ + d5 _ _ _ + d6 _ _ _ + d7 _ _ _ + d8 _ _ _ + d9 _ _ _ + d10 _ _ _ + d11 _ _ _ + d12 _ _ _ + d13 _ _ _ + d14 _ _ _ + d15 _ _ _ + d16 _ _ _ + d17 _ _ _ + d18 _ _ _ + d19 _ _ _ + d20 _ _ _ + d21 _ _ _ + d22 _ _ _ + d23 _ _ _ + d24 _ _ _ + d25 _ _ _ + d26 _ _ _ + d27 _ _ _ + d28 _ _ _ + d29 _ _ _ + d30 _ _ _ + d31 _ _ _ +]; + +define register offset=0x5000 size=4 +[ + s0 _ _ _ _ _ _ _ + s1 _ _ _ _ _ _ _ + s2 _ _ _ _ _ _ _ + s3 _ _ _ _ _ _ _ + s4 _ _ _ _ _ _ _ + s5 _ _ _ _ _ _ _ + s6 _ _ _ _ _ _ _ + s7 _ _ _ _ _ _ _ + s8 _ _ _ _ _ _ _ + s9 _ _ _ _ _ _ _ + s10 _ _ _ _ _ _ _ + s11 _ _ _ _ _ _ _ + s12 _ _ _ _ _ _ _ + s13 _ _ _ _ _ _ _ + s14 _ _ _ _ _ _ _ + s15 _ _ _ _ _ _ _ + s16 _ _ _ _ _ _ _ + s17 _ _ _ _ _ _ _ + s18 _ _ _ _ _ _ _ + s19 _ _ _ _ _ _ _ + s20 _ _ _ _ _ _ _ + s21 _ _ _ _ _ _ _ + s22 _ _ _ _ _ _ _ + s23 _ _ _ _ _ _ _ + s24 _ _ _ _ _ _ _ + s25 _ _ _ _ _ _ _ + s26 _ _ _ _ _ _ _ + s27 _ _ _ _ _ _ _ + s28 _ _ _ _ _ _ _ + s29 _ _ _ _ _ _ _ + s30 _ _ _ _ _ _ _ + s31 _ _ _ _ _ _ _ +]; + +define register offset=0x5000 size=2 +[ + h0 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h1 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h2 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h3 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h4 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h5 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h6 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h7 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h8 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h9 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h10 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h11 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h12 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h13 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h14 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h16 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h17 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h18 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h19 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h20 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h21 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h22 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h23 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h24 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h25 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h26 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h27 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h28 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h29 _ _ _ _ _ _ _ 
_ _ _ _ _ _ _ _ + h30 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + h31 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +]; + +define register offset=0x5000 size=1 +[ + b0 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b1 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b2 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b3 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b4 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b5 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b6 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b7 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b8 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b9 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b10 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b11 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b12 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b13 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b14 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b16 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b17 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b18 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b19 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b20 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b21 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b22 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b23 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b24 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b25 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b26 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b27 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b28 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b29 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b30 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + b31 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +]; + +@else # this is DATA_ENDIAN == "big" + +define register offset=0x5000 size=16 +[ + _ q0 + _ q1 + _ q2 + _ q3 + _ q4 + _ q5 + _ q6 + _ q7 + _ q8 + _ q9 + _ q10 + _ q11 + _ q12 + _ q13 + _ q14 + _ q15 + _ q16 + _ q17 + _ q18 + _ q19 + _ q20 + _ q21 + _ q22 + _ q23 + _ q24 + _ q25 + _ q26 + _ q27 + _ q28 + _ q29 + _ q30 + _ q31 +]; + +define register offset=0x5000 size=8 +[ + _ _ _ d0 + _ _ _ d1 + _ _ _ d2 + _ _ _ d3 + _ _ _ d4 + _ _ _ d5 + _ _ _ d6 + _ _ _ d7 + _ _ _ d8 + _ _ _ d9 + _ _ _ d10 + _ _ _ d11 + _ _ _ d12 + _ _ _ d13 + _ _ _ d14 + _ _ _ d15 + _ _ _ d16 + _ _ _ d17 + _ _ _ d18 + _ _ _ d19 + _ _ _ d20 + _ _ _ d21 + _ _ _ d22 + _ _ _ d23 + _ _ _ d24 + _ _ _ d25 + _ _ _ d26 + _ _ _ d27 + _ _ _ d28 + _ _ _ d29 + _ _ _ d30 + _ _ _ d31 +]; + +define register offset=0x5000 size=4 +[ + _ _ _ _ _ _ _ s0 + _ _ _ _ _ _ _ s1 + _ _ _ _ _ _ _ s2 + _ _ _ _ _ _ _ s3 + _ _ _ _ _ _ _ s4 + _ _ _ _ _ _ _ s5 + _ _ _ _ _ _ _ s6 + _ _ _ _ _ _ _ s7 + _ _ _ _ _ _ _ s8 + _ _ _ _ _ _ _ s9 + _ _ _ _ _ _ _ s10 + _ _ _ _ _ _ _ s11 + _ _ _ _ _ _ _ s12 + _ _ _ _ _ _ _ s13 + _ _ _ _ _ _ _ s14 + _ _ _ _ _ _ _ s15 + _ _ _ _ _ _ _ s16 + _ _ _ _ _ _ _ s17 + _ _ _ _ _ _ _ s18 + _ _ _ _ _ _ _ s19 + _ _ _ _ _ _ _ s20 + _ _ _ _ _ _ _ s21 + _ _ _ _ _ _ _ s22 + 
_ _ _ _ _ _ _ s23 + _ _ _ _ _ _ _ s24 + _ _ _ _ _ _ _ s25 + _ _ _ _ _ _ _ s26 + _ _ _ _ _ _ _ s27 + _ _ _ _ _ _ _ s28 + _ _ _ _ _ _ _ s29 + _ _ _ _ _ _ _ s30 + _ _ _ _ _ _ _ s31 +]; + +define register offset=0x5000 size=2 +[ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h0 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h1 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h2 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h3 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h4 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h5 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h6 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h7 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h8 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h9 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h10 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h11 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h12 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h13 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h14 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h15 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h16 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h17 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h18 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h19 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h20 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h21 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h22 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h23 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h24 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h25 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h26 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h27 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h28 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h29 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h30 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h31 +]; + +define register offset=0x5000 size=1 +[ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b0 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b1 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b2 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b3 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b4 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b5 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b6 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b7 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b8 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b9 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b10 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b11 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b12 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b13 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b14 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b15 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b16 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b17 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b18 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b19 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b20 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b21 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b22 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b23 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b24 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b25 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b26 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b27 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b28 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b29 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b30 + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ b31 +]; +@endif + +# SECTION token fields and context variables + +# "noflow" limits register changes to a single instruction (or a highlighted region) rather than following control flow. +# This allows the select-clear-SetRegister-disassemble procedure to be done without affecting +# any instructions other than those that are selected. +define context contextreg + ImmS_ImmR_TestSet = (0,0) + ImmS_LT_ImmR = (1,1) + ImmS_EQ_ImmR = (2,2) + ImmS_LT_ImmR_minus_1 = (3,3) + ImmS_ne_1f = (4,4) + ImmS_ne_3f = (5,5) + ShowPAC = (21,21) noflow + PAC_clobber = (22,22) noflow + ShowBTI = (23,23) noflow + ShowMemTag = (24,24) noflow +; + +define token instrAARCH64 (32) endian = little + + Rm = (16,20) + Rn = (5,9) + Rd = (0,4) + Rt = (0,4) + Ra = (10,14) + Rt2 = (10,14) + + Rm_FPR8 = (16,20) + Rn_FPR8 = (5,9) + Rd_FPR8 = (0,4) + Rd_FPR8_2 = (0,4) + Rt_FPR8 = (0,4) + Rm_FPR16 = (16,20) + Rn_FPR16 = (5,9) + Rd_FPR16 = (0,4) + Rd_FPR16_2 = (0,4) + Rt_FPR16 = (0,4) + Ra_FPR16 = (10,14) + Rm_FPR32 = (16,20) + Rn_FPR32 = (5,9) + Rd_FPR32 = (0,4) + Rd_FPR32_2 = (0,4) + Ra_FPR32 = (10,14) + Rm_FPR64 = (16,20) + Rn_FPR64 = (5,9) + Rd_FPR64 = (0,4) + Rd_FPR64_2 = (0,4) + Rt_FPR64 = (0,4) + Rt_FPR32 = (0,4) + Ra_FPR64 = (10,14) + Rt2_FPR128 = (10,14) + Rt2_FPR32 = (10,14) + Rt2_FPR64 = (10,14) + Ra_VPR128 = (10,14) + + Rm_VPR64 = (16,20) + Rn_VPR64 = (5,9) + Rd_VPR64 = (0,4) + + Re_VPR128 = (16,20) + Re_VPR128Lo = (16,19) + Rm_VPR128 = (16,20) + Rm_VPR128Lo = (16,19) + Rn_VPR128 = (5,9) + Rnn_VPR128 = (5,9) + Rnnn_VPR128 = (5,9) + Rnnnn_VPR128 = (5,9) + Rd_VPR128 = (0,4) + Rt_VPR128 = (0,4) + Rtt_VPR128 = (0,4) + Rttt_VPR128 = (0,4) + Rtttt_VPR128 = (0,4) + Rt_VPR64 = (0,4) + Rtt_VPR64 = (0,4) + Rttt_VPR64 = (0,4) + Rtttt_VPR64 = (0,4) + Rt_FPR128 = (0,4) + vRm_VPR64 = (16,20) + vRm_VPR128Lo = (16,19) + vRe_VPR128 = (16,20) + vRe_VPR128Lo = (16,19) + vRn_VPR64 = (5,9) + vRd_VPR64 = (0,4) + vRm_VPR128 = (16,20) + vRn_VPR128 = (5,9) + vRnn_VPR128 = (5,9) + vRnnn_VPR128 = (5,9) + vRnnnn_VPR128 = (5,9) + vRd_VPR128 = (0,4) + vRa_VPR128 = (10,14) + + Vt = (0,4) + Vtt = (0,4) + Vttt = (0,4) + Vtttt = (0,4) + + vVt = (0,4) + vVtt = (0,4) + vVttt = (0,4) + vVtttt = (0,4) + + aa_Xm = (16,20) + aa_Xn = (5,9) + aa_Xd = (0,4) + aa_Xs = (16,20) + aa_Xss = (16,20) + aa_Xt = (0,4) + aa_Xtt = (0,4) + aa_Xa = (10,14) + aa_Wm = (16,20) + aa_Wn = (5,9) + aa_Wd = (0,4) + aa_Ws = (16,20) + aa_Wss = (16,20) + aa_Wt = (0,4) + aa_Wtt = (0,4) + aa_Wa = (10,14) + aa_Wa2 = (10,14) + aa_CRm = (8,11) + + br_cond_op = (0,3) + cond_op = (12,15) + + aa_prefetch = (0,4) + + aa_hw = (21,22) + + aa_extreg_imm3 = (10,12) + aa_extreg_shift = (22,23) + aa_extreg_option = (13,15) + + imm6 = (10,15) + aa_imm7 = (15,21) + imm12 = (10,21) + imm16 = (5,20) + + simm7 = (15,21) signed + simm9 = (12,20) signed + simm14 = (5,18) signed + simm19 = (5,23) signed + simm26 = (0,25) signed + + immlo = (29,30) + immhi = (5,23) signed + + # Arbitrary bit fields + + b_00 = (0,0) + b_01 = (1,1) + b_02 = (2,2) + b_03 = (3,3) + b_04 = (4,4) + b_05 = (5,5) + b_06 = (6,6) + b_07 = (7,7) + b_08 = (8,8) + b_09 = (9,9) + b_10 = (10,10) + b_11 = (11,11) + b_12 = (12,12) + b_13 = (13,13) + b_14 = (14,14) + b_15 = (15,15) + b_16 = (16,16) + b_17 = (17,17) + b_18 = (18,18) + b_19 = (19,19) + b_20 = (20,20) + b_21 = (21,21) + b_22 = (22,22) + b_23 = (23,23) + b_24 = (24,24) + b_25 = (25,25) + b_26 = (26,26) + b_27 = (27,27) + b_28 = (28,28) + b_29 = (29,29) + b_30 = (30,30) + b_31 = (31,31) + + b_0001 = (0,1) + b_0003 = (0,3) + b_0004 = (0,4) + b_0006 = 
(0,6) + b_0007 = (0,7) + b_0009 = (0,9) + b_0011 = (0,11) + b_0015 = (0,15) + b_0027 = (0,27) + b_0031 = (0,31) + b_0102 = (1,2) + b_0103 = (1,3) + b_0204 = (2,4) + b_0304 = (3,4) + b_0405 = (4,5) + b_0406 = (4,6) + b_0407 = (4,7) + b_0409 = (4,9) + b_0411 = (4,11) + b_0427 = (4,27) + b_0431 = (4,31) + b_0506 = (5,6) + b_0507 = (5,7) + b_0508 = (5,8) + b_0509 = (5,9) + b_0510 = (5,10) + b_0515 = (5,15) + b_0607 = (6,7) + b_0609 = (6,9) + b_0610 = (6,10) + b_0611 = (6,11) + b_0708 = (7,8) + b_0709 = (7,9) + b_0710 = (7,10) + b_0711 = (7,11) + b_0809 = (8,9) + b_0810 = (8,10) + b_0811 = (8,11) + b_0910 = (9,10) + b_0911 = (9,11) + b_0916 = (9,16) + b_1010 = (10,10) + b_1011 = (10,11) + b_1012 = (10,12) + b_1013 = (10,13) + b_1014 = (10,14) + b_1015 = (10,15) + b_1021 = (10,21) + b_1022 = (10,22) + b_1028 = (10,28) + b_1029 = (10,29) + b_1031 = (10,31) + b_1111 = (11,11) + b_1112 = (11,12) + b_1113 = (11,13) + b_1114 = (11,14) + b_1115 = (11,15) + b_1116 = (11,16) + b_1131 = (11,31) + b_1212 = (12,12) + b_1213 = (12,13) + b_1214 = (12,14) + b_1215 = (12,15) + b_1216 = (12,16) + b_1217 = (12,17) + b_1220 = (12,20) + b_1223 = (12,23) + b_1229 = (12,29) + b_1230 = (12,30) + b_1231 = (12,31) + b_1313 = (13,13) + b_1314 = (13,14) + b_1315 = (13,15) + b_1317 = (13,17) + b_1321 = (13,21) + b_1322 = (13,22) + b_1414 = (14,14) + b_1417 = (14,17) + b_1415 = (14,15) + b_1431 = (14,31) + b_1515 = (15,15) + b_1517 = (15,17) + b_1520 = (15,20) + b_1531 = (15,31) + b_1616 = (16,16) + b_1617 = (16,17) + b_1618 = (16,18) + b_1619 = (16,19) + b_1620 = (16,20) + b_1621 = (16,21) + b_1623 = (16,23) + b_1627 = (16,27) + b_1629 = (16,29) + b_1631 = (16,31) + b_1718 = (17,18) + b_1719 = (17,19) + b_1720 = (17,20) + b_1721 = (17,21) + b_1722 = (17,22) + b_1818 = (18,18) + b_1819 = (18,19) + b_1820 = (18,20) + b_1821 = (18,21) + b_1920 = (19,20) + b_1921 = (19,21) + b_1922 = (19,22) + b_1923 = (19,23) + b_1928 = (19,28) + b_1929 = (19,29) + b_1931 = (19,31) + b_2020 = (20,20) + b_2021 = (20,21) + b_2022 = (20,22) + b_2023 = (20,23) + b_2024 = (20,24) + b_2027 = (20,27) + b_2121 = (21,21) + b_2122 = (21,22) + b_2123 = (21,23) + b_2124 = (21,24) + b_2125 = (21,25) + b_2127 = (21,27) + b_2128 = (21,28) + b_2129 = (21,29) + b_2130 = (21,30) + b_2131 = (21,31) + b_2222 = (22,22) + b_2223 = (22,23) + b_2224 = (22,24) + b_2225 = (22,25) + b_2229 = (22,29) + b_2231 = (22,31) + b_2323 = (23,23) + b_2324 = (23,24) + b_2325 = (23,25) + b_2327 = (23,27) + b_2328 = (23,28) + b_2329 = (23,29) + b_2331 = (23,31) + b_2425 = (24,25) + b_2427 = (24,27) + b_2428 = (24,28) + b_2429 = (24,29) + b_2430 = (24,30) + b_2431 = (24,31) + b_2525 = (25,25) + b_2527 = (25,27) + b_2529 = (25,29) + b_2530 = (25,30) + b_2531 = (25,31) + b_2627 = (26,27) + b_2629 = (26,29) + b_2630 = (26,30) + b_2631 = (26,31) + b_2729 = (27,29) + b_2929 = (29,29) + b_2930 = (29,30) + b_2931 = (29,31) + b_3030 = (30,30) + b_3031 = (30,31) + b_3131 = (31,31) + + cmpr_op = (24,24) + sf = (31,31) + + imm_neon_uimm1 = (20,20) + imm_neon_uimm2 = (19,20) + imm_neon_uimm3 = (18,20) + imm_neon_uimm4 = (17,20) + immN_neon_uimm1 = (14,14) + immN_neon_uimm2 = (13,14) + immN_neon_uimm3 = (12,14) + immN_neon_uimm4 = (11,14) + + fpOpcode = (16,18) + fpDpOpcode = (15,20) + + CRm_CRx = (8,11) + CRm_32 = (10,11) + CRm_10 = (8,9) + CRm_dbarrier_op = (8,11) + CRm_isb_op = (8,11) + + CRn = (12,15) + CRm = (8,11) + CRn_CRx = (12,15) + + Imm4 = (11,13) + + # C2.2.3 Modified immediate constants in A64 instructions page C2-158 + + Imm8_fmov_sign = (20,20) # a + Imm8_fmov_exph = (19,19) # b 
+ Imm8_fmov_expl = (17,18) # cd + Imm8_fmov_frac = (13,16) # efgh + + ImmN = (22,22) + ImmR = (16,21) + ImmS = (10,15) + Imm_imm0_63 = (16,21) + + n_uimm8L = (5,9) + n_uimm8H = (16,18) + + Imm_uimm3 = (16,18) + Imm_uimm4 = (16,19) + Imm_uimm5 = (16,20) + Imm_uimm5_31 = (31,31) + Imm_uimm6 = (31,31) + + L = (22,22) + + N = (21,21) + + Op0 = (19,20) + Op1 = (16,18) + Op1_uimm3 = (16,18) + Op2 = (5,7) + Op2_uimm3 = (5,7) + Q = (30,30) + S = (29,29) + + Scale = (10,15) + + excCode = (21,23) + excCode2 = (2,4) + + imm7Low = (5,11) + + cmode = (12,15) + imm4 = (11,14) + imm5 = (5,9) + l = (21,21) + ll = (0,1) + m = (31,31) + mode = (19,20) + n = (22,22) + o0 = (4,4) + o1 = (24,24) + o2 = (10,10) + + o3 = (4,4) + op = (30,30) + + fpccmp.op = (4,4) + fpcmp.op = (14,15) + + op2 = (16,20) + + op3 = (10,15) + op4 = (0,4) + opc = (29,30) + opc.indexmode = (10,11) + + op.dp3 = (29,30) + op.dp3_o0 = (15,15) + op.dp3_op31 = (21,23) + op.dp3_op54 = (29,30) + + opcode2 = (10,15) + dp1.opcode2 = (16,20) + fpcmp.opcode2 = (0,4) + opt = (22,23) + option = (13,15) + optionlo = (13,13) + q = (30,30) + rmode = (19,20) + s = (29,29) + + size.ldstr = (30,31) + + shift = (22,23) + advSIMD3.size = (22,23) + size.neon = (10,11) + + size_high = (23,23) + ftype = (22,23) + u = (29,29) + v = (26,26) + + # SVE tokens + + Zd = (0,4) + Zt = (0,4) + Ztt = (0,4) + Zttt = (0,4) + Ztttt = (0,4) + Ze = (16,20) + Zm = (16,20) + Zn = (5,9) + Zt2 = (10,14) + + sve_b_00 = (0,0) + sve_b_0001 = (0,1) + sve_b_01 = (1,1) + sve_b_02 = (2,2) + sve_b_03 = (3,3) + sve_b_04 = (4,4) + sve_b_0409 = (4,9) + sve_b_0609 = (6,9) + sve_b_09 = (9,9) + sve_b_10 = (10,10) + sve_b_1015 = (10,15) + sve_b_1019 = (10,19) + sve_b_1021 = (10,21) + sve_b_11 = (11,11) + sve_b_1112 = (11,12) + sve_b_1115 = (11,15) + sve_b_12 = (12,12) + sve_b_1215 = (12,15) + sve_b_13 = (13,13) + sve_b_1315 = (13,15) + sve_b_1321 = (13,21) + sve_b_14 = (14,14) + sve_b_1415 = (14,15) + sve_b_1416 = (14,16) + sve_b_1419 = (14,19) + sve_b_1421 = (14,21) + sve_b_15 = (15,15) + sve_b_16 = (16,16) + sve_b_17 = (17,17) + sve_b_1718 = (17,18) + sve_b_1719 = (17,19) + sve_b_1720 = (17,20) + sve_b_1721 = (17,21) + sve_b_1731 = (17,31) + sve_b_18 = (18,18) + sve_b_1821 = (18,21) + sve_b_1831 = (18,31) + sve_b_1921 = (19,21) + sve_b_20 = (20,20) + sve_b_2021 = (20,21) + sve_b_2022 = (20,22) + sve_b_21 = (21,21) + sve_b_2122 = (21,22) + sve_b_2131 = (21,31) + sve_b_22 = (22,22) + sve_b_2224 = (22,24) + sve_b_2231 = (22,31) + sve_b_23 = (23,23) + sve_b_2331 = (23,31) + sve_b_24 = (24,24) + sve_b_2429 = (24,29) + sve_b_2431 = (24,31) + sve_b_2531 = (25,31) + sve_b_3031 = (30,31) + sve_float_dec = (5,8) + sve_float_exp = (9,11) + sve_i1_05 = (5,5) + sve_i1_20 = (20,20) + sve_i2_1920 = (19,20) + sve_i3h_22 = (22,22) + sve_i3l_1920 = (19,20) + sve_imm13_0517 = (5,17) + sve_imm2_2223 = (22,23) + sve_imm3_0507 = (5,7) + sve_imm3_1618 = (16,18) + sve_imm4_1619 = (16,19) + sve_imm4s_1619 = (16,19) signed + sve_imm5_0509 = (5,9) + sve_imm5s_0509 = (5,9) signed + sve_imm5_1620 = (16,20) + sve_imm5s_1620 = (16,20) signed + sve_imm5b_1620 = (16,20) signed + sve_imm6_0510 = (5,10) + sve_imm6s_0510 = (5,10) signed + sve_imm6_1621 = (16,21) + sve_imm6s_1621 = (16,21) signed + sve_imm7_1420 = (14,20) + sve_imm8_0512 = (5,12) + sve_imm8s_0512 = (5,12) signed + sve_imm8h_1620 = (16,20) + sve_imm8l_1012 = (10,12) + sve_imm9h_1621 = (16,21) + sve_imm9hs_1621 = (16,21) signed + sve_imm9l_1012 = (10,12) + sve_m_04 = (4,4) + sve_m_14 = (14,14) + sve_m_16 = (16,16) + sve_msz_1011 = (10,11) + sve_pattern_0509 = 
(5,9) + sve_pd_0003 = (0,3) + sve_pdm_0003 = (0,3) + sve_pdn_0003 = (0,3) + sve_pg_0508 = (5,8) + sve_pg_1012 = (10,12) + sve_pg_1013 = (10,13) + sve_pg_1619 = (16,19) + sve_pm_1619 = (16,19) + sve_pn_0508 = (5,8) + sve_prfop_0003 = (0,3) + sve_pt_0003 = (0,3) + sve_rd_0004 = (0,4) + sve_rdn_0004 = (0,4) + sve_rm_0509 = (5,9) + sve_rm_1620 = (16,20) + sve_rn_0509 = (5,9) + sve_rn_1620 = (16,20) + sve_rot_1011 = (10,11) + sve_rot_1314 = (13,14) + sve_rot_16 = (16,16) + sve_s_22 = (22,22) + sve_sf_12 = (12,12) + sve_sh_13 = (13,13) + sve_size_2122 = (21,22) + sve_size_2223 = (22,23) + sve_sz_22 = (22,22) + sve_tsz_1620 = (16,20) + sve_tszh_2223 = (22,23) + sve_tszl_0809 = (8,9) + sve_tszl_1920 = (19,20) + sve_vd_0004 = (0,4) + sve_vdn_0004 = (0,4) + sve_vm_0509 = (5,9) + sve_vn_0509 = (5,9) + sve_xs_14 = (14,14) + sve_xs_22 = (22,22) + sve_za_0509 = (5,9) + sve_za_1620 = (16,20) + sve_zd_0004 = (0,4) + sve_zda_0004 = (0,4) + sve_zdn_0004 = (0,4) + sve_zm_0509 = (5,9) + sve_zm_1618 = (16,18) + sve_zm_1619 = (16,19) + sve_zm_1620 = (16,20) + sve_zn_0509 = (5,9) + sve_zt_0004 = (0,4) + sve_ztt_0004 = (0,4) + sve_zttt_0004 = (0,4) + sve_ztttt_0004 = (0,4) +; + +# SECTION variables and variable names + +attach variables [ Zd Ze Zm Zn Zt Zt2 ] +[ + z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 +]; + +attach variables [ Ztt ] +[ + z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 +]; + +attach variables [ Zttt ] +[ + z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 +]; + +attach variables [ Ztttt ] +[ + z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 z2 +]; + +attach variables [ aa_Xn aa_Xm aa_Xs aa_Xd aa_Xt aa_Xa ] +[ + x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 + x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 xzr +]; + +attach variables [ aa_Xss aa_Xtt ] +[ + x1 _ x3 _ x5 _ x7 _ x9 _ x11 _ x13 _ x15 + _ x17 _ x19 _ x21 _ x23 _ x25 _ x27 _ x29 _ xzr _ +]; + +attach variables [ aa_Wn aa_Wm aa_Ws aa_Wd aa_Wt aa_Wa ] +[ + w0 w1 w2 w3 w4 w5 w6 w7 w8 w9 w10 w11 w12 w13 w14 w15 + w16 w17 w18 w19 w20 w21 w22 w23 w24 w25 w26 w27 w28 w29 w30 wzr +]; + +attach variables [ aa_Wss aa_Wtt ] +[ + w1 _ w3 _ w5 _ w7 _ w9 _ w11 _ w13 _ w15 + _ w17 _ w19 _ w21 _ w23 _ w25 _ w27 _ w29 _ wzr _ +]; + +attach variables [ Rm_VPR128 Rn_VPR128 Rd_VPR128 Rt_VPR128 Rt2_FPR128 Re_VPR128 Rt_FPR128 Ra_VPR128 ] +[ + q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 +]; + +attach variables [ Rnn_VPR128 Rtt_VPR128 ] +[ + q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 + q0 +]; + +attach variables [ Rnnn_VPR128 Rttt_VPR128 ] +[ + q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 + q0 q1 +]; + +attach variables [ Rnnnn_VPR128 Rtttt_VPR128 ] +[ + q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 + q0 q1 q2 +]; + +attach names [ vRm_VPR128 vRn_VPR128 vRd_VPR128 vRe_VPR128 vRa_VPR128 ] +[ + v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 +]; + +attach names [ vRnn_VPR128 ] +[ + v1 v2 v3 v4 v5 v6 v7 v8 
v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 + v0 +]; + +attach names [ vRnnn_VPR128 ] +[ + v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 + v0 v1 +]; + +attach names [ vRnnnn_VPR128 ] +[ + v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 + v0 v1 v2 +]; + +attach variables [ Rm_VPR128Lo Re_VPR128Lo ] [ q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 ]; + +attach names [ vRm_VPR128Lo vRe_VPR128Lo ] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 ]; + +attach variables [ Rm_VPR64 Rn_VPR64 Rd_VPR64 Rt_VPR64 ] +[ + d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 +]; + +attach variables [ Rtt_VPR64 ] +[ + d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 d0 +]; + +attach variables [ Rttt_VPR64 ] +[ + d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 d0 d1 +]; + +attach variables [ Rtttt_VPR64 ] +[ + d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 d0 d1 d2 +]; + +attach names [ vRm_VPR64 vRn_VPR64 vRd_VPR64 ] +[ + v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 +]; + +attach variables [ Rm_FPR64 Rn_FPR64 Rd_FPR64 Rd_FPR64_2 Rt2_FPR64 Ra_FPR64 Rt_FPR64 ] +[ + d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 +]; + +attach variables [ Rm_FPR32 Rn_FPR32 Rd_FPR32 Rd_FPR32_2 Rt2_FPR32 Ra_FPR32 Rt_FPR32 ] +[ + s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 + s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 +]; + +attach variables [ Rm_FPR16 Rn_FPR16 Rd_FPR16 Rd_FPR16_2 Rt_FPR16 Ra_FPR16 ] +[ + h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15 + h16 h17 h18 h19 h20 h21 h22 h23 h24 h25 h26 h27 h28 h29 h30 h31 +]; + +attach variables [ Rm_FPR8 Rn_FPR8 Rd_FPR8 Rd_FPR8_2 Rt_FPR8 ] +[ + b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 + b16 b17 b18 b19 b20 b21 b22 b23 b24 b25 b26 b27 b28 b29 b30 b31 +]; + +attach variables [ Vt ] +[ + q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 +]; + +attach variables [ Vtt ] +[ + q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 +]; + +attach variables [ Vttt ] +[ + q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 q1 +]; + +attach variables [ Vtttt ] +[ + q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 + q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 q1 q2 +]; + +attach names [ vVt ] +[ + v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 +]; + +attach names [ vVtt ] +[ + v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 +]; + +attach names [ vVttt ] +[ + v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 v1 +]; + +attach names [ vVtttt ] +[ + v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 
v25 v26 v27 v28 v29 v30 v31 v0 v1 v2 +]; + +attach names [ aa_prefetch ] +[ + PLDL1KEEP PLDL1STRM PLDL2KEEP PLDL2STRM PLDL3KEEP PLDL3STRM P_0x06 P_0x07 + PLIL1KEEP PLIL1STRM PLIL2KEEP PLIL2STRM PLIL3KEEP PLIL3STRM P_0x0e P_0x0f + PSTL1KEEP PSTL1STRM PSTL2KEEP PSTL2STRM PSTL3KEEP PSTL3STRM + P_0x16 P_0x17 P_0x18 P_0x19 P_0x1a P_0x1b P_0x1c P_0x1d P_0x1e P_0x1f +]; + +attach names [ CRm_dbarrier_op ] [ _ OSHLD OSHST OSH _ NSHLD NSHST NSH _ ISHLD ISHST ISH _ LD ST SY ]; + +# SVE registers and names + +attach variables [ sve_zm_1618 ] +[ + z0 z1 z2 z3 z4 z5 z6 z7 +]; + +attach variables [ sve_zm_1619 ] +[ + z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 +]; + +attach variables [ sve_za_0509 sve_za_1620 sve_zd_0004 sve_zda_0004 sve_zdn_0004 sve_zm_0509 sve_zm_1620 sve_zn_0509 sve_zt_0004 ] +[ + z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 +]; + +attach variables [ sve_ztt_0004 ] +[ + z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 +]; + +attach variables [ sve_zttt_0004 ] +[ + z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 +]; + +attach variables [ sve_ztttt_0004 ] +[ + z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 + z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 z2 +]; + +attach variables [ sve_pg_1012 ] +[ + p0 p1 p2 p3 p4 p5 p6 p7 +]; + +attach variables [ sve_pd_0003 sve_pdm_0003 sve_pdn_0003 sve_pg_0508 sve_pg_1013 sve_pg_1619 sve_pm_1619 sve_pn_0508 sve_pt_0003 ] +[ + p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 +]; + +attach names [ sve_sz_22 ] [ b h ]; +attach names [ sve_msz_1011 ] [ "" " #1" " #2" " #3" ]; +attach names [ sve_rot_16 ] [ "#90" "#270" ]; +attach names [ sve_rot_1314 ] [ "#0" "#90" "#180" "#270" ]; +attach names [ sve_rot_1011 ] [ "#0" "#90" "#180" "#270" ]; + +# SECTION subtables + +Rm_GPR32: aa_Wm is aa_Wm { export aa_Wm; } +Rm_GPR32: wzr is aa_Wm=31 & wzr { tmp:4 = 0; export tmp; } + +Rd_GPR32: aa_Wd is aa_Wd { export aa_Wd; } +Rd_GPR32: wzr is aa_Wd=31 & wzr { tmp:4 = 0; export tmp; } +Rd_GPR32_2: aa_Wd is aa_Wd { export aa_Wd; } +Rd_GPR32_2: wzr is aa_Wd=31 & wzr { tmp:4 = 0; export tmp; } + +Rd_GPR32xsp: aa_Wd is aa_Wd { export aa_Wd; } +Rd_GPR32xsp: wsp is aa_Wd=31 & wsp { export wsp; } + +Rd_GPR32wsp: Rd_GPR32xsp is Rd_GPR32xsp { export Rd_GPR32xsp; } + +Rn_GPR32: aa_Wn is aa_Wn { export aa_Wn; } +Rn_GPR32: wzr is aa_Wn=31 & wzr { tmp:4 = 0; export tmp; } + +Ra_GPR32: aa_Wa is aa_Wa { export aa_Wa; } +Ra_GPR32: wzr is aa_Wa=31 & wzr { tmp:4 = 0; export tmp; } + +Rt2_GPR32: aa_Wa is aa_Wa { export aa_Wa; } +Rt2_GPR32: wzr is aa_Wa=31 & wzr { tmp:4 = 0; export tmp; } + +Rn_GPR32xsp: aa_Wn is aa_Wn { export aa_Wn; } +Rn_GPR32xsp: wsp is aa_Wn=31 & wsp { export wsp; } + +Rn_GPR32wsp: aa_Wn is aa_Wn { export aa_Wn; } +Rn_GPR32wsp: wsp is aa_Wn=31 & wsp { export wsp; } + +Rt_GPR32: aa_Wt is aa_Wt { export aa_Wt; } +Rt_GPR32: wzr is aa_Wt=31 & wzr { tmp:4 = 0; export tmp; } + +Rm_GPR64: aa_Xm is aa_Xm { export aa_Xm; } +Rm_GPR64: xzr is aa_Xm=31 & xzr { export 0:8; } + +Rd_GPR64: aa_Xd is aa_Xd { export aa_Xd; } +Rd_GPR64: xzr is aa_Xd=31 & xzr { tmp:8 = 0; export tmp; } +Rd_GPR64_2: aa_Xd is aa_Xd { export aa_Xd; } +Rd_GPR64_2: xzr is aa_Xd=31 & xzr { tmp:8 = 0; export tmp; } + +Ra_GPR64: aa_Xa is aa_Xa { export aa_Xa; } +Ra_GPR64: xzr is aa_Xa=31 & xzr { tmp:8 = 0; export tmp; } + +Rt2_GPR64: aa_Xa is aa_Xa { 
export aa_Xa; } +Rt2_GPR64: xzr is aa_Xa=31 & xzr { tmp:8 = 0; export tmp; } + +Rd_GPR64xsp: aa_Xd is aa_Xd { export aa_Xd; } +Rd_GPR64xsp: sp is aa_Xd=31 & sp { export sp; } + +Rn_GPR64: aa_Xn is aa_Xn { export aa_Xn; } +Rn_GPR64: xzr is aa_Xn=31 & xzr { tmp:8 = 0; export tmp; } + +Rt_GPR64: aa_Xt is aa_Xt { export aa_Xt; } +Rt_GPR64: xzr is aa_Xt=31 & xzr { tmp:8 = 0; export tmp; } + +Rn_GPR64xsp: aa_Xn is aa_Xn { export aa_Xn; } +Rn_GPR64xsp: sp is aa_Xn=31 & sp { export sp; } + +Rm_GPR64xsp: aa_Xm is aa_Xm { export aa_Xm; } +Rm_GPR64xsp: sp is aa_Xm=31 & sp { export sp; } + +Rt_GPR64xsp: aa_Xt is aa_Xt { export aa_Xt; } +Rt_GPR64xsp: sp is aa_Xt=31 & sp { export sp; } + +Rs_GPR32: Rm_GPR32 is Rm_GPR32 { export Rm_GPR32; } +Rs_GPR64: Rm_GPR64 is Rm_GPR64 { export Rm_GPR64; } + +Rm_fpz16: "#0.0" is Rm { tmp:2 = int2float(0:2); export tmp; } +Rm_fpz32: "#0.0" is Rm { tmp:4 = int2float(0:4); export tmp; } +Rm_fpz64: "#0.0" is Rm { tmp:8 = int2float(0:8); export tmp; } + +Rd_VPR128.16B: vRd_VPR128^".16B" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.8H: vRd_VPR128^".8H" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.4S: vRd_VPR128^".4S" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.2S: vRd_VPR128^".2S" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.2D: vRd_VPR128^".2D" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.1Q: vRd_VPR128^".1Q" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } + +Rn_VPR128.16B: vRn_VPR128^".16B" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rnn_VPR128.16B: vRnn_VPR128^".16B" is Rnn_VPR128 & vRnn_VPR128 { export Rnn_VPR128; } +Rnnn_VPR128.16B: vRnnn_VPR128^".16B" is Rnnn_VPR128 & vRnnn_VPR128 { export Rnnn_VPR128; } +Rnnnn_VPR128.16B: vRnnnn_VPR128^".16B" is Rnnnn_VPR128 & vRnnnn_VPR128 { export Rnnnn_VPR128; } + +Rn_VPR128.8B: vRn_VPR128^".8B" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.8H: vRn_VPR128^".8H" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.4S: vRn_VPR128^".4S" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.4H: vRn_VPR128^".4H" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.2D: vRn_VPR128^".2D" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } + +Rm_VPR128.8B: vRm_VPR128^".8B" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; } +Rm_VPR128.16B: vRm_VPR128^".16B" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; } +Rm_VPR128.8H: vRm_VPR128^".8H" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; } +Rm_VPR128.4S: vRm_VPR128^".4S" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; } +Rm_VPR128.4H: vRm_VPR128^".4H" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; } +Rm_VPR128.2D: vRm_VPR128^".2D" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; } + +Ra_VPR128.16B: vRa_VPR128^".16B" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; } +# Ra_VPR128.8H: vRa_VPR128^".8H" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; } +Ra_VPR128.4S: vRa_VPR128^".4S" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; } +# Ra_VPR128.2D: vRa_VPR128^".2D" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; } +# Ra_VPR128.1Q: vRa_VPR128^".1Q" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; } + +Rd_VPR64.8B: vRd_VPR64^".8B" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; } +Rd_VPR64.4H: vRd_VPR64^".4H" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; } +Rd_VPR64.2S: vRd_VPR64^".2S" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; } +Rd_VPR64.1D: vRd_VPR64^".1D" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; } + +Rn_VPR64.8B: vRn_VPR64^".8B" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; } +Rn_VPR64.4H: vRn_VPR64^".4H" is Rn_VPR64 & 
vRn_VPR64 { export Rn_VPR64; } +Rn_VPR64.2S: vRn_VPR64^".2S" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; } +Rn_VPR64.1D: vRn_VPR64^".1D" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; } + +Rm_VPR64.8B: vRm_VPR64^".8B" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; } +Rm_VPR64.4H: vRm_VPR64^".4H" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; } +Rm_VPR64.2S: vRm_VPR64^".2S" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; } +Rm_VPR64.1D: vRm_VPR64^".1D" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; } + +Rd_VPR128.B: vRd_VPR128^".B" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.H: vRd_VPR128^".H" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.S: vRd_VPR128^".S" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } +Rd_VPR128.D: vRd_VPR128^".D" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; } + +Rn_VPR128.B: vRn_VPR128^".B" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.H: vRn_VPR128^".H" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.S: vRn_VPR128^".S" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } +Rn_VPR128.D: vRn_VPR128^".D" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; } + +Re_VPR128.B: vRe_VPR128^".B" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; } +Re_VPR128.H: vRe_VPR128^".H" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; } +Re_VPR128.S: vRe_VPR128^".S" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; } +Re_VPR128.D: vRe_VPR128^".D" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; } + +Re_VPR128Lo.H: vRe_VPR128Lo^".H" is Re_VPR128Lo & vRe_VPR128Lo { export Re_VPR128Lo; } + +br_cc_op: "eq" is br_cond_op=0 { export ZR; } +br_cc_op: "ne" is br_cond_op=1 { tmp:1 = !ZR; export tmp; } +br_cc_op: "cs" is br_cond_op=2 { export CY; } +br_cc_op: "cc" is br_cond_op=3 { tmp:1 = !CY; export tmp; } +br_cc_op: "mi" is br_cond_op=4 { export NG; } +br_cc_op: "pl" is br_cond_op=5 { tmp:1 = !NG; export tmp; } +br_cc_op: "vs" is br_cond_op=6 { export OV; } +br_cc_op: "vc" is br_cond_op=7 { tmp:1 = !OV; export tmp; } +br_cc_op: "hi" is br_cond_op=8 { tmp:1 = CY && (!ZR); export tmp; } +br_cc_op: "ls" is br_cond_op=9 { tmp:1 = (!CY) || ZR; export tmp; } +br_cc_op: "ge" is br_cond_op=10 { tmp:1 = (NG==OV); export tmp; } +br_cc_op: "lt" is br_cond_op=11 { tmp:1 = (NG!=OV); export tmp; } +br_cc_op: "gt" is br_cond_op=12 { tmp:1 = (!ZR) && (NG==OV); export tmp; } +br_cc_op: "le" is br_cond_op=13 { tmp:1 = ZR || (NG!=OV); export tmp; } +br_cc_op: "al" is br_cond_op=14 { export 1:1; } +br_cc_op: "nv" is br_cond_op=15 { export 1:1; } + +BranchCondOp: br_cc_op is br_cc_op { export br_cc_op; } + +cc_op: "eq" is cond_op=0 { export ZR; } +cc_op: "ne" is cond_op=1 { tmp:1 = !ZR; export tmp; } +cc_op: "cs" is cond_op=2 { export CY; } +cc_op: "cc" is cond_op=3 { tmp:1 = !CY; export tmp; } +cc_op: "mi" is cond_op=4 { export NG; } +cc_op: "pl" is cond_op=5 { tmp:1 = !NG; export tmp; } +cc_op: "vs" is cond_op=6 { export OV; } +cc_op: "vc" is cond_op=7 { tmp:1 = !OV; export tmp; } +cc_op: "hi" is cond_op=8 { tmp:1 = CY && (!ZR); export tmp; } +cc_op: "ls" is cond_op=9 { tmp:1 = (!CY) || ZR; export tmp; } +cc_op: "ge" is cond_op=10 { tmp:1 = (NG==OV); export tmp; } +cc_op: "lt" is cond_op=11 { tmp:1 = (NG!=OV); export tmp; } +cc_op: "gt" is cond_op=12 { tmp:1 = (!ZR) && (NG==OV); export tmp; } +cc_op: "le" is cond_op=13 { tmp:1 = ZR || (NG!=OV); export tmp; } +cc_op: "al" is cond_op=14 { export 1:1; } +cc_op: "nv" is cond_op=15 { export 1:1; } + +CondOp: cc_op is cc_op { export cc_op; } + +inv_cc_op: "eq" is cond_op=1 { export ZR; } +inv_cc_op: "ne" is cond_op=0 { tmp:1 = !ZR; export tmp; } +inv_cc_op: "cs" is 
cond_op=3 { export CY; } +inv_cc_op: "cc" is cond_op=2 { tmp:1 = !CY; export tmp; } +inv_cc_op: "mi" is cond_op=5 { export NG; } +inv_cc_op: "pl" is cond_op=4 { tmp:1 = !NG; export tmp; } +inv_cc_op: "vs" is cond_op=7 { export OV; } +inv_cc_op: "vc" is cond_op=6 { tmp:1 = !OV; export tmp; } +inv_cc_op: "hi" is cond_op=9 { tmp:1 = CY && (!ZR); export tmp; } +inv_cc_op: "ls" is cond_op=8 { tmp:1 = (!CY) || ZR; export tmp; } +inv_cc_op: "ge" is cond_op=11 { tmp:1 = (NG==OV); export tmp; } +inv_cc_op: "lt" is cond_op=10 { tmp:1 = (NG!=OV); export tmp; } +inv_cc_op: "gt" is cond_op=13 { tmp:1 = (!ZR) && (NG==OV); export tmp; } +inv_cc_op: "le" is cond_op=12 { tmp:1 = ZR || (NG!=OV); export tmp; } +inv_cc_op: "al" is cond_op=15 { export 1:1; } +inv_cc_op: "nv" is cond_op=14 { export 1:1; } + +InvCondOp: inv_cc_op is inv_cc_op { export inv_cc_op; } + +SBIT_CZNO: is b_29=0 { } # Do nothing to the flag bits +SBIT_CZNO: "s" is b_29=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } + +Imm_uimm_exact8: "#"^value is aa_extreg_shift [ value = 8 << aa_extreg_shift; ] { export *[const]:4 value; } +Imm_uimm_exact16: "#"^value is aa_extreg_shift [ value = 8 << aa_extreg_shift; ] { export *[const]:4 value; } +Imm_uimm_exact32: "#"^value is aa_extreg_shift [ value = 8 << aa_extreg_shift; ] { export *[const]:4 value; } + +Imm_shr_imm8: "#"^val is b_1922 & b_1618 [ val = (8*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; } +Imm_shr_imm16: "#"^val is b_1922 & b_1618 [ val = (16*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; } +Imm_shr_imm32: "#"^val is b_1922 & b_1618 [ val = (32*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; } +Imm_shr_imm64: "#"^val is b_1922 & b_1618 [ val = (64*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; } + +NZCVImm_uimm4: "#"^b_0003 is b_0003 { export *[const]:1 b_0003; } +UImm5: "#"^b_1620 is b_1620 { export *[const]:4 b_1620; } +UImm6: "#"^b_1520 is b_1520 { export *[const]:4 b_1520; } + +CRm_uimm4: "#"^b_0811 is b_0811 { export *[const]:1 b_0811; } + +CRm_uimm4_def15: "#"^b_0811 is b_0811 { export *[const]:1 b_0811; } +CRm_uimm4_def15: is b_0811=0xf { export 15:1; } + +LSB_bitfield32_imm: "#"^imm6 is b_1515=0 & imm6 { export *[const]:8 imm6; } +LSB_bitfield64_imm: "#"^imm6 is imm6 { export *[const]:8 imm6; } + +LSB_bitfield32_imm_shift: "#"^shift is b_1515=0 & imm6 [ shift = 31 - imm6; ] { export *[const]:4 shift; } +LSB_bitfield64_imm_shift: "#"^shift is imm6 [ shift = 63 - imm6; ] { export *[const]:8 shift; } + +AddrLoc14: reloc is simm14 [ reloc = inst_start + (4*simm14); ] { export *[const]:8 reloc; } + +AddrLoc19: reloc is simm19 [ reloc = inst_start + (4*simm19); ] { export *[const]:8 reloc; } + +AddrLoc26: reloc is simm26 [ reloc = inst_start + (4*simm26); ] { export *[const]:8 reloc; } + +Addr14: AddrLoc14 is AddrLoc14 { export *:8 AddrLoc14; } + +Addr19: AddrLoc19 is AddrLoc19 { export *:8 AddrLoc19; } + +Addr26: AddrLoc26 is AddrLoc26 { export *:8 AddrLoc26; } + +AdrReloff: reloff is b_31=1 & immlo & immhi [ reloff = ((inst_start) & ~0xfff) + ( ((immhi << 2) | immlo) << 12 ); ] { export *[const]:8 reloff; } +AdrReloff: reloff is b_31=0 & immlo & immhi [ reloff = (inst_start) + ( ((immhi << 2) | immlo) ); ] { export *[const]:8 reloff; } + +ImmShift32: "#"^imm12 is aa_extreg_shift=0 & imm12 { export *[const]:4 imm12; } +ImmShift32: "#"^imm12, "LSL #12" is aa_extreg_shift=1 & imm12 { tmp:4 = imm12 << 12; export tmp; } + +ImmShift64: "#"^imm12 is aa_extreg_shift=0 & imm12 { export *[const]:8 imm12; } +ImmShift64: "#"^imm12, "LSL #12" is 
aa_extreg_shift=1 & imm12 { tmp:8 = imm12 << 12; export tmp; } + +# TODO some instructions can't do ROR operation on immediate! + +RegShift32: Rm_GPR32, "LSL #"^imm6 is Rm_GPR32 & aa_extreg_shift = 0 & imm6 & b_1515=0 { tmp:4 = Rm_GPR32 << imm6; export tmp; } +RegShift32: Rm_GPR32 is Rm_GPR32 & aa_extreg_shift = 0 & imm6=0 { export Rm_GPR32; } +RegShift32: Rm_GPR32, "LSR #"^imm6 is Rm_GPR32 & aa_extreg_shift = 1 & imm6 & b_1515=0 { tmp:4 = Rm_GPR32 >> imm6; export tmp; } +RegShift32: Rm_GPR32, "ASR #"^imm6 is Rm_GPR32 & aa_extreg_shift = 2 & imm6 & b_1515=0 { tmp:4 = Rm_GPR32 s>> imm6; export tmp; } + +RegShift32Log: RegShift32 is aa_extreg_shift & RegShift32 { export RegShift32; } +RegShift32Log: Rm_GPR32, "ROR #"^imm6 is aa_extreg_shift=3 & Rm_GPR32 & imm6 & b_1515=0 { tmp:4 = (Rm_GPR32 >> imm6) | (Rm_GPR32 << (32 - imm6)); export tmp; } + +RegShift64: Rm_GPR64, "LSL #"^imm6 is Rm_GPR64 & aa_extreg_shift = 0 & imm6 { tmp:8 = Rm_GPR64 << imm6; export tmp; } +RegShift64: Rm_GPR64 is Rm_GPR64 & aa_extreg_shift = 0 & imm6=0 { export Rm_GPR64; } +RegShift64: Rm_GPR64, "LSR #"^imm6 is Rm_GPR64 & aa_extreg_shift = 1 & imm6 { tmp:8 = Rm_GPR64 >> imm6; export tmp; } +RegShift64: Rm_GPR64, "ASR #"^imm6 is Rm_GPR64 & aa_extreg_shift = 2 & imm6 { tmp:8 = Rm_GPR64 s>> imm6; export tmp; } + +RegShift64Log: RegShift64 is aa_extreg_shift & RegShift64 & aa_Xn & aa_Xm & imm6 { export RegShift64; } +RegShift64Log: Rm_GPR64, "ROR #"^imm6 is aa_extreg_shift=3 & Rm_GPR64 & aa_Xn & aa_Xm & imm6 { tmp:8 = (Rm_GPR64 >> imm6) | (Rm_GPR64 << (64 - imm6)); export tmp; } + +RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=1 & b_1012 { export 1:4; } +RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=2 & b_1012 { export 2:4; } +RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=3 & b_1012 { export 3:4; } +RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=4 & b_1012 { export 4:4; } +RegShiftVal32: "" is aa_extreg_imm3=0 { export 0:4; } + +LSL_Sp_Special32: Rm_GPR32, "LSL " is Rm_GPR32 & aa_extreg_imm3 { export Rm_GPR32; } +LSL_Sp_Special32: Rm_GPR32 is Rm_GPR32 & aa_extreg_imm3=0 { export Rm_GPR32; } + +ExtendReg32: Rm_GPR32, "UXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=0 { tmp0:4 = Rm_GPR32; tmp:4 = zext(tmp0:1); export tmp; } +ExtendReg32: Rm_GPR32, "UXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=1 { tmp0:4 = Rm_GPR32; tmp:4 = zext(tmp0:2); export tmp; } +ExtendReg32: LSL_Sp_Special32 is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 & b_29=1 & (Rn=0x1f) & LSL_Sp_Special32 { export Rm_GPR32; } +ExtendReg32: LSL_Sp_Special32 is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 & b_29=0 & (Rd=0x1f | Rn=0x1f) & LSL_Sp_Special32 { export Rm_GPR32; } +ExtendReg32: Rm_GPR32, "UXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 { tmp:4 = Rm_GPR32; export tmp; } +ExtendReg32: Rm_GPR32, "UXTX " is Rm_GPR32 & b_2121=1 & aa_extreg_option=3 { tmp:4 = Rm_GPR32; export tmp; } +ExtendReg32: Rm_GPR32, "SXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=4 { tmp0:4 = Rm_GPR32; tmp:4 = sext(tmp0:1); export tmp; } +ExtendReg32: Rm_GPR32, "SXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=5 { tmp0:4 = Rm_GPR32; tmp:4 = sext(tmp0:2); export tmp; } + +ExtendReg32: Rm_GPR32, "SXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=6 { tmp:4 = Rm_GPR32; export tmp; } +ExtendReg32: Rm_GPR32, "SXTX " is Rm_GPR32 & b_2121=1 & aa_extreg_option=7 { tmp:4 = Rm_GPR32; export tmp; } + +ExtendRegShift32: ExtendReg32^RegShiftVal32 is aa_extreg_shift = 0 & aa_extreg_option & aa_extreg_imm3 & ExtendReg32 & RegShiftVal32 { tmp:4 = ExtendReg32; tmp = tmp << RegShiftVal32; export tmp; } 
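+# Worked example (illustrative only, not from the Ghidra spec): for an extended-register operand written "w2, UXTB #2", ExtendReg32 zero-extends the low byte of w2 and ExtendRegShift32 then applies the RegShiftVal32 amount, so the operand value is zext(w2[7:0]) << 2. The ROR alternatives in RegShift32Log/RegShift64Log likewise build the rotate from two shifts: ror(x, n) = (x >> n) | (x << (size - n)).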
+ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=2 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; } +ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=3 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; } +ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=6 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; } +ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=7 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; } + +Imm12_addsubimm_operand_i32_negimm_lsl0: ImmShift32 is ImmShift32 { export ImmShift32; } +Imm12_addsubimm_operand_i32_negimm_lsl12: ImmShift32 is ImmShift32 { export ImmShift32; } +Imm12_addsubimm_operand_i32_posimm_lsl0: ImmShift32 is ImmShift32 { export ImmShift32; } +Imm12_addsubimm_operand_i32_posimm_lsl12: ImmShift32 is ImmShift32 { export ImmShift32; } +Imm12_addsubimm_operand_i64_negimm_lsl0: ImmShift64 is ImmShift64 { export ImmShift64; } +Imm12_addsubimm_operand_i64_negimm_lsl12: ImmShift64 is ImmShift64 { export ImmShift64; } +Imm12_addsubimm_operand_i64_posimm_lsl0: ImmShift64 is ImmShift64 { export ImmShift64; } +Imm12_addsubimm_operand_i64_posimm_lsl12: ImmShift64 is ImmShift64 { export ImmShift64; } + +RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=1 & b_1012 { export 1:8; } +RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=2 & b_1012 { export 2:8; } +RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=3 & b_1012 { export 3:8; } +RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=4 & b_1012 { export 4:8; } +RegShiftVal64: "" is aa_extreg_imm3=0 { export 0:8; } + +LSL_Sp_Special64: Rm_GPR64, "LSL " is Rm_GPR64 & aa_extreg_imm3 { export Rm_GPR64; } +LSL_Sp_Special64: Rm_GPR64 is Rm_GPR64 & aa_extreg_imm3=0 { export Rm_GPR64; } + +ExtendReg64: Rm_GPR32, "UXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=0 { tmp0:4 = Rm_GPR32; tmp:8 = zext(tmp0:1); export tmp; } +ExtendReg64: Rm_GPR32, "UXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=1 { tmp0:4 = Rm_GPR32; tmp:8 = zext(tmp0:2); export tmp; } +ExtendReg64: Rm_GPR32, "UXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 { tmp:8 = zext(Rm_GPR32); export tmp; } +ExtendReg64: LSL_Sp_Special64 is Rm_GPR64 & b_2121=1 & aa_extreg_option=3 & b_29=1 & b_25=1 & (Rn=0x1f) & LSL_Sp_Special64 { tmp:8 = Rm_GPR64; export tmp; } +ExtendReg64: LSL_Sp_Special64 is Rm_GPR64 & b_2121=1 & aa_extreg_option=3 & b_29=0 & b_25=1 & (Rd=0x1f | Rn=0x1f) & LSL_Sp_Special64 { tmp:8 = Rm_GPR64; export tmp; } +ExtendReg64: Rm_GPR64, "LSL " is Rm_GPR64 & b_2121=1 & aa_extreg_option=3 & b_29 & b_25=0 { tmp:8 = Rm_GPR64; export tmp; } +ExtendReg64: Rm_GPR64, "UXTX " is Rm_GPR64 & b_2121=1 & aa_extreg_option=3 { tmp:8 = Rm_GPR64; export tmp; } +ExtendReg64: Rm_GPR32, "SXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=4 { tmp0:4 = Rm_GPR32; tmp:8 = sext(tmp0:1); export tmp; } +ExtendReg64: Rm_GPR32, "SXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=5 { tmp0:4 = Rm_GPR32; tmp:8 = sext(tmp0:2); export tmp; } +ExtendReg64: Rm_GPR32, "SXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=6 { tmp:8 = sext(Rm_GPR32); export tmp; } +ExtendReg64: Rm_GPR64, "SXTX " is Rm_GPR64 & b_2121=1 & aa_extreg_option=7 { tmp:8 = Rm_GPR64; export tmp; } + +ExtendRegShift64: ExtendReg64^RegShiftVal64 +is aa_extreg_shift = 0 & aa_extreg_option & aa_extreg_imm3 & ExtendReg64 & RegShiftVal64 +{ + build ExtendReg64; + build RegShiftVal64; + tmp:8 = ExtendReg64; + tmp = tmp << RegShiftVal64; + export tmp; 
+} + +ExtendRegShift64: ExtendReg64 is Rm_GPR64 & aa_extreg_shift = 0 & aa_extreg_option=3 & aa_extreg_imm3=0 & ExtendReg64 & RegShiftVal64 { export Rm_GPR64; } +ExtendRegShift64: ExtendReg64 is Rm_GPR64 & aa_extreg_shift = 0 & aa_extreg_option=7 & aa_extreg_imm3=0 & ExtendReg64 & RegShiftVal64 { export Rm_GPR64; } + +UnscPriv: "u" is b_1011=0 { } +UnscPriv: "t" is b_1011=2 { } + +# Simple register load or store +addrReg: "["^Rn_GPR64xsp^"]" is Rn_GPR64xsp { export Rn_GPR64xsp; } + +# Scaled Offset +addrUIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is size.ldstr=0 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 0; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; } +addrUIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is size.ldstr=1 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 1; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; } +addrUIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is size.ldstr=2 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 2; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; } +addrUIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is size.ldstr=3 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 3; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; } + +addrUIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is size.ldstr=0 & b_2729=7 & v=1 & b_2425=1 & b_2323=1 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 4; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; } +addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=0 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; } +addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=1 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; } +addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=2 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; } +addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=3 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; } +addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=0 & b_2729=7 & v=1 & b_2425=1 & b_2323=1 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; } + +# Address Reg + signed offset -256 to 255 +addr_SIMM9: "["^Rn_GPR64xsp, "#"^simm9^"]" is Rn_GPR64xsp & simm9 { tmp:8 = Rn_GPR64xsp + simm9; export tmp; } +addr_SIMM9: "["^Rn_GPR64xsp^"]" is Rn_GPR64xsp & simm9=0 { tmp:8 = Rn_GPR64xsp; export tmp; } + +addrRegShift64: "#"^val is size.ldstr=0 & v=0 & opt & b_1212=1 [ val = 0 & 0xff; ] { export *[const]:8 val; } +addrRegShift64: "" is size.ldstr=0 & v=0 & opt & b_1212=0 { export 0:8; } + +addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=0 & b_1212=1 [ val = 0 & 0xff; ] { export *[const]:8 val; } +addrRegShift64: "" is size.ldstr=0 & v=1 & opt=0 & b_1212=0 { export 0:8; } +addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=1 & b_1212=1 [ val = 0 & 0xff; ] { export *[const]:8 val; } +addrRegShift64: "" is size.ldstr=0 & v=1 & opt=1 & b_1212=0 { export 0:8; } +addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=2 & b_1212 [ val = b_1212 * 4; ] { export *[const]:8 val; } +addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=3 & b_1212 [ val = b_1212 * 4; ] { export *[const]:8 val; } + +addrRegShift64: "#"^val is size.ldstr=1 & v=0 & opt & b_1212 [ val = b_1212 * 1; ] { export *[const]:8 val; } +addrRegShift64: "#"^val is size.ldstr=1 & v=1 & opt & b_1212 [ val = b_1212 * 1; ] { export *[const]:8 val; } + +addrRegShift64: "#"^val is size.ldstr=2 & v=0 & opt & b_1212 [ val = b_1212 * 2; ] { export *[const]:8 val; } +addrRegShift64: "#"^val is 
size.ldstr=2 & v=1 & opt & b_1212 [ val = b_1212 * 2; ] { export *[const]:8 val; } + +addrRegShift64: "#"^val is size.ldstr=3 & v=0 & opt & b_1212 [ val = b_1212 * 3; ] { export *[const]:8 val; } +addrRegShift64: "#"^val is size.ldstr=3 & v=1 & opt & b_1212 [ val = b_1212 * 3; ] { export *[const]:8 val; } + +addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=2 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; } +addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=3 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; } +addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=6 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; } +addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=7 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; } +addrExtendRegShift64: Rm_GPR64 is Rm_GPR64 & aa_extreg_option=3 & aa_extreg_imm3=0 & ExtendReg64 { export Rm_GPR64; } +addrExtendRegShift64: Rm_GPR64 is Rm_GPR64 & aa_extreg_option=7 & aa_extreg_imm3=0 & ExtendReg64 { export Rm_GPR64; } + +# unsigned offset +addrIndexed: addrUIMM is size.ldstr & b_2729=7 & b_2425=1 & addrUIMM { export addrUIMM; } + +# unsigned offset, unscaled immediate +addrIndexed: addr_SIMM9 is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & opc.indexmode=0 & addr_SIMM9 { export addr_SIMM9; } + +# register unprivileged +addrIndexed: addr_SIMM9 is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & opc.indexmode=2 & addr_SIMM9 { export addr_SIMM9; } + +# post indexed wback +addrIndexed: "["^Rn_GPR64xsp^"]", "#"^simm9 +is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & Rn_GPR64xsp & simm9 & opc.indexmode=1 +{ + tmp:8 = Rn_GPR64xsp; + Rn_GPR64xsp = Rn_GPR64xsp + simm9; + export tmp; +} + +# Register, Register offset extended +addrIndexed: "["^Rn_GPR64xsp, addrExtendRegShift64^"]" +is size.ldstr & b_2729=7 & b_2425=0 & b_2121=1 & Rn_GPR64xsp & opc.indexmode=2 & addrExtendRegShift64 +{ + tmp:8 = Rn_GPR64xsp + addrExtendRegShift64; + export tmp; +} + +# pre indexed wback +addrIndexed: "["^Rn_GPR64xsp, "#"^simm9^"]!" +is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & Rn_GPR64xsp & simm9 & opc.indexmode=3 +{ + Rn_GPR64xsp = Rn_GPR64xsp + simm9; + export Rn_GPR64xsp; +} + +# For LDRAA/LDRAB + +# no offset (with S) +addrIndexed: "["^Rn_GPR64xsp^"]" +is size.ldstr & b_2729=7 & b_2425=0 & b_22=0 & b_2121=1 & Rn_GPR64xsp & simm9=0 & opc.indexmode=1 +{ + export Rn_GPR64xsp; +} + +# offset (with S) +addrIndexed: "["^Rn_GPR64xsp, "#"^sim^"]" +is size.ldstr & b_2729=7 & b_2425=0 & b_22 & b_2121=1 & Rn_GPR64xsp & simm9 & opc.indexmode=1 +[ sim = (b_22 * (-1<<9) | (simm9 & 0x1ff)) << 3; ] +{ + tmp:8 = Rn_GPR64xsp + sim; + export tmp; +} + +# no offset writeback (with S) +addrIndexed: "["^Rn_GPR64xsp^"]!" +is size.ldstr & b_2729=7 & b_2425=0 & b_22=0 & b_2121=1 & Rn_GPR64xsp & simm9=0 & opc.indexmode=3 +{ + export Rn_GPR64xsp; +} + +# pre indexed wback (with S) +addrIndexed: "["^Rn_GPR64xsp, "#"^sim^"]!" 
+is size.ldstr & b_2729=7 & b_2425=0 & b_22 & b_2121=1 & Rn_GPR64xsp & simm9 & opc.indexmode=3 +[ sim = (b_22 * (-1<<9) | (simm9 & 0x1ff)) << 3; ] +{ + Rn_GPR64xsp = Rn_GPR64xsp + sim; + export Rn_GPR64xsp; +} + +addrPairScale: pimm is b_3031=0 & v=0 & simm7 [ pimm = simm7 << 2; ] { export *[const]:8 pimm; } +addrPairScale: pimm is b_3031=0 & v=1 & simm7 [ pimm = simm7 << 2; ] { export *[const]:8 pimm; } +addrPairScale: pimm is b_3031=2 & v=0 & simm7 [ pimm = simm7 << 3; ] { export *[const]:8 pimm; } +addrPairScale: pimm is b_3031=1 & v=0 & simm7 [ pimm = simm7 << 2; ] { export *[const]:8 pimm; } +addrPairScale: pimm is b_3031=1 & v=1 & simm7 [ pimm = simm7 << 3; ] { export *[const]:8 pimm; } +addrPairScale: pimm is b_3031=2 & v=1 & simm7 [ pimm = simm7 << 4; ] { export *[const]:8 pimm; } + +# Scaled Offset +addrPairUIMM: "["^Rn_GPR64xsp, "#"^addrPairScale^"]" +is sf & Rn_GPR64xsp & addrPairScale & simm7 +{ + tmp:8 = Rn_GPR64xsp + addrPairScale; + export tmp; +} + +addrPairUIMM: "["^Rn_GPR64xsp^"]" +is sf & Rn_GPR64xsp & addrPairScale & simm7=0 +{ + tmp:8 = Rn_GPR64xsp; + export tmp; +} + +# unsigned offset +addrPairIndexed: addrPairUIMM +is b_2729=0b101 & b_2325=0b010 & addrPairUIMM +{ export addrPairUIMM; } + +# unsigned offset, non-temporal hint +addrPairIndexed: addrPairUIMM +is b_2729=0b101 & b_2325=0b000 & addrPairUIMM +{ export addrPairUIMM; } + +# post indexed wback +addrPairIndexed: "["^Rn_GPR64xsp^"]", "#"^addrPairScale +is b_2729=0b101 & b_2325=0b001 & Rn_GPR64xsp & addrPairScale +{ + tmp:8 = Rn_GPR64xsp; + Rn_GPR64xsp = Rn_GPR64xsp + addrPairScale; + export tmp; +} + +# pre indexed wback +addrPairIndexed: "["^Rn_GPR64xsp, "#"^addrPairScale^"]!" +is b_2729=0b101 & b_2325=0b011 & Rn_GPR64xsp & addrPairScale +{ + Rn_GPR64xsp = Rn_GPR64xsp + addrPairScale; + export Rn_GPR64xsp; +} + +#### Undefined behavior on writeback #### +# +# Most instructions with writeback have unpredictable behavior when their address input register Rn +# is the same register as another input, e.g. Rt. For example, LDR x1, [x1, 0x8]! has unpredictable +# behavior in the ARM spec. Similarly, STR x5, [x5], 0x28 has unpredictable behavior in the spec +# (but with slightly different possibilities for what forms that unpredictable behavior might take!). +# +# One of the few exceptions is STGP, which has no mention of unpredictable behavior. In such cases, +# it's important to read all registers before addrGranuleIndexed or addrPairGranuleIndexed takes effect, +# or pre-index writeback will modify the register values used if Rn is the same register as another R. +# +# This is an example of how to code a definition for an instruction with no unpredictable behavior: +#{ +# # save the initial register values +# data1:8 = Rt_GPR64; +# data2:8 = Rt2_GPR64; +# +# build addrPairGranuleIndexed; # may modify Rt or Rt2, so use data1/data2 instead afterward +# +# ...etc... 
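+# # a possible completion, for illustration only (not the actual STGP +# # semantics; "ea" is a placeholder local): finish with the saved copies +# # data1/data2, never Rt/Rt2, because the build above may already have +# # performed the writeback to Rn +# ea:8 = addrPairGranuleIndexed; +# *:8 ea = data1; +# *:8 (ea + 8) = data2;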
+#} + +OPTIONAL_XM: is Rm=0b11111 { export 0:8; } # default to XZR if Xm is absent +OPTIONAL_XM: ,Rm_GPR64 is Rm_GPR64 { export Rm_GPR64; } + +addr_granuleSIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is Rn_GPR64xsp & simm9 [ pimm = simm9 << $(LOG2_TAG_GRANULE); ] { tmp:8 = Rn_GPR64xsp + ( simm9 << $(LOG2_TAG_GRANULE) ); export tmp; } +addr_granuleSIMM: "["^Rn_GPR64xsp^"]" is Rn_GPR64xsp & simm9=0 { tmp:8 = Rn_GPR64xsp; export tmp; } + +# signed offset +addrGranuleIndexed: addr_granuleSIMM is opc.indexmode=2 & addr_granuleSIMM { export addr_granuleSIMM; } + +# post indexed wback +addrGranuleIndexed: "["^Rn_GPR64xsp^"]", "#"^pimm +is Rn_GPR64xsp & simm9 & opc.indexmode=1 +[ pimm = simm9 << $(LOG2_TAG_GRANULE); ] +{ + tmp:8 = Rn_GPR64xsp; + Rn_GPR64xsp = Rn_GPR64xsp + pimm; + export tmp; +} + +# pre indexed wback +addrGranuleIndexed: "["^Rn_GPR64xsp, "#"^pimm^"]!" +is Rn_GPR64xsp & simm9 & opc.indexmode=3 +[ pimm = simm9 << $(LOG2_TAG_GRANULE); ] +{ + Rn_GPR64xsp = Rn_GPR64xsp + pimm; + tmp:8 = Rn_GPR64xsp; + export tmp; +} + +addrPairGranuleScale: pimm is simm7 [ pimm = simm7 << $(LOG2_TAG_GRANULE); ] { export *[const]:8 pimm; } + +# Scaled Offset +addrPairGranuleSIMM: "["^Rn_GPR64xsp, "#"^addrPairGranuleScale^"]" +is sf & Rn_GPR64xsp & addrPairGranuleScale & simm7 +{ + tmp:8 = Rn_GPR64xsp + addrPairGranuleScale; + export tmp; +} + +addrPairGranuleSIMM: "["^Rn_GPR64xsp^"]" +is sf & Rn_GPR64xsp & addrPairGranuleScale & simm7=0 +{ + tmp:8 = Rn_GPR64xsp; + export tmp; +} + + +# signed offset +addrPairGranuleIndexed: addrPairGranuleSIMM +is b_2729=0b101 & b_2325=0b010 & addrPairGranuleSIMM +{ export addrPairGranuleSIMM; } + +# post indexed wback +addrPairGranuleIndexed: "["^Rn_GPR64xsp^"]", "#"^addrPairGranuleScale +is b_2729=0b101 & b_2325=0b001 & Rn_GPR64xsp & addrPairGranuleScale +{ + tmp:8 = Rn_GPR64xsp; + Rn_GPR64xsp = Rn_GPR64xsp + addrPairGranuleScale; + export tmp; +} + +# pre indexed wback +addrPairGranuleIndexed: "["^Rn_GPR64xsp, "#"^addrPairGranuleScale^"]!" 
+is b_2729=0b101 & b_2325=0b011 & Rn_GPR64xsp & addrPairGranuleScale +{ + Rn_GPR64xsp = Rn_GPR64xsp + addrPairGranuleScale; + export Rn_GPR64xsp; +} + +# Note: in the bitmask-immediate decoders below, multiplying an element-sized pattern by 0x100000001, 0x10001, 0x101, 0x11, or 0x5 replicates it across the register, so an ordinary right shift behaves as a rotate of the element. + +# esize=32, len=5, levels=0x1f: 32 bits with b_1014+1 1s; rotate right b_1620; replicate 1 time +DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_15=0 & b_1014 & b_1620 +[ wmask=(((~(-1<<(b_1014+1)))*0x100000001)>>b_1620)&0xffffffff; ] +{ export * [const]:4 wmask; } + +# esize=32, len=5, levels=0x1f: 32 bits with |b_1014-b_1620|+1 1s; replicate 1 time +DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_15=0 & b_1014 & b_1620 +[ tmask=(~(-1<<(((b_1014-b_1620)&0x1f)+1)))&0xffffffff; ] +{ export * [const]:4 tmask; } + +# esize=16, len=4, levels=0xf: 16 bits with b_1013+1 1s; rotate right b_1619; replicate 2 times +DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 +[ wmask=((((~(-1<<(b_1013+1)))*0x10001)>>b_1619)&0xffff)*0x10001; ] +{ export * [const]:4 wmask; } + +# esize=16, len=4, levels=0xf: 16 bits with |b_1013-b_1619|+1 1s; replicate 2 times +DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 +[ tmask=((~(-1<<(((b_1013-b_1619)&0xf)+1)))&0xffff)*0x10001; ] +{ export * [const]:4 tmask; } + +# esize=8, len=3, levels=0x7: 8 bits with b_1012+1 1s; rotate right b_1618; replicate 4 times +DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 +[ wmask=((((~(-1<<(b_1012+1)))*0x101)>>b_1618)&0xff)*0x101*0x10001; ] +{ export * [const]:4 wmask; } + +# esize=8, len=3, levels=0x7: 8 bits with |b_1012-b_1618|+1 1s; replicate 4 times +DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 +[ tmask=((~(-1<<(((b_1012-b_1618)&0x7)+1)))&0xff)*0x101*0x10001; ] +{ export * [const]:4 tmask; } + +# esize=4, len=2, levels=0x3: 4 bits with b_1011+1 1s; rotate right b_1617; replicate 8 times +DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1215=0xe & b_1011 & b_1617 +[ wmask=((((~(-1<<(b_1011+1)))*0x11)>>b_1617)&0xf)*0x11*0x101*0x10001; ] +{ export * [const]:4 wmask; } + +# esize=4, len=2, levels=0x3: 4 bits with |b_1011-b_1617|+1 1s; replicate 8 times +DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1215=0xe & b_1011 & b_1617 +[ tmask=((~(-1<<(((b_1011-b_1617)&0x3)+1)))&0xf)*0x11*0x101*0x10001; ] +{ export * [const]:4 tmask; } + +# esize=2, len=1, levels=0x1: 2 bits with b_1010+1 1s; rotate right b_1616; replicate 16 times +DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 +[ wmask=((((~(-1<<(b_1010+1)))*0x5)>>b_1616)&0x3)*0x5*0x11*0x101*0x10001; ] +{ export * [const]:4 wmask; } + +# esize=2, len=1, levels=0x1: 2 bits with |b_1010-b_1616|+1 1s; replicate 16 times +DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 +[ tmask=((~(-1<<(((b_1010-b_1616)&0x1)+1)))&0x3)*0x5*0x11*0x101*0x10001; ] +{ export * [const]:4 tmask; } + +# esize=64, len=6, levels=0x3f: 64 bits with b_1015+1 1s; rotate right b_1621; repeat 1 time +# can't rotate 64 bits by multiplying, and can't shift by 64 bits all at once +DecodeWMask64: "#"^wmask is b_31=1 & b_22=1 & b_1015 & b_1621 +[ wmask=((~((-1<<b_1015)<<1))>>b_1621)|(((~((-1<<b_1015)<<1))<<(63-b_1621))<<1); ] +{ export * [const]:8 wmask; } + +# esize=64, len=6, levels=0x3f: 64 bits with |b_1015-b_1621|+1 1s; replicate 1 time +DecodeTMask64: "#"^tmask is b_31=1 & b_22=1 & b_1015 & b_1621 +[ tmask=(~(-1<<(((b_1015-b_1621)&0x3f)+1))); ] +{ export * [const]:8 tmask; } + +# esize=32, len=5, levels=0x1f: 32 bits with b_1014+1 1s; rotate right b_1620; replicate 2 times +DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_15=0 & b_1014 & b_1620 +[ wmask=((((~(-1<<(b_1014+1)))*0x100000001)>>b_1620)&0xffffffff)*0x100000001; ] +{ export * [const]:8 wmask; } + +# esize=32, len=5, levels=0x1f: 32 bits with |b_1014-b_1620|+1 1s; replicate 2 times +DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_15=0 & b_1014 & b_1620 +[ tmask=((~(-1<<(((b_1014-b_1620)&0x1f)+1)))&0xffffffff)*0x100000001; ] +{ export * [const]:8 tmask; } + +# returned 0xffcfffdefcfffcf +# shouldbe 0xffcfffcfffcfffcf +# esize=16, len=4, levels=0xf: 16 bits with b_1013+1 1s;
rotate right b_1619; replicate 4 times +DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 +[ wmask=((((~(-1<<(b_1013+1)))*0x10001)>>b_1619)&0xffff)*0x10001*0x100000001; ] +{ export * [const]:8 wmask; } + +# esize=16, len=4, levels=0xf: 16 bits with |b_1013-b_1619|+1 1s; replicate 4 times +DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 +[ tmask=((~(-1<<(((b_1013-b_1619)&0xf)+1)))&0xffff)*0x10001*0x100000001; ] +{ export * [const]:8 tmask; } + +# esize=8, len=3, levels=0x7: 8 bits with b_1012+1 1s; rotate right b_1618; replicate 8 times +DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 +[ wmask=((((~(-1<<(b_1012+1)))*0x101)>>b_1618)&0xff)*0x101*0x10001*0x100000001; ] +{ export * [const]:8 wmask; } + +# esize=8, len=3, levels=0x7: 8 bits with |b_1012-b_1618|+1 1s; replicate 8 times +DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 +[ tmask=((~(-1<<(((b_1012-b_1618)&0x7)+1)))&0xff)*0x101*0x10001*0x100000001; ] +{ export * [const]:8 tmask; } + +# esize=4, len=2, levels=0x3: 4 bits with b_1011+1 1s; rotate right b_1617; replicate 16 times +DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1215=0xe & b_1011 & b_1617 +[ wmask=((((~(-1<<(b_1011+1)))*0x11)>>b_1617)&0xf)*0x11*0x101*0x10001*0x100000001; ] +{ export * [const]:8 wmask; } + +# esize=4, len=2, levels=0x3: 4 bits with |b_1011-b_1617|+1 1s; replicate 16 times +DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1215=0xe & b_1011 & b_1617 +[ tmask=((~(-1<<(((b_1011-b_1617)&0x3)+1)))&0xf)*0x11*0x101*0x10001*0x100000001; ] +{ export * [const]:8 tmask; } + +# esize=2, len=1, levels=0x1: 2 bits with b_1010+1 1s; rotate right b_1616; replicate 32 times +DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 +[ wmask=((((~((-1)<<(b_1010+1)))*0x5)>>b_1616)&0x3)*0x5*0x11*0x101*0x10001*0x100000001; ] +{ export * [const]:8 wmask; } + +# esize=2, len=1, levels=0x1: 2 bits with |b_1010-b_1616|+1 1s; replicate 32 times +DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 +[ tmask=((~(-1<<(((b_1010-b_1616)&0x1)+1)))&0x3)*0x5*0x11*0x101*0x10001*0x100000001; ] +{ export * [const]:8 tmask; } + +ImmRConst32: "#"^ImmR is ImmR { export *[const]:4 ImmR; } +ImmRConst64: "#"^ImmR is ImmR { export *[const]:8 ImmR; } + +ImmSConst32: "#"^ImmS is ImmS { export *[const]:4 ImmS; } +ImmSConst64: "#"^ImmS is ImmS { export *[const]:8 ImmS; } + +ImmR_bitfield64_imm: "#"^ImmR is ImmR & DecodeWMask64 { export DecodeWMask64; } +ImmR_bitfield32_imm: "#"^ImmR is ImmR & DecodeWMask32 { export DecodeWMask32; } + +ImmS_bitfield64_imm: "#"^ImmS is ImmS & DecodeTMask64 { export DecodeTMask64; } +ImmS_bitfield32_imm: "#"^ImmS is ImmS & DecodeTMask32 { export DecodeTMask32; } + +abcdefgh: "#"^imm is n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L)); ] { export *[const]:8 imm; } + +repl000: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 32) | ((n_uimm8H << 5 | n_uimm8L)); ] { export *[const]:8 imm; } +repl001: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 40) | ((n_uimm8H << 5 | n_uimm8L) << 8); ] { export *[const]:8 imm; } +repl010: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 48) | ((n_uimm8H << 5 | n_uimm8L) << 16); ] { export *[const]:8 imm; } +repl011: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 56) | ((n_uimm8H << 5 | n_uimm8L) << 24); ] { export *[const]:8 imm; } +repl100: "#"^imm is abcdefgh 
& n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 48) | ((n_uimm8H << 5 | n_uimm8L) << 32) | ((n_uimm8H << 5 | n_uimm8L) << 16) | ((n_uimm8H << 5 | n_uimm8L)); ] { export *[const]:8 imm; } +repl101: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 56) | ((n_uimm8H << 5 | n_uimm8L) << 40) | ((n_uimm8H << 5 | n_uimm8L) << 24) | ((n_uimm8H << 5 | n_uimm8L) << 8); ] { export *[const]:8 imm; } +repl1100: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((((n_uimm8H << 5 | n_uimm8L) << 8) | 0xff) << 32) | (((n_uimm8H << 5 | n_uimm8L) << 8) | 0xff); ] { export *[const]:8 imm; } +repl1101: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((((n_uimm8H << 5 | n_uimm8L) << 16) | 0xffff) << 32) | (((n_uimm8H << 5 | n_uimm8L) << 16) | 0xffff); ] { export *[const]:8 imm; } +repl11100: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 56) | ((n_uimm8H << 5 | n_uimm8L) << 48) | ((n_uimm8H << 5 | n_uimm8L) << 40) | ((n_uimm8H << 5 | n_uimm8L) << 32) | ((n_uimm8H << 5 | n_uimm8L) << 24) | ((n_uimm8H << 5 | n_uimm8L) << 16) | ((n_uimm8H << 5 | n_uimm8L) << 8) | (n_uimm8H << 5 | n_uimm8L); ] { export *[const]:8 imm; } +repl11101: "#"^imm is abcdefgh & b_18 & b_17 & b_16 & b_09 & b_08 & b_07 & b_06 & b_05 [ imm = ((b_18 * 0xff) << 56) | ((b_17 * 0xff) << 48) | ((b_16 * 0xff) << 40) | ((b_09 * 0xff) << 32) | ((b_08 * 0xff) << 24) | ((b_07 * 0xff) << 16) | ((b_06 * 0xff) << 8) | (b_05 * 0xff); ] { export *[const]:8 imm; } +repl11110: "#"^imm is abcdefgh & b_18 & b_17 & b_16 & b_09 & b_08 & b_07 & b_06 & b_05 [ imm = (b_18 << 31) | ((b_17 $xor 1) << 30) | ((b_17 * 0x1f) << 25) | (b_16 << 24) | (b_09 << 23) | (b_08 << 22) | (b_07 << 21) | (b_06 << 20) | (b_05 << 19); ] { tmp:8 = imm; tmp = (tmp << 32) | tmp; export tmp; } +repl11111: "#"^imm is abcdefgh & b_18 & b_17 & b_16 & b_09 & b_08 & b_07 & b_06 & b_05 [ imm = (b_18 << 63) | ((b_17 $xor 1) << 62) | ((b_17 * 0xff) << 54) | (b_16 << 53) | (b_09 << 52) | (b_08 << 51) | (b_07 << 50) | (b_06 << 49) | (b_05 << 48); ] { tmp:8 = imm; export tmp; } + +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x0 & b_29=0 & repl000 { export repl000; } +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x1 & b_29=0 & repl000 { export repl000; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0x2 & b_29=0 & repl001 { export repl001; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0x3 & b_29=0 & repl001 { export repl001; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #16" is abcdefgh & cmode=0x4 & b_29=0 & repl010 { export repl010; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #16" is abcdefgh & cmode=0x5 & b_29=0 & repl010 { export repl010; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #24" is abcdefgh & cmode=0x6 & b_29=0 & repl011 { export repl011; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #24" is abcdefgh & cmode=0x7 & b_29=0 & repl011 { export repl011; } +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x8 & b_29=0 & repl100 { export repl100; } +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x9 & b_29=0 & repl100 { export repl100; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0xa & b_29=0 & repl101 { export repl101; } +Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0xb & b_29=0 & repl101 { export repl101; } +Imm_neon_uimm8Shift: abcdefgh^", MSL #8" is abcdefgh & cmode=0xc & b_29=0 & repl1100 { export repl1100; } +Imm_neon_uimm8Shift: abcdefgh^", MSL #16" is abcdefgh & cmode=0xd & b_29=0 & repl1101 { export repl1101; } +Imm_neon_uimm8Shift: abcdefgh is 
abcdefgh & cmode=0xe & b_29=0 & repl11100 { export repl11100; } +Imm_neon_uimm8Shift: repl11101 is abcdefgh & cmode=0xe & b_29=1 & repl11101 { export repl11101; } # MOVI 64 +Imm_neon_uimm8Shift: repl11110 is abcdefgh & cmode=0xf & b_29=0 & repl11110 { export repl11110; } # FMOV +Imm_neon_uimm8Shift: repl11111 is abcdefgh & cmode=0xf & b_29=1 & repl11111 { export repl11111; } # FMOV + +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x1 & b_29=1 & repl000 { export repl000; } # BIC32 +Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0x3 & b_29=1 & repl001 { export repl001; } +Imm_neon_uimm8Shift: abcdefgh^", LSL 16" is abcdefgh & cmode=0x5 & b_29=1 & repl010 { export repl010; } +Imm_neon_uimm8Shift: abcdefgh^", LSL 24" is abcdefgh & cmode=0x7 & b_29=1 & repl011 { export repl011; } # BIC16 +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x9 & b_29=1 & repl000 { export repl000; } +Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0xb & b_29=1 & repl001 { export repl001; } + +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x0 & b_29=1 & repl000 { export repl000; } # MVNI +Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0x2 & b_29=1 & repl001 { export repl001; } +Imm_neon_uimm8Shift: abcdefgh^", LSL 16" is abcdefgh & cmode=0x4 & b_29=1 & repl010 { export repl010; } +Imm_neon_uimm8Shift: abcdefgh^", LSL 24" is abcdefgh & cmode=0x6 & b_29=1 & repl011 { export repl011; } +Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x8 & b_29=1 & repl000 { export repl000; } # MVNI +Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0xa & b_29=1 & repl001 { export repl001; } +Imm_neon_uimm8Shift: abcdefgh^", MSL 8" is abcdefgh & cmode=0xc & b_29=1 & repl1100 { export repl1100; } # MVNI +Imm_neon_uimm8Shift: abcdefgh^", MSL 16" is abcdefgh & cmode=0xd & b_29=1 & repl1101 { export repl1101; } + +vIndex: val is b_2222=0 & b_2121 & b_1111 [ val = b_1111 << 1 | b_2121; ] { export *[const]:8 val; } +vIndex: val is b_2222=1 & b_2121=0 & b_1111 [ val = b_1111 & 0x1; ] { export *[const]:8 val; } + +vIndexHLM: val is b_2223=2 & b_2121 & b_1111 [ val = b_1111 << 1 | b_2121; ] { export *[const]:8 val; } +vIndexHLM: val is b_2223=1 & b_2121 & b_1111 & b_2020 [ val = b_1111 << 2 | b_2121 << 1 | b_2020; ] { export *[const]:8 val; } +vIndexHLM: val is b_2223=0 & b_2121 & b_1111 & b_2020 [ val = b_1111 << 2 | b_2121 << 1 | b_2020; ] { export *[const]:8 val; } + +vIndexHL: val is b_2223=0b01 & b_21 & b_11 [ val = b_11 << 1 | b_21; ] { export *[const]:8 val; } +vIndexHL: b_11 is b_2223=0b10 & b_11 { export *[const]:8 b_11; } + +@if DATA_ENDIAN == "little" +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 2 + b_2121; ] { export *[register]:1 val; } +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111; ] { export *[register]:1 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121) * 4; ] { export *[register]:4 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 4; ] { export *[register]:4 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121) * 8; ] { export *[register]:8 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 8; ] { export *[register]:8 
val; } +@else +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x501f + 32*Re_VPR128 - b_1111 * 2 - b_2121; ] { export *[register]:1 val; } +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x501f + 32*Re_VPR128 - b_1111; ] { export *[register]:1 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x501c + 32*Re_VPR128 - (b_1111 * 2 + b_2121) * 4; ] { export *[register]:4 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x501c + 32*Re_VPR128 - b_1111 * 4; ] { export *[register]:4 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5018 + 32*Re_VPR128 - (b_1111 * 2 + b_2121) * 8; ] { export *[register]:8 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5018 + 32*Re_VPR128 - b_1111 * 8; ] { export *[register]:8 val; } +@endif + +Re_VPR128.B.vIndex: Re_VPR128.B^"["^vIndex^"]" is Re_VPR128.B & vIndex & Re_VPR128.B.sel { export Re_VPR128.B.sel; } +Re_VPR128.S.vIndex: Re_VPR128.S^"["^vIndex^"]" is Re_VPR128.S & vIndex & Re_VPR128.S.sel { export Re_VPR128.S.sel; } +Re_VPR128.D.vIndex: Re_VPR128.D^"["^vIndex^"]" is Re_VPR128.D & vIndex & Re_VPR128.D.sel { export Re_VPR128.D.sel; } + +@if DATA_ENDIAN == "little" +Rd_VPR128.B.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm4 [ val = 0x5000 + 32*Rd_VPR128 + imm_neon_uimm4; ] { export *[register]:1 val; } +Rd_VPR128.H.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm3 [ val = 0x5000 + 32*Rd_VPR128 + 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rd_VPR128.S.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm2 [ val = 0x5000 + 32*Rd_VPR128 + 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rd_VPR128.D.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm1 [ val = 0x5000 + 32*Rd_VPR128 + 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@else +Rd_VPR128.B.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm4 [ val = 0x501f + 32*Rd_VPR128 - imm_neon_uimm4; ] { export *[register]:1 val; } +Rd_VPR128.H.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm3 [ val = 0x501e + 32*Rd_VPR128 - 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rd_VPR128.S.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm2 [ val = 0x501c + 32*Rd_VPR128 - 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rd_VPR128.D.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm1 [ val = 0x5018 + 32*Rd_VPR128 - 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@endif +Rd_VPR128.B.imm_neon_uimm4: Rd_VPR128.B^"["^imm_neon_uimm4^"]" is Rd_VPR128.B & imm_neon_uimm4 & Rd_VPR128.B.sel { export Rd_VPR128.B.sel; } +Rd_VPR128.H.imm_neon_uimm3: Rd_VPR128.H^"["^imm_neon_uimm3^"]" is Rd_VPR128.H & imm_neon_uimm3 & Rd_VPR128.H.sel { export Rd_VPR128.H.sel; } +Rd_VPR128.S.imm_neon_uimm2: Rd_VPR128.S^"["^imm_neon_uimm2^"]" is Rd_VPR128.S & imm_neon_uimm2 & Rd_VPR128.S.sel { export Rd_VPR128.S.sel; } +Rd_VPR128.D.imm_neon_uimm1: Rd_VPR128.D^"["^imm_neon_uimm1^"]" is Rd_VPR128.D & imm_neon_uimm1 & Rd_VPR128.D.sel { export Rd_VPR128.D.sel; } + +@if DATA_ENDIAN == "little" +Rn_VPR128.B.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm4 [ val = 0x5000 + 32*Rn_VPR128 + immN_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm3 [ val = 0x5000 + 32*Rn_VPR128 + 2*immN_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm2 [ val = 0x5000 + 32*Rn_VPR128 + 4*immN_neon_uimm2; ] { export 
*[register]:4 val; } +Rn_VPR128.D.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm1 [ val = 0x5000 + 32*Rn_VPR128 + 8*immN_neon_uimm1; ] { export *[register]:8 val; } +@else +Rn_VPR128.B.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm4 [ val = 0x501f + 32*Rn_VPR128 - immN_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm3 [ val = 0x501e + 32*Rn_VPR128 - 2*immN_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm2 [ val = 0x501c + 32*Rn_VPR128 - 4*immN_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm1 [ val = 0x5018 + 32*Rn_VPR128 - 8*immN_neon_uimm1; ] { export *[register]:8 val; } +@endif +Rn_VPR128.B.immN_neon_uimm4: Rn_VPR128.B^"["^immN_neon_uimm4^"]" is Rn_VPR128.B & immN_neon_uimm4 & Rn_VPR128.B.selN { export Rn_VPR128.B.selN; } +Rn_VPR128.H.immN_neon_uimm3: Rn_VPR128.H^"["^immN_neon_uimm3^"]" is Rn_VPR128.H & immN_neon_uimm3 & Rn_VPR128.H.selN { export Rn_VPR128.H.selN; } +Rn_VPR128.S.immN_neon_uimm2: Rn_VPR128.S^"["^immN_neon_uimm2^"]" is Rn_VPR128.S & immN_neon_uimm2 & Rn_VPR128.S.selN { export Rn_VPR128.S.selN; } +Rn_VPR128.D.immN_neon_uimm1: Rn_VPR128.D^"["^immN_neon_uimm1^"]" is Rn_VPR128.D & immN_neon_uimm1 & Rn_VPR128.D.selN { export Rn_VPR128.D.selN; } + +@if DATA_ENDIAN == "little" +Rn_VPR128.B.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm4 [ val = 0x5000 + 32*Rn_VPR128 + imm_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm3 [ val = 0x5000 + 32*Rn_VPR128 + 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm2 [ val = 0x5000 + 32*Rn_VPR128 + 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm1 [ val = 0x5000 + 32*Rn_VPR128 + 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@else +Rn_VPR128.B.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm4 [ val = 0x501f + 32*Rn_VPR128 - imm_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm3 [ val = 0x501e + 32*Rn_VPR128 - 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm2 [ val = 0x501c + 32*Rn_VPR128 - 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm1 [ val = 0x5018 + 32*Rn_VPR128 - 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@endif +Rn_VPR128.B.imm_neon_uimm4: Rn_VPR128.B^"["^imm_neon_uimm4^"]" is Rn_VPR128.B & imm_neon_uimm4 & Rn_VPR128.B.sel { export Rn_VPR128.B.sel; } +Rn_VPR128.H.imm_neon_uimm3: Rn_VPR128.H^"["^imm_neon_uimm3^"]" is Rn_VPR128.H & imm_neon_uimm3 & Rn_VPR128.H.sel { export Rn_VPR128.H.sel; } +Rn_VPR128.S.imm_neon_uimm2: Rn_VPR128.S^"["^imm_neon_uimm2^"]" is Rn_VPR128.S & imm_neon_uimm2 & Rn_VPR128.S.sel { export Rn_VPR128.S.sel; } +Rn_VPR128.D.imm_neon_uimm1: Rn_VPR128.D^"["^imm_neon_uimm1^"]" is Rn_VPR128.D & imm_neon_uimm1 & Rn_VPR128.D.sel { export Rn_VPR128.D.sel; } + +Re_VPR128.H.vIndexHL: Re_VPR128.H^"["^vIndexHL^"]" is Re_VPR128.H & vIndexHL { } + +@if DATA_ENDIAN == "little" +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=2 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=1 & b_2121 & b_1111 & b_2020 [ val = 0x5000 + 32*Re_VPR128 + (b_1111*4 + b_2121*2 + 
b_2020)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=0 & b_2121 & b_1111 & b_2020 [ val = 0x5000 + 32*Re_VPR128 + (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +@else +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=2 & b_2121 & b_1111 [ val = 0x501e + 32*Re_VPR128 - (b_1111 * 2 + b_2121)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=1 & b_2121 & b_1111 & b_2020 [ val = 0x501e + 32*Re_VPR128 - (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=0 & b_2121 & b_1111 & b_2020 [ val = 0x501e + 32*Re_VPR128 - (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +@endif +Re_VPR128Lo.H.vIndexHLM: Re_VPR128Lo.H^"["^vIndexHLM^"]" is Re_VPR128Lo.H & vIndexHLM & Re_VPR128Lo.H.sel { export Re_VPR128Lo.H.sel; } + +FBitsOp: "#"^fbits is Scale [ fbits = 64 - Scale; ] { export *[const]:2 fbits; } + +FBits64: factor is FBitsOp & Scale [ factor = 1 << (64 - Scale); ] { fval:8 = int2float(factor:8); export fval; } +FBits32: factor is FBitsOp & Scale [ factor = 1 << (64 - Scale); ] { fval:4 = int2float(factor:8); export fval; } +FBits16: factor is FBitsOp & Scale [ factor = 1 << (64 - Scale); ] { fval:2 = int2float(factor:8); export fval; } + +# float + +Imm8_fmov16_operand: imm is Imm8_fmov_sign & Imm8_fmov_exph & Imm8_fmov_expl & Imm8_fmov_frac & ftype=3 [ imm = (Imm8_fmov_sign << 15) | ((Imm8_fmov_exph $xor 1) << 14) | ((Imm8_fmov_exph * 0x3) << 12) | (Imm8_fmov_expl << 10) | (Imm8_fmov_frac << 6); ] { export *[const]:2 imm; } + +Imm8_fmov32_operand: imm is Imm8_fmov_sign & Imm8_fmov_exph & Imm8_fmov_expl & Imm8_fmov_frac & ftype=0 [ imm = (Imm8_fmov_sign << 31) | ((Imm8_fmov_exph $xor 1) << 30) | ((Imm8_fmov_exph * 0x1f) << 25) | (Imm8_fmov_expl << 23) | (Imm8_fmov_frac << 19); ] { export *[const]:4 imm; } + +# double +Imm8_fmov64_operand: imm is Imm8_fmov_sign & Imm8_fmov_exph & Imm8_fmov_expl & Imm8_fmov_frac & ftype=1 [ imm = (Imm8_fmov_sign << 63) | ((Imm8_fmov_exph $xor 1) << 62) | ((Imm8_fmov_exph * 0xff) << 54) | (Imm8_fmov_expl << 52) | (Imm8_fmov_frac << 48); ] { export *[const]:8 imm; } + +# SVE subtables + +# The size qualifier (T) is encoded in several different ways. 
The +# majority of encodings are in sve_size_2223 + +# encoded in "size" -- Is the size specifier, size 00 B 01 H 10 S 11 D + +T: "B" is sve_size_2223=0b00 { export 1:1; } +T: "H" is sve_size_2223=0b01 { export 2:1; } +T: "S" is sve_size_2223=0b10 { export 4:1; } +T: "D" is sve_size_2223=0b11 { export 8:1; } + +T_sz: "S" is sve_sz_22=0 { export 4:1; } +T_sz: "D" is sve_sz_22=1 { export 8:1; } + +# encoded in "tszh:tszl" -- Is the size specifier, tszh tszl 00 00 RESERVED 00 01 B 00 1x H 01 xx S 1x xx D +# Note that tszl is either in b_0809 (if b_21=0) or b_1920 (if b_21=1) + +T_tszh: "B" is sve_tszh_2223=0b00 & b_21=0 & sve_tszl_0809=0b01 { export 1:1; } +T_tszh: "B" is sve_tszh_2223=0b00 & b_21=1 & sve_tszl_1920=0b01 { export 1:1; } +T_tszh: "H" is sve_tszh_2223=0b00 & b_21=0 & b_09=1 { export 2:1; } +T_tszh: "H" is sve_tszh_2223=0b00 & b_21=1 & b_20=1 { export 2:1; } +T_tszh: "S" is sve_tszh_2223=0b01 { export 4:1; } +T_tszh: "D" is b_23=1 { export 8:1; } + +# encoded in "size" -- Is the size specifier, size 00 B 01 H 10 S 11 D + +T_size_2122: "B" is sve_size_2122=0b00 { export 1:1; } +T_size_2122: "H" is sve_size_2122=0b01 { export 2:1; } +T_size_2122: "S" is sve_size_2122=0b10 { export 4:1; } +T_size_2122: "D" is sve_size_2122=0b11 { export 8:1; } + +# encoded in "tsz" -- Is the size specifier, tsz 00000 RESERVED xxxx1 B xxx10 H xx100 S x1000 D 10000 Q + +T_tsz: "B" is b_16=1 { export 1:1; } +T_tsz: "H" is b_1617=0b10 { export 2:1; } +T_tsz: "S" is b_1618=0b100 { export 4:1; } +T_tsz: "D" is b_1619=0b1000 { export 8:1; } +T_tsz: "Q" is b_1620=0b10000 { export 16:1; } +sve_imm2_tsz: tmp is b_16=1 & sve_imm2_2223 & b_1720 [ tmp = sve_imm2_2223 * 16 + b_1720; ] { export *[const]:1 tmp; } +sve_imm2_tsz: tmp is b_1617=0b10 & sve_imm2_2223 & b_1820 [ tmp = sve_imm2_2223 * 8 + b_1820; ] { export *[const]:1 tmp; } +sve_imm2_tsz: tmp is b_1618=0b100 & sve_imm2_2223 & b_1920 [ tmp = sve_imm2_2223 * 4 + b_1920; ] { export *[const]:1 tmp; } +sve_imm2_tsz: tmp is b_1619=0b1000 & sve_imm2_2223 & b_20 [ tmp = sve_imm2_2223 * 2 + b_20; ] { export *[const]:1 tmp; } +sve_imm2_tsz: tmp is b_1620=0b10000 & sve_imm2_2223 [ tmp = sve_imm2_2223 + 0; ] { export *[const]:1 tmp; } + +# encoded in "imm13<12>:imm13<5:0>" -- Is the size specifier, imm13<12> imm13<5:0> 0 0xxxxx S 0 10xxxx H 0 110xxx B 0 1110xx B 0 11110x B 0 111110 RESERVED 0 111111 RESERVED 1 xxxxxx D + +T_imm13: "S" is b_17=0 & b_10=0 { export 4:1; } +T_imm13: "H" is b_17=0 & b_0910=0b10 { export 2:1; } +T_imm13: "B" is b_17=0 & b_0810=0b110 { export 1:1; } +T_imm13: "B" is b_17=0 & b_0710=0b1110 { export 1:1; } +T_imm13: "B" is b_17=0 & b_0610=0b11110 { export 1:1; } +T_imm13: "D" is b_17=1 { export 8:1; } + +Zd.T: Zd^"."^T is Zd & T { export Zd; } +Zd.T_2: Zd^"."^T is Zd & T { export Zd; } +Zd.T_tszh: Zd^"."^T_tszh is Zd & T_tszh { export Zd; } +Zd.T_tszh_2: Zd^"."^T_tszh is Zd & T_tszh { export Zd; } +Zd.T_tsz: Zd^"."^T_tsz is Zd & T_tsz { export Zd; } +Zd.T_imm13: Zd^"."^T_imm13 is Zd & T_imm13 { export Zd; } +Zd.T_imm13_2: Zd^"."^T_imm13 is Zd & T_imm13 { export Zd; } +Zd.T_sz: Zd^"."^T_sz is Zd & T_sz { export Zd; } +Zd.T_sz_2: Zd^"."^T_sz is Zd & T_sz { export Zd; } +Zd.T_size_2122: Zd^"."^T_size_2122 is Zd & T_size_2122 { export Zd; } + +Zd.B: Zd^".B" is Zd { export Zd; } +Zd.B_2: Zd^".B" is Zd { export Zd; } +Zd.H: Zd^".H" is Zd { export Zd; } +Zd.S: Zd^".S" is Zd { export Zd; } +Zd.D: Zd^".D" is Zd { export Zd; } + +Zt.B: sve_zt_0004^".B" is sve_zt_0004 { export sve_zt_0004; } +Ztt.B: sve_ztt_0004^".B" is sve_ztt_0004 { export 
sve_ztt_0004; } +Zttt.B: sve_zttt_0004^".B" is sve_zttt_0004 { export sve_zttt_0004; } +Ztttt.B: sve_ztttt_0004^".B" is sve_ztttt_0004 { export sve_ztttt_0004; } +Zt.H: sve_zt_0004^".H" is sve_zt_0004 { export sve_zt_0004; } +Ztt.H: sve_ztt_0004^".H" is sve_ztt_0004 { export sve_ztt_0004; } +Zttt.H: sve_zttt_0004^".H" is sve_zttt_0004 { export sve_zttt_0004; } +Ztttt.H: sve_ztttt_0004^".H" is sve_ztttt_0004 { export sve_ztttt_0004; } +Zt.S: sve_zt_0004^".S" is sve_zt_0004 { export sve_zt_0004; } +Ztt.S: sve_ztt_0004^".S" is sve_ztt_0004 { export sve_ztt_0004; } +Zttt.S: sve_zttt_0004^".S" is sve_zttt_0004 { export sve_zttt_0004; } +Ztttt.S: sve_ztttt_0004^".S" is sve_ztttt_0004 { export sve_ztttt_0004; } +Zt.D: sve_zt_0004^".D" is sve_zt_0004 { export sve_zt_0004; } +Ztt.D: sve_ztt_0004^".D" is sve_ztt_0004 { export sve_ztt_0004; } +Zttt.D: sve_zttt_0004^".D" is sve_zttt_0004 { export sve_zttt_0004; } +Ztttt.D: sve_ztttt_0004^".D" is sve_ztttt_0004 { export sve_ztttt_0004; } + +Zn.T: sve_zn_0509^"."^T is sve_zn_0509 & T { export sve_zn_0509; } +Zn.T_sz: sve_zn_0509^"."^T_sz is sve_zn_0509 & T_sz { export sve_zn_0509; } +Zn.T_tszh: sve_zn_0509^"."^T_tszh is sve_zn_0509 & T_tszh { export sve_zn_0509; } +Zn.T_tsz: sve_zn_0509^"."^T_tsz is sve_zn_0509 & T_tsz { export sve_zn_0509; } +Zn.Tb_sz: sve_zn_0509^".B" is sve_zn_0509 & sve_sz_22=0 { export sve_zn_0509; } +Zn.Tb_sz: sve_zn_0509^".H" is sve_zn_0509 & sve_sz_22=1 { export sve_zn_0509; } +Zn.Tb: sve_zn_0509^".B" is sve_zn_0509 & sve_size_2223=0b01 { export sve_zn_0509; } +Zn.Tb: sve_zn_0509^".H" is sve_zn_0509 & sve_size_2223=0b10 { export sve_zn_0509; } +Zn.Tb: sve_zn_0509^".S" is sve_zn_0509 & sve_size_2223=0b11 { export sve_zn_0509; } + +Zn.B: sve_zn_0509^".B" is sve_zn_0509 { export sve_zn_0509; } +Zn.H: sve_zn_0509^".H" is sve_zn_0509 { export sve_zn_0509; } +Zn.S: sve_zn_0509^".S" is sve_zn_0509 { export sve_zn_0509; } +Zn.D: sve_zn_0509^".D" is sve_zn_0509 { export sve_zn_0509; } + +Zm.T: sve_zm_1620^"."^T is sve_zm_1620 & T { export sve_zm_1620; } +Zm.T_sz: sve_zm_1620^"."^T_sz is sve_zm_1620 & T_sz { export sve_zm_1620; } +Zm.Tb_sz: sve_zm_1620^".B" is sve_zm_1620 & sve_sz_22=0 { export sve_zm_1620; } +Zm.Tb_sz: sve_zm_1620^".H" is sve_zm_1620 & sve_sz_22=1 { export sve_zm_1620; } +# Zm.Tb: sve_zm_1620^".B" is sve_zm_1620 & sve_size_2223=0b01 { export sve_zm_1620; } +# Zm.Tb: sve_zm_1620^".H" is sve_zm_1620 & sve_size_2223=0b10 { export sve_zm_1620; } +# Zm.Tb: sve_zm_1620^".S" is sve_zm_1620 & sve_size_2223=0b11 { export sve_zm_1620; } + +# Zm.B: sve_zm_1620^".B" is sve_zm_1620 { export sve_zm_1620; } +# Zm.H: sve_zm_1620^".H" is sve_zm_1620 { export sve_zm_1620; } +Zm.S: sve_zm_1620^".S" is sve_zm_1620 { export sve_zm_1620; } +Zm.D: sve_zm_1620^".D" is sve_zm_1620 { export sve_zm_1620; } + +Zm3.B: sve_zm_1618^".B" is sve_zm_1618 { export sve_zm_1618; } +Zm3.H: sve_zm_1618^".H" is sve_zm_1618 { export sve_zm_1618; } +Zm3.S: sve_zm_1618^".S" is sve_zm_1618 { export sve_zm_1618; } +# Zm3.D: sve_zm_1618^".D" is sve_zm_1618 { export sve_zm_1618; } + +# Zm4.B: sve_zm_1619^".B" is sve_zm_1619 { export sve_zm_1619; } +Zm4.H: sve_zm_1619^".H" is sve_zm_1619 { export sve_zm_1619; } +Zm4.S: sve_zm_1619^".S" is sve_zm_1619 { export sve_zm_1619; } +Zm4.D: sve_zm_1619^".D" is sve_zm_1619 { export sve_zm_1619; } + +Pg: sve_pg_1013 is sve_pg_1013 { export sve_pg_1013; } +Pg_z: sve_pg_1013^"/z" is sve_pg_1013 { export sve_pg_1013; } +Pg_zm: sve_pg_1013^"/z" is sve_pg_1013 & sve_m_04=0 { export sve_pg_1013; } +Pg_zm: sve_pg_1013^"/m" is 
sve_pg_1013 & sve_m_04=1 { export sve_pg_1013; } +Pg3: sve_pg_1012 is sve_pg_1012 { export sve_pg_1012; } +Pg3_m: sve_pg_1012^"/m" is sve_pg_1012 { export sve_pg_1012; } +Pg3_z: sve_pg_1012^"/z" is sve_pg_1012 { export sve_pg_1012; } +Pg3_zm: sve_pg_1012^"/z" is sve_pg_1012 & sve_m_16=0 { export sve_pg_1012; } +Pg3_zm: sve_pg_1012^"/m" is sve_pg_1012 & sve_m_16=1 { export sve_pg_1012; } + +Pd.T: sve_pd_0003^"."^T is sve_pd_0003 & T { export sve_pd_0003; } +Pd.T_2: sve_pd_0003^"."^T is sve_pd_0003 & T { export sve_pd_0003; } +Pd: sve_pd_0003 is sve_pd_0003 { export sve_pd_0003; } +Pd.B: sve_pd_0003^".B" is sve_pd_0003 { export sve_pd_0003; } +Pd.B_2: sve_pd_0003^".B" is sve_pd_0003 { export sve_pd_0003; } +Pd.H: sve_pd_0003^".H" is sve_pd_0003 { export sve_pd_0003; } +# Pd.S: sve_pd_0003^".S" is sve_pd_0003 { export sve_pd_0003; } +# Pd.D: sve_pd_0003^".D" is sve_pd_0003 { export sve_pd_0003; } + +Pn: sve_pn_0508 is sve_pn_0508 { export sve_pn_0508; } +Pn_z: sve_pn_0508^"/z" is sve_pn_0508 { export sve_pn_0508; } +Pn.T: sve_pn_0508^"."^T is sve_pn_0508 & T { export sve_pn_0508; } +Pn.B: sve_pn_0508^".B" is sve_pn_0508 { export sve_pn_0508; } +# Pn.H: sve_pn_0508^".H" is sve_pn_0508 { export sve_pn_0508; } +# Pn.S: sve_pn_0508^".S" is sve_pn_0508 { export sve_pn_0508; } +# Pn.D: sve_pn_0508^".D" is sve_pn_0508 { export sve_pn_0508; } + +Pm_m: sve_pm_1619^"/m" is sve_pm_1619 { export sve_pm_1619; } +Pm_zm: sve_pm_1619^"/z" is sve_pm_1619 & sve_m_14=0 { export sve_pm_1619; } +Pm_zm: sve_pm_1619^"/m" is sve_pm_1619 & sve_m_14=1 { export sve_pm_1619; } +Pm.T: sve_pm_1619^"."^T is sve_pm_1619 & T { export sve_pm_1619; } +Pm.B: sve_pm_1619^".B" is sve_pm_1619 { export sve_pm_1619; } +# Pm.H: sve_pm_1619^".H" is sve_pm_1619 { export sve_pm_1619; } +# Pm.S: sve_pm_1619^".S" is sve_pm_1619 { export sve_pm_1619; } +# Pm.D: sve_pm_1619^".D" is sve_pm_1619 { export sve_pm_1619; } + +sve_i3h_i3l: tmp is sve_i3h_22 & sve_i3l_1920 [ tmp = sve_i3h_22 * 4 + sve_i3l_1920; ] { export *[const]:1 tmp; } +sve_imm3_1_0to7: sve_imm3_1618 is sve_imm3_1618 { export *[const]:1 sve_imm3_1618; } +sve_imm4_1_1to16: tmp is sve_imm4_1619 [ tmp = sve_imm4_1619 + 1; ] { export *[const]:1 tmp; } +sve_imm4_1_m128to112: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 16; ] { export *[const]:1 tmp; } +sve_opt4_1_m128to112: "" is sve_imm4s_1619=0 { export 0:1; } +sve_opt4_1_m128to112: ", #"^sve_imm4_1_m128to112 is sve_imm4_1_m128to112 { export sve_imm4_1_m128to112; } +sve_imm4_1_m16to14: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 2; ] { export *[const]:1 tmp; } +sve_mul4_1_m16to14: "" is sve_imm4s_1619=0 { export 0:1; } +sve_mul4_1_m16to14: ", #"^sve_imm4_1_m16to14^", mul vl" is sve_imm4_1_m16to14 { export *[const]:1 sve_imm4_1_m16to14; } +sve_imm4_1_m24to21: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 3; ] { export *[const]:1 tmp; } +sve_mul4_1_m24to21: "" is sve_imm4s_1619=0 { export 0:1; } +sve_mul4_1_m24to21: ", #"^sve_imm4_1_m24to21^", mul vl" is sve_imm4_1_m24to21 { export *[const]:1 sve_imm4_1_m24to21; } +sve_imm4_1_m32to28: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 4; ] { export *[const]:1 tmp; } +sve_mul4_1_m32to28: "" is sve_imm4s_1619=0 { export 0:1; } +sve_mul4_1_m32to28: ", #"^sve_imm4_1_m32to28^", mul vl" is sve_imm4_1_m32to28 { export *[const]:1 sve_imm4_1_m32to28; } +sve_imm4_1_m8to7: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 1; ] { export *[const]:1 tmp; } +sve_mul4_1_m8to7: "" is sve_imm4s_1619=0 { export 0:1; } +sve_mul4_1_m8to7: ", #"^sve_imm4_1_m8to7^", mul vl" is sve_imm4_1_m8to7 { export 
*[const]:1 sve_imm4_1_m8to7; } +sve_imm5_1_0to124: tmp is sve_imm5_1620 [ tmp = sve_imm5_1620 * 4; ] { export *[const]:1 tmp; } +sve_opt5_1_0to124: "" is sve_imm5_1620=0 { export 0:1; } +sve_opt5_1_0to124: ", #"^sve_imm5_1_0to124 is sve_imm5_1_0to124 { export sve_imm5_1_0to124; } +sve_imm5_1_0to248: tmp is sve_imm5_1620 [ tmp = sve_imm5_1620 * 8; ] { export *[const]:1 tmp; } +sve_opt5_1_0to248: "" is sve_imm5_1620=0 { export 0:1; } +sve_opt5_1_0to248: ", #"^sve_imm5_1_0to248 is sve_imm5_1_0to248 { export sve_imm5_1_0to248; } +sve_imm5_1_0to31: sve_imm5_1620 is sve_imm5_1620 { export *[const]:1 sve_imm5_1620; } +sve_opt5_1_0to31: "" is sve_imm5_1620=0 { export 0:1; } +sve_opt5_1_0to31: ", #"^sve_imm5_1_0to31 is sve_imm5_1_0to31 { export sve_imm5_1_0to31; } +sve_imm5_1_0to62: tmp is sve_imm5_1620 [ tmp = sve_imm5_1620 * 2; ] { export *[const]:1 tmp; } +sve_opt5_1_0to62: "" is sve_imm5_1620=0 { export 0:1; } +sve_opt5_1_0to62: ", #"^sve_imm5_1_0to62 is sve_imm5_1_0to62 { export sve_imm5_1_0to62; } +sve_imm5_1_m16to15: sve_imm5s_1620 is sve_b_1015=0b010001 & sve_imm5s_1620 { export *[const]:1 sve_imm5s_1620; } +sve_imm5_1_m16to15: sve_imm5s_0509 is sve_b_1015=0b010010 & sve_imm5s_0509 { export *[const]:1 sve_imm5s_0509; } +sve_imm6_1_0to126: tmp is sve_imm6_1621 [ tmp = sve_imm6_1621 * 2; ] { export *[const]:1 tmp; } +sve_opt6_1_0to126: "" is sve_imm6_1621=0 { export 0:1; } +sve_opt6_1_0to126: ", #"^sve_imm6_1_0to126 is sve_imm6_1_0to126 { export sve_imm6_1_0to126; } +sve_imm6_1_0to252: tmp is sve_imm6_1621 [ tmp = sve_imm6_1621 * 4; ] { export *[const]:1 tmp; } +sve_opt6_1_0to252: "" is sve_imm6_1621=0 { export 0:1; } +sve_opt6_1_0to252: ", #"^sve_imm6_1_0to252 is sve_imm6_1_0to252 { export sve_imm6_1_0to252; } +sve_imm6_1_0to504: tmp is sve_imm6_1621 [ tmp = sve_imm6_1621 * 8; ] { export *[const]:2 tmp; } +sve_opt6_1_0to504: "" is sve_imm6_1621=0 { export 0:2; } +sve_opt6_1_0to504: ", #"^sve_imm6_1_0to504 is sve_imm6_1_0to504 { export sve_imm6_1_0to504; } +sve_imm6_1_0to63: sve_imm6_1621 is sve_imm6_1621 { export *[const]:1 sve_imm6_1621; } +sve_opt6_1_0to63: "" is sve_imm6_1621=0 { export 0:1; } +sve_opt6_1_0to63: ", #"^sve_imm6_1_0to63 is sve_imm6_1_0to63 { export sve_imm6_1_0to63; } +sve_imm6_1_m32to31: sve_imm6s_0510 is sve_imm6s_0510 { export *[const]:1 sve_imm6s_0510; } +sve_mul6_1_m32to31: "" is sve_imm6_1621=0 { export 0:1; } +sve_mul6_1_m32to31: ", #"^sve_imm6s_1621^", mul vl" is sve_imm6s_1621 { export *[const]:1 sve_imm6s_1621; } +sve_imm8_1_0to255: sve_imm8_0512 is sve_imm8_0512 { export *[const]:1 sve_imm8_0512; } +sve_shf8_1_0to255: "#0, LSL #8" is sve_imm8_0512=0 & sve_sh_13=1 { export 0:2; } +sve_shf8_1_0to255: "#"^tmp is sve_imm8_0512 & sve_sh_13 [ tmp = sve_imm8_0512 << (8 * sve_sh_13); ] { export *[const]:2 tmp; } +sve_imm8_1_m128to127: sve_imm8s_0512 is sve_imm8s_0512 { export *[const]:1 sve_imm8s_0512; } +sve_shf8_1_m128to127: "#0, LSL #8" is sve_imm8s_0512=0 & sve_sh_13=1 { export 0:2; } +sve_shf8_1_m128to127: "#"^tmp is sve_imm8s_0512 & sve_sh_13 [ tmp = sve_imm8s_0512 << (8 * sve_sh_13); ] { export *[const]:2 tmp; } +sve_imm8_2_0to255: tmp is sve_imm8h_1620 & sve_imm8l_1012 [ tmp = sve_imm8h_1620 * 8 + sve_imm8l_1012; ] { export *[const]:1 tmp; } +sve_imm9_2_m256to255: tmp is sve_imm9hs_1621 & sve_imm9l_1012 [ tmp = sve_imm9hs_1621 * 8 + sve_imm9l_1012; ] { export *[const]:2 tmp; } +sve_mul9_2_m256to255: "" is sve_imm6_1621=0 & sve_imm9l_1012=0 { export 0:2; } +sve_mul9_2_m256to255: ", #"^sve_imm9_2_m256to255^", mul vl" is sve_imm9_2_m256to255 { export 
sve_imm9_2_m256to255; } + +sve_pattern: "POW2" is sve_pattern_0509=0b00000 { export 0b00000:1; } +sve_pattern: "VL1" is sve_pattern_0509=0b00001 { export 0b00001:1; } +sve_pattern: "VL2" is sve_pattern_0509=0b00010 { export 0b00010:1; } +sve_pattern: "VL3" is sve_pattern_0509=0b00011 { export 0b00011:1; } +sve_pattern: "VL4" is sve_pattern_0509=0b00100 { export 0b00100:1; } +sve_pattern: "VL5" is sve_pattern_0509=0b00101 { export 0b00101:1; } +sve_pattern: "VL6" is sve_pattern_0509=0b00110 { export 0b00110:1; } +sve_pattern: "VL7" is sve_pattern_0509=0b00111 { export 0b00111:1; } +sve_pattern: "VL8" is sve_pattern_0509=0b01000 { export 0b01000:1; } +sve_pattern: "VL16" is sve_pattern_0509=0b01001 { export 0b01001:1; } +sve_pattern: "VL32" is sve_pattern_0509=0b01010 { export 0b01010:1; } +sve_pattern: "VL64" is sve_pattern_0509=0b01011 { export 0b01011:1; } +sve_pattern: "VL128" is sve_pattern_0509=0b01100 { export 0b01100:1; } +sve_pattern: "VL256" is sve_pattern_0509=0b01101 { export 0b01101:1; } +sve_pattern: "#"^sve_pattern_0509 is b_0609=0b0111 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; } +sve_pattern: "#"^sve_pattern_0509 is b_0709=0b101 & b_05=1 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; } +sve_pattern: "#"^sve_pattern_0509 is b_0509=0b10110 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; } +sve_pattern: "#"^sve_pattern_0509 is b_09=1 & b_07=0 & b_05=1 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; } +sve_pattern: "#"^sve_pattern_0509 is b_09=1 & b_0507=0b010 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; } +sve_pattern: "#"^sve_pattern_0509 is b_09=1 & b_0506=0b00 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; } +sve_pattern: "MUL4" is sve_pattern_0509=0b11101 { export 0b11101:1; } +sve_pattern: "MUL3" is sve_pattern_0509=0b11110 { export 0b11110:1; } +sve_pattern: "ALL" is sve_pattern_0509=0b11111 { export 0b11111:1; } + +sve_opt_pattern: "" is sve_pattern_0509=0b11111 { export 0b11111:1; } +sve_opt_pattern: ", "^sve_pattern is sve_pattern { export sve_pattern; } + +sve_mul_pattern: "" is sve_pattern_0509=0b11111 & sve_imm4_1619=0b0000 { export 0b11111:1; } +sve_mul_pattern: ", "^sve_pattern is sve_pattern & sve_imm4_1619=0b0000 { export sve_pattern; } +sve_mul_pattern: ", "^sve_pattern^", mul #"^sve_imm4_1_1to16 is sve_pattern & sve_imm4_1_1to16 { export sve_pattern; } + +sve_mod_amount: "" is sve_msz_1011=0b00 { export 0:1; } +sve_mod_amount: ", LSL #1" is sve_msz_1011=0b01 { export 1:1; } +sve_mod_amount: ", LSL #2" is sve_msz_1011=0b10 { export 2:1; } +sve_mod_amount: ", LSL #3" is sve_msz_1011=0b11 { export 3:1; } + +sve_mod: "UXTW" is b_15=1 & b_14=0 { export 2:1; } +sve_mod: "SXTW" is b_15=1 & b_14=1 { export 3:1; } +sve_mod: "UXTW" is b_15=0 & b_22=0 { export 0:1; } +sve_mod: "SXTW" is b_15=0 & b_22=1 { export 1:1; } + +sve_prfop: "PLDL1KEEP" is sve_prfop_0003=0b0000 { export 0b0000:1; } +sve_prfop: "PLDL1STRM" is sve_prfop_0003=0b0001 { export 0b0001:1; } +sve_prfop: "PLDL2KEEP" is sve_prfop_0003=0b0010 { export 0b0010:1; } +sve_prfop: "PLDL2STRM" is sve_prfop_0003=0b0011 { export 0b0011:1; } +sve_prfop: "PLDL3KEEP" is sve_prfop_0003=0b0100 { export 0b0100:1; } +sve_prfop: "PLDL3STRM" is sve_prfop_0003=0b0101 { export 0b0101:1; } +sve_prfop: "#"^sve_prfop_0003 is b_02 & b_01=1 & sve_prfop_0003 { export *[const]:1 sve_prfop_0003; } +sve_prfop: "PSTL1KEEP" is sve_prfop_0003=0b1000 { export 0b1000:1; } +sve_prfop: "PSTL1STRM" is sve_prfop_0003=0b1001 { export 0b1001:1; } +sve_prfop: "PSTL2KEEP" is 
sve_prfop_0003=0b1010 { export 0b1010:1; }
+sve_prfop: "PSTL2STRM" is sve_prfop_0003=0b1011 { export 0b1011:1; }
+sve_prfop: "PSTL3KEEP" is sve_prfop_0003=0b1100 { export 0b1100:1; }
+sve_prfop: "PSTL3STRM" is sve_prfop_0003=0b1101 { export 0b1101:1; }
+
+sve_decode_bit_mask: wmask is b_17=0 & b_0510=0b111100 & b_11 [ wmask = (0x5555 >> b_11) & 0xff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=0 & b_0710=0b1110 & b_0506=0b00 & b_1112 [ wmask = (0x1111 >> b_1112) & 0xff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=0 & b_0710=0b1110 & b_0506=0b01 & b_1112 [ wmask = (0x3333 >> b_1112) & 0xff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=0 & b_0710=0b1110 & b_0506=0b10 & b_1112 [ wmask = (0x7777 >> b_1112) & 0xff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=0 & b_0810=0b110 & b_0507 & b_1113 [ wmask = (((~(-1<<(b_0507+1))) | (~(-1<<(b_0507+9)) & 0xff00)) >> b_1113) & 0xff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=0 & b_0910=0b10 & b_0508 & b_1114 [ wmask = (((~(-1<<(b_0508+1))) | (~(-1<<(b_0508+17)) & 0xffff0000)) >> b_1114) & 0xffff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=0 & b_10=0 & b_0509 & b_1115 [ wmask = (((~(-1<<(b_0509+1))) | (~(-1<<(b_0509+33)) & 0xffffffff00000000)) >> b_1115) & 0xffffffff; ] { export *[const]:8 wmask; }
+sve_decode_bit_mask: wmask is b_17=1 & b_0510 & b_1116 [ wmask = ( (((~(-1<<(b_0510+1)))) >> b_1116) | (((~(-1<<(b_0510+1)))) << (64-b_1116))) & 0xffffffffffffffff; ] { export *[const]:8 wmask; }
+
+sve_shift_13: "" is sve_sh_13=0 { export 0:1; }
+sve_shift_13: ", LSL #8" is sve_sh_13=1 { export 8:1; }
+
+# The immediate shift is computed from tszh, tszl, imm3. The formula
+# depends on the instruction, as does the location of tszl and imm3.
+# The conditions b_21=0/1 and b_17/b_11=0/1 were found by inspecting
+# the differences between the instructions.
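+#
+# A worked example, derived from the constructors below rather than from the
+# Arm manual: for esize = 8 the element size is encoded as tszh:tszl = 0b00:0b01,
+# so UInt(tsz:imm3) = 8 + imm3. The "2 * esize - UInt(tsz:imm3)" decode with
+# imm3 = 0b011 therefore gives 16 - (8 + 3) = 5, while the
+# "UInt(tsz:imm3) - esize" decode gives (8 + 3) - 8 = 3.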
+ +# Instructions where the immediate shift is 2 * esize - UInt(tsz:imm3) +sve_imm_shift: tmp is b_21=0 & b_17=0 & sve_tszh_2223=0b00 & sve_tszl_0809=0b01 & sve_imm3_0507 [ tmp = 16 - ( 8 + sve_imm3_0507); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=0 & b_17=0 & sve_tszh_2223=0b00 & b_09=1 & sve_tszl_0809 & sve_imm3_0507 [ tmp = 32 - ( 8 * sve_tszl_0809 + sve_imm3_0507); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=0 & b_17=0 & sve_tszh_2223=0b01 & sve_tszl_0809 & sve_imm3_0507 [ tmp = 64 - (32 + 8 * sve_tszl_0809 + sve_imm3_0507); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=0 & b_17=0 & b_23=1 & sve_tszh_2223 & sve_tszl_0809 & sve_imm3_0507 [ tmp = 128 - (32 * sve_tszh_2223 + 8 * sve_tszl_0809 + sve_imm3_0507); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=0 & sve_tszh_2223=0b00 & sve_tszl_1920=0b01 & sve_imm3_1618 [ tmp = 16 - ( 8 + sve_imm3_1618); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=0 & sve_tszh_2223=0b00 & b_20=1 & sve_tszl_1920 & sve_imm3_1618 [ tmp = 32 - ( 8 * sve_tszl_1920 + sve_imm3_1618); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=0 & sve_tszh_2223=0b01 & sve_tszl_1920 & sve_imm3_1618 [ tmp = 64 - (32 + 8 * sve_tszl_1920 + sve_imm3_1618); ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=0 & b_23=1 & sve_tszh_2223 & sve_tszl_1920 & sve_imm3_1618 [ tmp = 128 - (32 * sve_tszh_2223 + 8 * sve_tszl_1920 + sve_imm3_1618); ] { export *[const]:1 tmp; } + +# Instructions where the immediate shift is UInt(tsz:imm3) - esize +sve_imm_shift: tmp is b_21=0 & b_17=1 & sve_tszh_2223=0b00 & sve_tszl_0809=0b01 & sve_imm3_0507 [ tmp = ( 8 + sve_imm3_0507) - 8; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=0 & b_17=1 & sve_tszh_2223=0b00 & b_09=1 & sve_tszl_0809 & sve_imm3_0507 [ tmp = ( 8 * sve_tszl_0809 + sve_imm3_0507) - 16; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=0 & b_17=1 & sve_tszh_2223=0b01 & sve_tszl_0809 & sve_imm3_0507 [ tmp = (32 + 8 * sve_tszl_0809 + sve_imm3_0507) - 32; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=0 & b_17=1 & b_23=1 & sve_tszh_2223 & sve_tszl_0809 & sve_imm3_0507 [ tmp = (32 * sve_tszh_2223 + 8 * sve_tszl_0809 + sve_imm3_0507) - 64; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=1 & sve_tszh_2223=0b00 & sve_tszl_1920=0b01 & sve_imm3_1618 [ tmp = ( 8 + sve_imm3_1618) - 8; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=1 & sve_tszh_2223=0b00 & b_20=1 & sve_tszl_1920 & sve_imm3_1618 [ tmp = ( 8 * sve_tszl_1920 + sve_imm3_1618) - 16; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=1 & sve_tszh_2223=0b01 & sve_tszl_1920 & sve_imm3_1618 [ tmp = (32 + 8 * sve_tszl_1920 + sve_imm3_1618) - 32; ] { export *[const]:1 tmp; } +sve_imm_shift: tmp is b_21=1 & b_11=1 & b_23=1 & sve_tszh_2223 & sve_tszl_1920 & sve_imm3_1618 [ tmp = (32 * sve_tszh_2223 + 8 * sve_tszl_1920 + sve_imm3_1618) - 64; ] { export *[const]:1 tmp; } + +sve_float_0510: "#0.5" is sve_i1_05=0 { export 0:1; } +sve_float_0510: "#1.0" is sve_i1_05=1 { export 1:1; } +sve_float_0520: "#0.5" is sve_i1_05=0 { export 0:1; } +sve_float_0520: "#2.0" is sve_i1_05=1 { export 1:1; } +sve_float_0010: "#0.0" is sve_i1_05=0 { export 0:1; } +sve_float_0010: "#1.0" is sve_i1_05=1 { export 1:1; } + +# there are no floating point constants in SLEIGH +# generate equivalent hex floating point constant + +attach names [ sve_float_dec ] [ "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "a" "b" "c" "d" "e" "f" ]; +attach names [ 
sve_float_exp ] [ "+1" "+2" "+3" "+4" "-3" "-2" "-1" "+0" ]; + +sve_float_imm8: s^"."^sve_float_dec^"p"^sve_float_exp is sve_imm8_0512 & sve_float_dec & sve_float_exp & b_12 [ s = (1 - 2 * b_12); ] { export *[const]:1 sve_imm8_0512; } + +# SECTION pcodeops + +# The following SIMD and MP versions of SLEIGH primitives are +# implemented in java for AARCH64 + +define pcodeop MP_INT_ABS; +define pcodeop MP_INT_RIGHT; +define pcodeop MP_INT_MULT; +define pcodeop MP_INT_UMULT; + +# The following AARCH64 instructions are implemented in java as a +# pcodeop + +define pcodeop a64_TBL; + +# The following pcode ops are not implemented + +define pcodeop AT_S12E0R; +define pcodeop AT_S12E0W; +define pcodeop AT_S12E1R; +define pcodeop AT_S12E1W; +define pcodeop AT_S1E0R; +define pcodeop AT_S1E0W; +define pcodeop AT_S1E1R; +define pcodeop AT_S1E1RP; +define pcodeop AT_S1E1W; +define pcodeop AT_S1E1WP; +define pcodeop AT_S1E2R; +define pcodeop AT_S1E2W; +define pcodeop AT_S1E3R; +define pcodeop AT_S1E3W; +define pcodeop AuthIA; +define pcodeop AuthIB; +define pcodeop AuthDA; +define pcodeop AuthDB; +define pcodeop CallHyperVisor; +define pcodeop CallSecureMonitor; +define pcodeop CallSupervisor; +define pcodeop ClearExclusiveLocal; +define pcodeop crc32b; define pcodeop crc32h; +define pcodeop crc32w; +define pcodeop crc32x; +define pcodeop DataMemoryBarrier; +define pcodeop DataSynchronizationBarrier; +define pcodeop DC_CISW; +define pcodeop DC_CIVAC; +define pcodeop DC_CSW; +define pcodeop DC_CVAC; +define pcodeop DC_CVAP; +define pcodeop DC_CVAU; +define pcodeop DC_ISW; +define pcodeop DC_IVAC; +define pcodeop DC_IGVAC; +define pcodeop DC_IGSW; +define pcodeop DC_IGDVAC; +define pcodeop DC_IGDSW; +define pcodeop DC_CGSW; +define pcodeop DC_CGDSW; +define pcodeop DC_CIGSW; +define pcodeop DC_CIGDSW; +define pcodeop DC_GVA; +define pcodeop DC_GZVA; +define pcodeop DC_CGVAC; +define pcodeop DC_CGDVAC; +define pcodeop DC_CGVAP; +define pcodeop DC_CGDVAP; +define pcodeop DC_CGVADP; +define pcodeop DC_CGDVADP; +define pcodeop DC_CIGVAC; +define pcodeop DC_CIGDVAC; +define pcodeop DCPSInstruction; +define pcodeop DC_ZVA; +define pcodeop DRPSInstruction; +define pcodeop ExceptionReturn; +define pcodeop ExclusiveMonitorPass; +define pcodeop ExclusiveMonitorsStatus; +define pcodeop HaltBreakPoint; +define pcodeop Hint_Prefetch; +define pcodeop IC_IALLU; +define pcodeop IC_IALLUIS; +define pcodeop IC_IVAU; +define pcodeop InstructionSynchronizationBarrier; +define pcodeop LOAcquire; +define pcodeop LORelease; +define pcodeop pacda; +define pcodeop pacdb; +define pcodeop pacdza; +define pcodeop pacdzb; +define pcodeop pacga; +define pcodeop pacia; +define pcodeop paciza; +define pcodeop pacib; +define pcodeop pacizb; +define pcodeop SendEvent; +define pcodeop SendEventLocally; +define pcodeop SoftwareBreakpoint; +define pcodeop SpeculationBarrier; +define pcodeop SysOp_R; +define pcodeop SysOp_W; +define pcodeop TLBI_ALLE1; +define pcodeop TLBI_ALLE1IS; +define pcodeop TLBI_ALLE2; +define pcodeop TLBI_ALLE2IS; +define pcodeop TLBI_ALLE3; +define pcodeop TLBI_ALLE3IS; +define pcodeop TLBI_ASIDE1; +define pcodeop TLBI_ASIDE1IS; +define pcodeop TLBI_IPAS2E1; +define pcodeop TLBI_IPAS2E1IS; +define pcodeop TLBI_IPAS2LE1; +define pcodeop TLBI_IPAS2LE1IS; +define pcodeop TLBI_VAAE1; +define pcodeop TLBI_VAALE1; +define pcodeop TLBI_VAAE1IS; +define pcodeop TLBI_VAALE1IS; +define pcodeop TLBI_VAE1; +define pcodeop TLBI_VAE1IS; +define pcodeop TLBI_VAE2; +define pcodeop TLBI_VAE2IS; +define pcodeop TLBI_VAE3; +define 
pcodeop TLBI_VAE3IS;
+define pcodeop TLBI_VALE1;
+define pcodeop TLBI_VALE1IS;
+define pcodeop TLBI_VALE2;
+define pcodeop TLBI_VALE2IS;
+define pcodeop TLBI_VALE3;
+define pcodeop TLBI_VALE3IS;
+define pcodeop TLBI_VMALLE1;
+define pcodeop TLBI_VMALLE1IS;
+define pcodeop TLBI_VMALLS12E1;
+define pcodeop TLBI_VMALLS12E1IS;
+define pcodeop UndefinedInstructionException;
+define pcodeop UnkSytemRegRead;
+define pcodeop UnkSytemRegWrite;
+define pcodeop WaitForEvent;
+define pcodeop WaitForInterrupt;
+define pcodeop xpac;
+define pcodeop Yield;
+
+# BTI and MemTag pseudo ops
+
+define pcodeop CopyPtrTag_AddToPtrTag_Exclude; # a combination of the ARM spec's ChooseNonExcludedTag and AddressWithAllocationTag
+define pcodeop ValidCallTarget;
+define pcodeop ValidJumpTarget; # jumps are valid regardless of the register holding the target
+define pcodeop ValidJumpTargetWhenDestIsX16OrX17; # jumps are valid if the register holding the target is x16 or x17, e.g. "br x16"
+define pcodeop ValidJumpTargetIfPermittedBySCTLR; # depending on EL and SCTLR[35,36], jumps using arbitrary registers may or may not be valid.
+define pcodeop ControlFlowPredictionRestrictionByContext;
+define pcodeop CachePrefetchPredictionRestrictionByContext;
+define pcodeop DataValuePredictionRestrictionByContext;
+define pcodeop RandomizePtrTag_Exclude;
+define pcodeop SetPtrTag; # this could be implemented in pcode, but it would break the data flow of the original ptr value
+define pcodeop LoadMemTag;
+define pcodeop StoreMemTag;
+define pcodeop AlignmentFault;
+
+# BTI show/hide operations, which use pcodeops defined above
+
+# for BTI
+BTI_BTITARGETS: is b_0607=0 { } # Not a valid target for jumps or calls
+BTI_BTITARGETS: "c" is ShowBTI=1 & b_0607=1 { ValidCallTarget(); ValidJumpTargetWhenDestIsX16OrX17(); } # BR x16 is valid, BR x5 isn't
+BTI_BTITARGETS: "j" is ShowBTI=1 & b_0607=2 { ValidJumpTarget(); }
+BTI_BTITARGETS: "jc" is ShowBTI=1 & b_0607=3 { ValidJumpTarget(); ValidCallTarget(); }
+# hidden versions of the above; used to prevent ValidXXXXTarget calls from cluttering decompiled code in switch statements etc.
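+# For example (assuming these subtables are attached to the BTI hint
+# instruction elsewhere in this spec): with ShowBTI=1, an encoding with
+# b_0607=1 displays the "c" operand and emits ValidCallTarget() and
+# ValidJumpTargetWhenDestIsX16OrX17(); with ShowBTI=0 the same encoding still
+# displays "c" but has an empty semantic section, so no pcodeops reach the
+# decompiler.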
+BTI_BTITARGETS: "c" is ShowBTI=0 & b_0607=1 { } +BTI_BTITARGETS: "j" is ShowBTI=0 & b_0607=2 { } +BTI_BTITARGETS: "jc" is ShowBTI=0 & b_0607=3 { } + +# for BRK and HLT +ALL_BTITARGETS: is ShowBTI=1 { ValidJumpTarget(); ValidCallTarget(); } +ALL_BTITARGETS: is ShowBTI=0 { } + +# for PACIASP and PACIBSP +PACIXSP_BTITARGETS: is ShowBTI=1 { + ValidCallTarget(); + ValidJumpTargetWhenDestIsX16OrX17(); + # global jump target in the following cases: + # EL == 0 and SCTLR[35] == 0 + # EL != 0 and SCTLR[36] == 0 + ValidJumpTargetIfPermittedBySCTLR(); # this doesn't seem important enough to clutter decompilations with a decision tree +} +PACIXSP_BTITARGETS: is ShowBTI=0 { } + + +# These pseudo ops are used in neon + +define pcodeop SIMD_PIECE; + +define pcodeop NEON_addv; +define pcodeop NEON_aesd; +define pcodeop NEON_aese; +define pcodeop NEON_aesimc; +define pcodeop NEON_aesmc; +define pcodeop NEON_bfdot; +define pcodeop NEON_bfmlalb; +define pcodeop NEON_bfmlalt; +define pcodeop NEON_bfmmla; +define pcodeop NEON_bif; +define pcodeop NEON_bit; +define pcodeop NEON_bsl; +define pcodeop NEON_cls; +define pcodeop NEON_clz; +define pcodeop NEON_cmeq; +define pcodeop NEON_cmge; +define pcodeop NEON_cmgt; +define pcodeop NEON_cmhi; +define pcodeop NEON_cmhs; +define pcodeop NEON_cmle; +define pcodeop NEON_cmlt; +define pcodeop NEON_cmtst; +define pcodeop NEON_cnt; +define pcodeop NEON_ext; +define pcodeop NEON_facge; +define pcodeop NEON_facgt; +define pcodeop NEON_fcadd; +define pcodeop NEON_fcmeq; +define pcodeop NEON_fcmge; +define pcodeop NEON_fcmgt; +define pcodeop NEON_fcmla; +define pcodeop NEON_fcmle; +define pcodeop NEON_fcmlt; +define pcodeop NEON_fcvtzs; +define pcodeop NEON_fcvtzu; +define pcodeop NEON_fmadd; +define pcodeop NEON_fmax; +define pcodeop NEON_fmaxnm; +define pcodeop NEON_fmaxnmp; +define pcodeop NEON_fmaxnmv; +define pcodeop NEON_fmaxp; +define pcodeop NEON_fmaxv; +define pcodeop NEON_fmin; +define pcodeop NEON_fminnm; +define pcodeop NEON_fminnmp; +define pcodeop NEON_fminnmv; +define pcodeop NEON_fminp; +define pcodeop NEON_fminv; +define pcodeop NEON_fmov; +define pcodeop NEON_fmsub; +define pcodeop NEON_fmulx; +define pcodeop NEON_fnmadd; +define pcodeop NEON_fnmsub; +define pcodeop NEON_frecpe; +define pcodeop NEON_frecps; +define pcodeop NEON_frecpx; +define pcodeop NEON_frsqrte; +define pcodeop NEON_frsqrts; +define pcodeop NEON_fsqrt; +define pcodeop NEON_neg; +define pcodeop NEON_pmul; +define pcodeop NEON_pmull; +define pcodeop NEON_pmull2; +define pcodeop NEON_raddhn; +define pcodeop NEON_rbit; +define pcodeop NEON_rev16; +define pcodeop NEON_rev32; +define pcodeop NEON_rev64; +define pcodeop NEON_rshrn; +define pcodeop NEON_rshrn2; +define pcodeop NEON_rsubhn; +define pcodeop NEON_rsubhn2; +define pcodeop NEON_saba; +define pcodeop NEON_sabd; +define pcodeop NEON_saddlv; +define pcodeop NEON_scvtf; +define pcodeop NEON_sdot; +define pcodeop NEON_sha1c; +define pcodeop NEON_sha1m; +define pcodeop NEON_sha1p; +define pcodeop NEON_sha1su0; +define pcodeop NEON_sha1su1; +define pcodeop NEON_sha256h; +define pcodeop NEON_sha256h2; +define pcodeop NEON_sha256su0; +define pcodeop NEON_sha256su1; +define pcodeop NEON_sha512h; +define pcodeop NEON_sha512h2; +define pcodeop NEON_sha512su0; +define pcodeop NEON_sha512su1; +define pcodeop NEON_shadd; +define pcodeop NEON_shl; +define pcodeop NEON_shsub; +define pcodeop NEON_sli; +define pcodeop NEON_sm3partw1; +define pcodeop NEON_sm3partw2; +define pcodeop NEON_sm3ss1; +define pcodeop NEON_sm3tt1a; +define pcodeop 
NEON_sm3tt1b; +define pcodeop NEON_sm3tt2a; +define pcodeop NEON_sm3tt2b; +define pcodeop NEON_sm4e; +define pcodeop NEON_sm4ekey; +define pcodeop NEON_smax; +define pcodeop NEON_smaxp; +define pcodeop NEON_smaxv; +define pcodeop NEON_smin; +define pcodeop NEON_sminp; +define pcodeop NEON_sminv; +define pcodeop NEON_smmla; +define pcodeop NEON_sqadd; +define pcodeop NEON_sqdmulh; +define pcodeop NEON_sqdmull; +define pcodeop NEON_sqrdml_as_h; +define pcodeop NEON_sqrdmulh; +define pcodeop NEON_sqrshl; +define pcodeop NEON_sqrshrn; +define pcodeop NEON_sqrshrn2; +define pcodeop NEON_sqrshrun; +define pcodeop NEON_sqrshrun2; +define pcodeop NEON_sqshl; +define pcodeop NEON_sqshlu; +define pcodeop NEON_sqshrn; +define pcodeop NEON_sqshrn2; +define pcodeop NEON_sqshrun; +define pcodeop NEON_sqshrun2; +define pcodeop NEON_sqsub; +define pcodeop NEON_sqxtn; +define pcodeop NEON_sqxtn2; +define pcodeop NEON_sqxtun; +define pcodeop NEON_sqxtun2; +define pcodeop NEON_srhadd; +define pcodeop NEON_sri; +define pcodeop NEON_srshl; +define pcodeop NEON_srshr; +define pcodeop NEON_sshl; +define pcodeop NEON_sshr; +define pcodeop NEON_sudot; +define pcodeop NEON_uaba; +define pcodeop NEON_uabd; +define pcodeop NEON_uaddlv; +define pcodeop NEON_ucvtf; +define pcodeop NEON_udot; +define pcodeop NEON_uhadd; +define pcodeop NEON_uhsub; +define pcodeop NEON_umax; +define pcodeop NEON_umaxp; +define pcodeop NEON_umaxv; +define pcodeop NEON_umin; +define pcodeop NEON_uminp; +define pcodeop NEON_uminv; +define pcodeop NEON_ummla; +define pcodeop NEON_umull; +define pcodeop NEON_uqadd; +define pcodeop NEON_uqrshl; +define pcodeop NEON_uqrshrn; +define pcodeop NEON_uqrshrn2; +define pcodeop NEON_uqshl; +define pcodeop NEON_uqshrn; +define pcodeop NEON_uqshrn2; +define pcodeop NEON_uqsub; +define pcodeop NEON_uqxtn; +define pcodeop NEON_uqxtn2; +define pcodeop NEON_urecpe; +define pcodeop NEON_urhadd; +define pcodeop NEON_urshl; +define pcodeop NEON_urshr; +define pcodeop NEON_ursqrte; +define pcodeop NEON_usdot; +define pcodeop NEON_ushl; +define pcodeop NEON_usmmla; +define pcodeop NEON_usqadd; + +# These pseudo ops are automatically generated + +define pcodeop SVE_abs; +define pcodeop SVE_add; +define pcodeop SVE_addpl; +define pcodeop SVE_addvl; +define pcodeop SVE_adr; +define pcodeop SVE_and; +define pcodeop SVE_ands; +define pcodeop SVE_andv; +define pcodeop SVE_asr; +define pcodeop SVE_asrd; +define pcodeop SVE_asrr; +define pcodeop SVE_bic; +define pcodeop SVE_bics; +define pcodeop SVE_brka; +define pcodeop SVE_brkas; +define pcodeop SVE_brkb; +define pcodeop SVE_brkbs; +define pcodeop SVE_brkn; +define pcodeop SVE_brkns; +define pcodeop SVE_brkpa; +define pcodeop SVE_brkpas; +define pcodeop SVE_brkpb; +define pcodeop SVE_brkpbs; +define pcodeop SVE_clasta; +define pcodeop SVE_clastb; +define pcodeop SVE_cls; +define pcodeop SVE_clz; +define pcodeop SVE_cmpeq; +define pcodeop SVE_cmpge; +define pcodeop SVE_cmpgt; +define pcodeop SVE_cmphi; +define pcodeop SVE_cmphs; +define pcodeop SVE_cmple; +define pcodeop SVE_cmplo; +define pcodeop SVE_cmpls; +define pcodeop SVE_cmplt; +define pcodeop SVE_cmpne; +define pcodeop SVE_cnot; +define pcodeop SVE_cnt; +define pcodeop SVE_cntb; +define pcodeop SVE_cntd; +define pcodeop SVE_cnth; +define pcodeop SVE_cntp; +define pcodeop SVE_cntw; +define pcodeop SVE_compact; +define pcodeop SVE_cpy; +define pcodeop SVE_ctermeq; +define pcodeop SVE_ctermne; +define pcodeop SVE_decb; +define pcodeop SVE_decd; +define pcodeop SVE_dech; +define pcodeop SVE_decp; +define pcodeop 
SVE_decw; +define pcodeop SVE_dup; +define pcodeop SVE_dupm; +define pcodeop SVE_eon; +define pcodeop SVE_eor; +define pcodeop SVE_eors; +define pcodeop SVE_eorv; +define pcodeop SVE_ext; +define pcodeop SVE_fabd; +define pcodeop SVE_fabs; +define pcodeop SVE_facge; +define pcodeop SVE_facgt; +define pcodeop SVE_fadd; +define pcodeop SVE_fadda; +define pcodeop SVE_faddv; +define pcodeop SVE_fcadd; +define pcodeop SVE_fcmeq; +define pcodeop SVE_fcmge; +define pcodeop SVE_fcmgt; +define pcodeop SVE_fcmla; +define pcodeop SVE_fcmle; +define pcodeop SVE_fcmlt; +define pcodeop SVE_fcmne; +define pcodeop SVE_fcmuo; +define pcodeop SVE_fcpy; +define pcodeop SVE_fcvt; +define pcodeop SVE_fcvtzs; +define pcodeop SVE_fcvtzu; +define pcodeop SVE_fdiv; +define pcodeop SVE_fdivr; +define pcodeop SVE_fdup; +define pcodeop SVE_fexpa; +define pcodeop SVE_fmad; +define pcodeop SVE_fmax; +define pcodeop SVE_fmaxnm; +define pcodeop SVE_fmaxnmv; +define pcodeop SVE_fmaxv; +define pcodeop SVE_fmin; +define pcodeop SVE_fminnm; +define pcodeop SVE_fminnmv; +define pcodeop SVE_fminv; +define pcodeop SVE_fmla; +define pcodeop SVE_fmls; +define pcodeop SVE_fmov; +define pcodeop SVE_fmsb; +define pcodeop SVE_fmul; +define pcodeop SVE_fmulx; +define pcodeop SVE_fneg; +define pcodeop SVE_fnmad; +define pcodeop SVE_fnmla; +define pcodeop SVE_fnmls; +define pcodeop SVE_fnmsb; +define pcodeop SVE_frecpe; +define pcodeop SVE_frecps; +define pcodeop SVE_frecpx; +define pcodeop SVE_frinta; +define pcodeop SVE_frinti; +define pcodeop SVE_frintm; +define pcodeop SVE_frintn; +define pcodeop SVE_frintp; +define pcodeop SVE_frintx; +define pcodeop SVE_frintz; +define pcodeop SVE_frsqrte; +define pcodeop SVE_frsqrts; +define pcodeop SVE_fscale; +define pcodeop SVE_fsqrt; +define pcodeop SVE_fsub; +define pcodeop SVE_fsubr; +define pcodeop SVE_ftmad; +define pcodeop SVE_ftsmul; +define pcodeop SVE_ftssel; +define pcodeop SVE_incb; +define pcodeop SVE_incd; +define pcodeop SVE_inch; +define pcodeop SVE_incp; +define pcodeop SVE_incw; +define pcodeop SVE_index; +define pcodeop SVE_insr; +define pcodeop SVE_lasta; +define pcodeop SVE_lastb; +define pcodeop SVE_ld1b; +define pcodeop SVE_ld1d; +define pcodeop SVE_ld1h; +define pcodeop SVE_ld1rb; +define pcodeop SVE_ld1rd; +define pcodeop SVE_ld1rh; +define pcodeop SVE_ld1rqb; +define pcodeop SVE_ld1rqd; +define pcodeop SVE_ld1rqh; +define pcodeop SVE_ld1rqw; +define pcodeop SVE_ld1rsb; +define pcodeop SVE_ld1rsh; +define pcodeop SVE_ld1rsw; +define pcodeop SVE_ld1rw; +define pcodeop SVE_ld1sb; +define pcodeop SVE_ld1sh; +define pcodeop SVE_ld1sw; +define pcodeop SVE_ld1w; +define pcodeop SVE_ld2b; +define pcodeop SVE_ld2d; +define pcodeop SVE_ld2h; +define pcodeop SVE_ld2w; +define pcodeop SVE_ld3b; +define pcodeop SVE_ld3d; +define pcodeop SVE_ld3h; +define pcodeop SVE_ld3w; +define pcodeop SVE_ld4b; +define pcodeop SVE_ld4d; +define pcodeop SVE_ld4h; +define pcodeop SVE_ld4w; +define pcodeop SVE_ldff1b; +define pcodeop SVE_ldff1d; +define pcodeop SVE_ldff1h; +define pcodeop SVE_ldff1sb; +define pcodeop SVE_ldff1sh; +define pcodeop SVE_ldff1sw; +define pcodeop SVE_ldff1w; +define pcodeop SVE_ldnf1b; +define pcodeop SVE_ldnf1d; +define pcodeop SVE_ldnf1h; +define pcodeop SVE_ldnf1sb; +define pcodeop SVE_ldnf1sh; +define pcodeop SVE_ldnf1sw; +define pcodeop SVE_ldnf1w; +define pcodeop SVE_ldnt1b; +define pcodeop SVE_ldnt1d; +define pcodeop SVE_ldnt1h; +define pcodeop SVE_ldnt1w; +define pcodeop SVE_ldr; +define pcodeop SVE_lsl; +define pcodeop SVE_lslr; +define pcodeop SVE_lsr; +define 
pcodeop SVE_lsrr; +define pcodeop SVE_mad; +define pcodeop SVE_mla; +define pcodeop SVE_mls; +define pcodeop SVE_movprfx; +define pcodeop SVE_msb; +define pcodeop SVE_mul; +define pcodeop SVE_nand; +define pcodeop SVE_nands; +define pcodeop SVE_neg; +define pcodeop SVE_nor; +define pcodeop SVE_nors; +define pcodeop SVE_not; +define pcodeop SVE_orn; +define pcodeop SVE_orns; +define pcodeop SVE_orr; +define pcodeop SVE_orrs; +define pcodeop SVE_orv; +define pcodeop SVE_pfalse; +define pcodeop SVE_pfirst; +define pcodeop SVE_pnext; +define pcodeop SVE_prfb; +define pcodeop SVE_prfd; +define pcodeop SVE_prfh; +define pcodeop SVE_prfw; +define pcodeop SVE_ptest; +define pcodeop SVE_ptrue; +define pcodeop SVE_ptrues; +define pcodeop SVE_punpkhi; +define pcodeop SVE_punpklo; +define pcodeop SVE_rbit; +define pcodeop SVE_rdffr; +define pcodeop SVE_rdffrs; +define pcodeop SVE_rdvl; +define pcodeop SVE_rev; +define pcodeop SVE_revb; +define pcodeop SVE_revh; +define pcodeop SVE_revw; +define pcodeop SVE_sabd; +define pcodeop SVE_saddv; +define pcodeop SVE_scvtf; +define pcodeop SVE_sdiv; +define pcodeop SVE_sdivr; +define pcodeop SVE_sdot; +define pcodeop SVE_sel; +define pcodeop SVE_smax; +define pcodeop SVE_smaxv; +define pcodeop SVE_smin; +define pcodeop SVE_sminv; +define pcodeop SVE_smulh; +define pcodeop SVE_splice; +define pcodeop SVE_sqadd; +define pcodeop SVE_sqdecb; +define pcodeop SVE_sqdecd; +define pcodeop SVE_sqdech; +define pcodeop SVE_sqdecp; +define pcodeop SVE_sqdecw; +define pcodeop SVE_sqincb; +define pcodeop SVE_sqincd; +define pcodeop SVE_sqinch; +define pcodeop SVE_sqincp; +define pcodeop SVE_sqincw; +define pcodeop SVE_sqsub; +define pcodeop SVE_st1b; +define pcodeop SVE_st1d; +define pcodeop SVE_st1h; +define pcodeop SVE_st1w; +define pcodeop SVE_st2b; +define pcodeop SVE_st2d; +define pcodeop SVE_st2h; +define pcodeop SVE_st2w; +define pcodeop SVE_st3b; +define pcodeop SVE_st3d; +define pcodeop SVE_st3h; +define pcodeop SVE_st3w; +define pcodeop SVE_st4b; +define pcodeop SVE_st4d; +define pcodeop SVE_st4h; +define pcodeop SVE_st4w; +define pcodeop SVE_stnt1b; +define pcodeop SVE_stnt1d; +define pcodeop SVE_stnt1h; +define pcodeop SVE_stnt1w; +define pcodeop SVE_str; +define pcodeop SVE_sub; +define pcodeop SVE_subr; +define pcodeop SVE_sunpkhi; +define pcodeop SVE_sunpklo; +define pcodeop SVE_sxtb; +define pcodeop SVE_sxth; +define pcodeop SVE_sxtw; +define pcodeop SVE_tbl; +define pcodeop SVE_trn1; +define pcodeop SVE_trn2; +define pcodeop SVE_uabd; +define pcodeop SVE_uaddv; +define pcodeop SVE_ucvtf; +define pcodeop SVE_udiv; +define pcodeop SVE_udivr; +define pcodeop SVE_udot; +define pcodeop SVE_umax; +define pcodeop SVE_umaxv; +define pcodeop SVE_umin; +define pcodeop SVE_uminv; +define pcodeop SVE_umulh; +define pcodeop SVE_uqadd; +define pcodeop SVE_uqdecb; +define pcodeop SVE_uqdecd; +define pcodeop SVE_uqdech; +define pcodeop SVE_uqdecp; +define pcodeop SVE_uqdecw; +define pcodeop SVE_uqincb; +define pcodeop SVE_uqincd; +define pcodeop SVE_uqinch; +define pcodeop SVE_uqincp; +define pcodeop SVE_uqincw; +define pcodeop SVE_uqsub; +define pcodeop SVE_uunpkhi; +define pcodeop SVE_uunpklo; +define pcodeop SVE_uxtb; +define pcodeop SVE_uxth; +define pcodeop SVE_uxtw; +define pcodeop SVE_uzp1; +define pcodeop SVE_uzp2; +define pcodeop SVE_whilele; +define pcodeop SVE_whilelo; +define pcodeop SVE_whilels; +define pcodeop SVE_whilelt; +define pcodeop SVE_wrffr; +define pcodeop SVE_zip1; +define pcodeop SVE_zip2; + +# SECTION macros + +# begin macros related to 
memory-tagging
+
+macro AllocationTagFromAddress(result, op)
+{
+ # Summary: Sometimes the decompiler won't show this, but that's usually okay.
+ #
+ # A potential downside to actually implementing this, rather than using a pseudo-op,
+ # is that the whole operation can get optimized out to zero by the decompiler when
+ # tags are being ignored/non-populated by the user. (This zero-tagging is helped along by
+ # SetPtrTag being a pseudo-op rather than a macro, which is done to preserve data-flow.)
+ # The optimization makes it harder to tell that tag-related things are happening;
+ # however, it's arguably convenient to omit a bunch of tag-related stuff when tags
+ # are being ignored by the user.
+
+ result = (op >> 56) & 0xf;
+ # decompiler output: return unaff_x30 | 1 << ((ulonglong)register0x00000008 >> 0x38 & 0xf);
+ # An alternate implementation is the following, which has the downside of adding at least one extra length conversion:
+ # result = zext(op[56,4]);
+ # decompiler output: return unaff_x30 | 1 << (ulonglong)((byte)((ulonglong)register0x00000008 >> 0x38) & 0xf);
+}
+
+macro Align(value, sze)
+{
+ value = value & ~(sze - 1);
+}
+
+macro RequireGranuleAlignment(addr)
+{
+ misalignment:8 = addr & ($(TAG_GRANULE) - 1);
+ if (misalignment == 0) goto <end>;
+ AlignmentFault();
+ <end>
+}
+
+macro Or2BytesWithExcludedTags(tmp)
+{
+ tmp = (tmp | gcr_el1.exclude) & 0xffff;
+}
+
+# end of memory-tagging macros
+
+macro addflags(op1, op2)
+{
+ tmpCY = carry(op1, op2);
+ tmpOV = scarry(op1, op2);
+}
+
+macro add_with_carry_flags(op1,op2){
+ local carry_in = zext(CY);
+ local tempResult = op1 + op2;
+ tmpCY = carry(op1,op2) || carry(tempResult, carry_in);
+ tmpOV = scarry(op1,op2) ^^ scarry(tempResult, carry_in);
+}
+
+macro affectflags()
+{
+ NG = tmpNG; ZR = tmpZR; CY = tmpCY; OV = tmpOV;
+}
+
+macro affectLflags()
+{
+ NG = tmpNG; ZR = tmpZR; CY = 0; OV = 0;
+}
+
+# NOTE unlike x86, carry flag is SET if there is NO borrow
+macro subflags(op1, op2)
+{
+ tmpCY = op1 >= op2;
+ tmpOV = sborrow(op1, op2);
+}
+
+# Special case when the first op of the macro call is 0
+macro subflags0(op2)
+{
+ tmpCY = 0 == op2;
+ tmpOV = sborrow(0, op2);
+}
+
+macro logicflags()
+{
+ tmpCY = shift_carry;
+ tmpOV = OV;
+}
+
+macro CVunaffected()
+{
+ tmpCY = CY;
+ tmpOV = OV;
+}
+
+macro resultflags(result)
+{
+ tmpNG = result s< 0;
+ tmpZR = result == 0;
+}
+
+macro fcomp(a, b)
+{
+ NG = a f< b;
+ ZR = a f== b;
+ CY = a f>= b;
+ OV = 0;
+}
+
+macro ftestNAN(a, b)
+{
+ NG = 0;
+ ZR = 0;
+ CY = 0;
+ OV = 0;
+ tst:1 = nan(a) || nan(b);
+ if (tst) goto inst_next;
+}
+
+macro ROR32(out, val, rotate)
+{
+ out = ( val >> rotate) | ( val << ( 32 - rotate ) );
+}
+
+macro ROR64(out, val, rotate)
+{
+ out = ( val >> rotate) | ( val << ( 64 - rotate ) );
+}
+
+macro selectCC(result, val1, val2, condition)
+{
+ result = (zext(condition) * val1) + (zext(!condition) * val2);
+}
+
+macro setCC_NZCV(condMask)
+{
+ NG = (condMask & 0x8) == 0x8;
+ ZR = (condMask & 0x4) == 0x4;
+ CY = (condMask & 0x2) == 0x2;
+ OV = (condMask & 0x1) == 0x1;
+}
+
+macro set_NZCV(value, condMask)
+{
+ setNG:1 = (condMask & 0x8) == 0x8;
+ NG = ((setNG==0) * NG) | ((setNG==1) * (((value >> 3) & 1) ==1));
+ setZR:1 = (condMask & 0x4) == 0x4;
+ ZR = ((setZR==0) * ZR) | ((setZR==1) * (((value >> 2) & 1) ==1));
+ setCY:1 = (condMask & 0x2) == 0x2;
+ CY = ((setCY==0) * CY) | ((setCY==1) * (((value >> 1) & 1) == 1));
+ setOV:1 = (condMask & 0x1) == 0x1;
+ OV = ((setOV==0) * OV) | ((setOV==1) * (((value >> 0) & 1) == 1));
+}
+
+# Macro to access simd lanes
+
+# Macros to zero the high bits of the Z or Q registers
+# These are friendlier to the decompiler
+
+macro zext_zb(reg)
+{
+ reg[8,56] = 0;
+ reg[64,64] = 0;
+ reg[128,64] = 0;
+ reg[192,64] = 0;
+}
+
+macro zext_zh(reg)
+{
+ reg[16,48] = 0;
+ reg[64,64] = 0;
+ reg[128,64] = 0;
+ reg[192,64] = 0;
+}
+
+macro zext_zs(reg)
+{
+ reg[32,32] = 0;
+ reg[64,64] = 0;
+ reg[128,64] = 0;
+ reg[192,64] = 0;
+}
+
+macro zext_zd(reg)
+{
+ reg[64,64] = 0;
+ reg[128,64] = 0;
+ reg[192,64] = 0;
+}
+
+macro zext_zq(reg)
+{
+ reg[128,64] = 0;
+ reg[192,64] = 0;
+}
+
+macro zext_rb(reg)
+{
+ reg[8,56] = 0;
+}
+
+macro zext_rh(reg)
+{
+ reg[16,48] = 0;
+}
+
+macro zext_rs(reg)
+{
+ reg[32,32] = 0;
+}
+
+# SECTION instructions
+
+:^instruction
+is ImmS_ImmR_TestSet=0 & ImmR & ImmS & instruction
+[
+ ImmS_LT_ImmR = (((ImmS - ImmR) >> 6) $and 1);
+ ImmS_EQ_ImmR = ~((((ImmS - ImmR) >> 6) $and 1) | (((ImmR - ImmS) >> 6) $and 1));
+ # For ubfm, lsl is the preferred alias when imms + 1 == immr, so we must subtract an extra one
+ # to determine when ubfiz is the preferred alias.
+ ImmS_LT_ImmR_minus_1 = (((ImmS - (ImmR - 1)) >> 6) & 0x1) & (((ImmS - (ImmR - 1)) >> 7) & 0x1);
+ ImmS_ne_1f = (((ImmS - 0x1f) >> 6) & 0x1) | (((0x1f - ImmS) >> 6) & 0x1);
+ ImmS_ne_3f = (((ImmS - 0x3f) >> 6) & 0x1) | (((0x3f - ImmS) >> 6) & 0x1);
+ ImmS_ImmR_TestSet=1;
+]{}
+
+with : ImmS_ImmR_TestSet=1 {
+
+@include "AARCH64_base_PACoptions.sinc"
+
+@include "AARCH64base.sinc"
+@include "AARCH64neon.sinc"
+@include "AARCH64ldst.sinc"
+@include "AARCH64sve.sinc"
+
+# TODO These are placeholders until the correct instruction implementations can be found
+
+:NotYetImplemented_UNK1
+is b_0031=0xe7ffdeff
+unimpl
+
+:NotYetImplemented_UNK2
+is b_0031=0x00200820
+unimpl
+
+:NotYetImplemented_UNK3
+is b_0031=0x00200c20
+unimpl
+
+} # end with ImmS_ImmR_TestSet=1
+
diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64ldst.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64ldst.sinc
new file mode 100644
index 00000000..afe3af0a
--- /dev/null
+++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64ldst.sinc
@@ -0,0 +1,10260 @@
+# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 KEEPWITH
+# INFO This file automatically generated by andre on Fri Jun 8 10:47:29 2018
+# INFO Direct edits to this file may be lost in future updates
+# INFO Command line arguments: ['../../../ProcessorTest/test/andre/scrape/a64ldst.py']
+
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=0 & b_13=0 [ tmp = 1; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=0 [ tmp = 1; ] { }
+ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=0 & b_13=0 [ tmp = 2; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=1 & b_13=0 & b_10=0 [ tmp = 2; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 2; ] { }
+ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=0 [ tmp = 2; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=0 & b_13=1 [ tmp = 3; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=0 [ tmp = 3; ] { }
+ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=0 & b_13=1 [ tmp = 4; ] { }
+ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=1 & b_13=0 & b_10=0 [ tmp = 4; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=0 & b_11=0 & b_10=0 [ tmp = 4; ] { }
+ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=0
[ tmp = 4; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 4; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=0 [ tmp = 4; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_10=0 [ tmp = 6; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 6; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_12=1 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=1 & b_13=1 & b_10=0 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=0 & b_11=0 & b_10=0 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=1 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 8; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=1 & b_11=0 & b_10=0 [ tmp = 12; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=0 [ tmp = 12; ] { } +ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_12=1 [ tmp = 16; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_12=0 & b_11=0 [ tmp = 16; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 16; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 16; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 16; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=1 & b_11=0 & b_10=0 [ tmp = 16; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=1 [ tmp = 16; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=0 [ tmp = 16; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_12=0 & b_11=0 [ tmp = 24; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_12=0 & b_11=1 & b_10=0 [ tmp = 24; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 24; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 24; ] { } +ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 24; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_12=0 & b_11=0 [ tmp = 32; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 32; ] { } +ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 32; ] { } +ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_12=0 [ tmp = 32; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 32; ] { } +ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 32; ] { } +ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_12=0 [ tmp = 48; ] { } +ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_12=0 [ tmp = 64; ] { } + +ldst_wback: "" is b_23=0 & b_1620=0b00000 { } +ldst_wback: ", #"^ldst_imm is 
b_23=1 & b_1620=0b11111 & Rn_GPR64xsp & ldst_imm { Rn_GPR64xsp = tmp_ldXn; } +ldst_wback: ", "^Rm_GPR64 is b_23=1 & Rn_GPR64xsp & Rm_GPR64 { Rn_GPR64xsp = Rm_GPR64; } + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] + +:ld1 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] + +:ld1 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + 
Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] + +:ld1 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.1D, Vt2.1D, Vt3.1D, Vt4.1D}, [Xn|SP] [, wback] + +:ld1 {vVt^".1D", vVtt^".1D", vVttt^".1D", vVtttt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] + +:ld1 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & 
b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + 
Rtttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] + +:ld1 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH 
x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] + +:ld1 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] + +:ld1 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] + +:ld1 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[8,8] = *:1 tmp_ldXn; + 
tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c406400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] + +:ld1 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] + +:ld1 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; 
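+ # LD1 (multiple structures) is a plain sequential fill: each lane is read at
+ # the current cursor, which then advances by the element size, so Vt..Vt3
+ # receive consecutive memory with no de-interleaving (contrast with LD2 below).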
+ tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c406c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.1D, Vt2.1D, Vt3.1D}, [Xn|SP] [, wback] + +:ld1 {vVt^".1D", vVtt^".1D", vVttt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] + +:ld1 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + 
Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c406400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] + +:ld1 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT 
x4c406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] + +:ld1 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c406c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] + +:ld1 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8B}, [Xn|SP] [, wback] + +:ld1 {vVt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c407400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4H}, [Xn|SP] [, wback] + +:ld1 
{vVt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2S}, [Xn|SP] [, wback] + +:ld1 {vVt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c407c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.1D}, [Xn|SP] [, wback] + +:ld1 {vVt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.16B}, [Xn|SP] [, wback] + +:ld1 {vVt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c407400/mask=xff60fc00 MATCHED 2 
DOCUMENTED OPCODES +# ld1 {Vt.8H}, [Xn|SP] [, wback] + +:ld1 {vVt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4S}, [Xn|SP] [, wback] + +:ld1 {vVt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c407c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2D}, [Xn|SP] [, wback] + +:ld1 {vVt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] + +:ld1 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = 
tmp_ldXn + 1; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] + +:ld1 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c40a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] + +:ld1 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x0c40ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.1D, Vt2.1D}, [Xn|SP] [, wback] + +:ld1 {vVt^".1D", vVtt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR64[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] + +:ld1 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn 
= tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] + +:ld1 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build 
ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c40a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] + +:ld1 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0c402000/mask=xbfff2000 +# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 MATCH x0cc02000/mask=xbfe02000 +# CONSTRUCT x4c40ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] + +:ld1 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[0], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[1], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[2], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 
& b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d400c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[3], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d401000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[4], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d401400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[5], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d401800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[6], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d401c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[7], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 
+# CONSTRUCT x4d400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[8], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[9], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[10], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d400c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[11], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d401000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[12], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d401400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[13], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + 
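+# Note: for LD1 (single structure) the lane index is encoded in Q:S:size
+# (bits 30, 12, and 11:10), so every lane gets its own constructor; only the
+# addressed lane of Vt is written and the remaining lanes are preserved.
+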
+# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d401800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[14], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d401c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.B}[15], [Xn|SP] [, wback] + +:ld1 {vVt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[0], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[1], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d405000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[2], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d405800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[3], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & 
Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[4], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[5], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d405000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[6], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d405800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.H}[7], [Xn|SP] [, wback] + +:ld1 {vVt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.S}[0], [Xn|SP] [, wback] + +:ld1 {vVt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d409000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 
{Vt.S}[1], [Xn|SP] [, wback] + +:ld1 {vVt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.S}[2], [Xn|SP] [, wback] + +:ld1 {vVt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d409000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.S}[3], [Xn|SP] [, wback] + +:ld1 {vVt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.D}[0], [Xn|SP] [, wback] + +:ld1 {vVt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld1 {Vt.D}[1], [Xn|SP] [, wback] + +:ld1 {vVt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d40c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.8B}, [Xn|SP] [, wback] + +:ld1r {vVt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:1 = 0; + tmpv = *:1 tmp_ldXn; 
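+ # LD1R: the single element loaded above is broadcast into every lane below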
+ Rt_VPR64[0,8] = tmpv; + Rt_VPR64[8,8] = tmpv; + Rt_VPR64[16,8] = tmpv; + Rt_VPR64[24,8] = tmpv; + Rt_VPR64[32,8] = tmpv; + Rt_VPR64[40,8] = tmpv; + Rt_VPR64[48,8] = tmpv; + Rt_VPR64[56,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d40c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.4H}, [Xn|SP] [, wback] + +:ld1r {vVt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:2 = 0; + tmpv = *:2 tmp_ldXn; + Rt_VPR64[0,16] = tmpv; + Rt_VPR64[16,16] = tmpv; + Rt_VPR64[32,16] = tmpv; + Rt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d40c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.2S}, [Xn|SP] [, wback] + +:ld1r {vVt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:4 = 0; + tmpv = *:4 tmp_ldXn; + Rt_VPR64[0,32] = tmpv; + Rt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x0d40cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.1D}, [Xn|SP] [, wback] + +:ld1r {vVt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:8 = 0; + tmpv = *:8 tmp_ldXn; + Rt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d40c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.16B}, [Xn|SP] [, wback] + +:ld1r {vVt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:1 = 0; + tmpv = *:1 tmp_ldXn; + Rt_VPR128[0,8] = tmpv; + Rt_VPR128[8,8] = tmpv; + Rt_VPR128[16,8] = tmpv; + 
Rt_VPR128[24,8] = tmpv; + Rt_VPR128[32,8] = tmpv; + Rt_VPR128[40,8] = tmpv; + Rt_VPR128[48,8] = tmpv; + Rt_VPR128[56,8] = tmpv; + Rt_VPR128[64,8] = tmpv; + Rt_VPR128[72,8] = tmpv; + Rt_VPR128[80,8] = tmpv; + Rt_VPR128[88,8] = tmpv; + Rt_VPR128[96,8] = tmpv; + Rt_VPR128[104,8] = tmpv; + Rt_VPR128[112,8] = tmpv; + Rt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d40c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.8H}, [Xn|SP] [, wback] + +:ld1r {vVt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:2 = 0; + tmpv = *:2 tmp_ldXn; + Rt_VPR128[0,16] = tmpv; + Rt_VPR128[16,16] = tmpv; + Rt_VPR128[32,16] = tmpv; + Rt_VPR128[48,16] = tmpv; + Rt_VPR128[64,16] = tmpv; + Rt_VPR128[80,16] = tmpv; + Rt_VPR128[96,16] = tmpv; + Rt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d40c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.4S}, [Xn|SP] [, wback] + +:ld1r {vVt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:4 = 0; + tmpv = *:4 tmp_ldXn; + Rt_VPR128[0,32] = tmpv; + Rt_VPR128[32,32] = tmpv; + Rt_VPR128[64,32] = tmpv; + Rt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0d40c000/mask=xbffff000 +# C7.2.164 LD1R page C7-1367 line 79486 MATCH x0dc0c000/mask=xbfe0f000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0d400000/mask=xbfff2000 +# C7.2.163 LD1 (single structure) page C7-1363 line 79245 MATCH x0dc00000/mask=xbfe02000 +# CONSTRUCT x4d40cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld1r {Vt.2D}, [Xn|SP] [, wback] + +:ld1r {vVt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:8 = 0; + tmpv = *:8 tmp_ldXn; + Rt_VPR128[0,64] = tmpv; + Rt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x0c408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] + +:ld2 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & 
Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x0c408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] + +:ld2 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x0c408800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] + +:ld2 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x4c408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] + +:ld2 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x4c408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] + +:ld2 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 
2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x4c408800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] + +:ld2 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0c408000/mask=xbffff000 +# C7.2.165 LD2 (multiple structures) page C7-1370 line 79672 MATCH x0cc08000/mask=xbfe0f000 +# CONSTRUCT x4c408c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] + +:ld2 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d600000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[0], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d600400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[1], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page 
C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d600800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[2], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d600c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[3], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d601000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[4], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d601400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[5], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d601800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[6], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # 
neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d601c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[7], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d600000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[8], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d600400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[9], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d600800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[10], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d600c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[11], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + 
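# ld2: the next byte in memory fills lane 11 of Vt2 +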
tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d601000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[12], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d601400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[13], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d601800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[14], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d601c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.B, Vt2.B}[15], [Xn|SP] [, wback] + +:ld2 {vVt^".B", vVtt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d604000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[0], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & 
Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d604800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[1], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d605000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[2], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d605800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[3], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d604000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[4], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d604800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[5], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 
& b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d605000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[6], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d605800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.H, Vt2.H}[7], [Xn|SP] [, wback] + +:ld2 {vVt^".H", vVtt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d608000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.S, Vt2.S}[0], [Xn|SP] [, wback] + +:ld2 {vVt^".S", vVtt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d609000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.S, Vt2.S}[1], [Xn|SP] [, wback] + +:ld2 {vVt^".S", vVtt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d608000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.S, Vt2.S}[2], [Xn|SP] [, wback] + +:ld2 {vVt^".S", 
vVtt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d609000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.S, Vt2.S}[3], [Xn|SP] [, wback] + +:ld2 {vVt^".S", vVtt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d608400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.D, Vt2.D}[0], [Xn|SP] [, wback] + +:ld2 {vVt^".D", vVtt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d608400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld2 {Vt.D, Vt2.D}[1], [Xn|SP] [, wback] + +:ld2 {vVt^".D", vVtt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d60c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] + +:ld2r {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:1 = 0; + tmpv = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = tmpv; + Rtt_VPR64[8,8] = tmpv; + Rtt_VPR64[16,8] = tmpv; + Rtt_VPR64[24,8] = tmpv; + Rtt_VPR64[32,8] = tmpv; + Rtt_VPR64[40,8] = tmpv; + Rtt_VPR64[48,8] = tmpv; + Rtt_VPR64[56,8] = tmpv; 
+ tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = tmpv; + Rtt_VPR64[8,8] = tmpv; + Rtt_VPR64[16,8] = tmpv; + Rtt_VPR64[24,8] = tmpv; + Rtt_VPR64[32,8] = tmpv; + Rtt_VPR64[40,8] = tmpv; + Rtt_VPR64[48,8] = tmpv; + Rtt_VPR64[56,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d60c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] + +:ld2r {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:2 = 0; + tmpv = *:2 tmp_ldXn; + Rt_VPR64[0,16] = tmpv; + Rt_VPR64[16,16] = tmpv; + Rt_VPR64[32,16] = tmpv; + Rt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = tmpv; + Rtt_VPR64[16,16] = tmpv; + Rtt_VPR64[32,16] = tmpv; + Rtt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d60c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] + +:ld2r {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:4 = 0; + tmpv = *:4 tmp_ldXn; + Rt_VPR64[0,32] = tmpv; + Rt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = tmpv; + Rtt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x0d60cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.1D, Vt2.1D}, [Xn|SP] [, wback] + +:ld2r {vVt^".1D", vVtt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:8 = 0; + tmpv = *:8 tmp_ldXn; + Rt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rtt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 
(single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d60c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] + +:ld2r {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:1 = 0; + tmpv = *:1 tmp_ldXn; + Rt_VPR128[0,8] = tmpv; + Rt_VPR128[8,8] = tmpv; + Rt_VPR128[16,8] = tmpv; + Rt_VPR128[24,8] = tmpv; + Rt_VPR128[32,8] = tmpv; + Rt_VPR128[40,8] = tmpv; + Rt_VPR128[48,8] = tmpv; + Rt_VPR128[56,8] = tmpv; + Rt_VPR128[64,8] = tmpv; + Rt_VPR128[72,8] = tmpv; + Rt_VPR128[80,8] = tmpv; + Rt_VPR128[88,8] = tmpv; + Rt_VPR128[96,8] = tmpv; + Rt_VPR128[104,8] = tmpv; + Rt_VPR128[112,8] = tmpv; + Rt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = tmpv; + Rtt_VPR128[8,8] = tmpv; + Rtt_VPR128[16,8] = tmpv; + Rtt_VPR128[24,8] = tmpv; + Rtt_VPR128[32,8] = tmpv; + Rtt_VPR128[40,8] = tmpv; + Rtt_VPR128[48,8] = tmpv; + Rtt_VPR128[56,8] = tmpv; + Rtt_VPR128[64,8] = tmpv; + Rtt_VPR128[72,8] = tmpv; + Rtt_VPR128[80,8] = tmpv; + Rtt_VPR128[88,8] = tmpv; + Rtt_VPR128[96,8] = tmpv; + Rtt_VPR128[104,8] = tmpv; + Rtt_VPR128[112,8] = tmpv; + Rtt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d60c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] + +:ld2r {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:2 = 0; + tmpv = *:2 tmp_ldXn; + Rt_VPR128[0,16] = tmpv; + Rt_VPR128[16,16] = tmpv; + Rt_VPR128[32,16] = tmpv; + Rt_VPR128[48,16] = tmpv; + Rt_VPR128[64,16] = tmpv; + Rt_VPR128[80,16] = tmpv; + Rt_VPR128[96,16] = tmpv; + Rt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = tmpv; + Rtt_VPR128[16,16] = tmpv; + Rtt_VPR128[32,16] = tmpv; + Rtt_VPR128[48,16] = tmpv; + Rtt_VPR128[64,16] = tmpv; + Rtt_VPR128[80,16] = tmpv; + Rtt_VPR128[96,16] = tmpv; + Rtt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d60c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] + +:ld2r {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:4 = 0; + tmpv = *:4 tmp_ldXn; +
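# ld2r: replicate the first 32-bit element into every lane of Vt +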
Rt_VPR128[0,32] = tmpv; + Rt_VPR128[32,32] = tmpv; + Rt_VPR128[64,32] = tmpv; + Rt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = tmpv; + Rtt_VPR128[32,32] = tmpv; + Rtt_VPR128[64,32] = tmpv; + Rtt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0d60c000/mask=xbffff000 +# C7.2.167 LD2R page C7-1377 line 80085 MATCH x0de0c000/mask=xbfe0f000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0d600000/mask=xbfff2000 +# C7.2.166 LD2 (single structure) page C7-1373 line 79842 MATCH x0de00000/mask=xbfe02000 +# CONSTRUCT x4d60cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld2r {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] + +:ld2r {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:8 = 0; + tmpv = *:8 tmp_ldXn; + Rt_VPR128[0,64] = tmpv; + Rt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = tmpv; + Rtt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x0c404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] + +:ld3 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple 
structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x0c404400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] + +:ld3 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x0c404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] + +:ld3 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x4c404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] + +:ld3 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn 
= tmp_ldXn + 1; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x4c404400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] + +:ld3 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = 
*:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x4c404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] + +:ld3 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0c404000/mask=xbffff000 +# C7.2.168 LD3 (multiple structures) page C7-1380 line 80273 MATCH x0cc04000/mask=xbfe0f000 +# CONSTRUCT x4c404c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] + +:ld3 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH 
x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[0], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[1], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[2], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[3], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d403000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[4], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[4], 
[Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d403400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[5], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d403800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[6], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d403c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[7], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[8], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & 
Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[9], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[10], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[11], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d403000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[12], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + 
Rttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d403400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[13], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d403800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[14], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d403c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.B, Vt2.B, Vt3.B}[15], [Xn|SP] [, wback] + +:ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[0], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH 
x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[1], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[2], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[3], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[4], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 
{Vt.H, Vt2.H, Vt3.H}[5], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[6], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.H, Vt2.H, Vt3.H}[7], [Xn|SP] [, wback] + +:ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.S, Vt2.S, Vt3.S}[0], [Xn|SP] [, wback] + +:ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d40b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.S, Vt2.S, Vt3.S}[1], [Xn|SP] [, wback] + +:ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & 
b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.S, Vt2.S, Vt3.S}[2], [Xn|SP] [, wback] + +:ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d40b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.S, Vt2.S, Vt3.S}[3], [Xn|SP] [, wback] + +:ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x0d40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.D, Vt2.D, Vt3.D}[0], [Xn|SP] [, wback] + +:ld3 {vVt^".D", vVtt^".D", vVttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000 +# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000 +# CONSTRUCT x4d40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld3 {Vt.D, Vt2.D, Vt3.D}[1], [Xn|SP] [, wback] + +:ld3 {vVt^".D", vVtt^".D", vVttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,64] = *:8 
tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 8;
+ Rtt_VPR128[64,64] = *:8 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 8;
+ Rttt_VPR128[64,64] = *:8 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 8;
+ # neglected zexts
+ build ldst_wback;
+}
+
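+# LD3R (C7.2.170) loads three consecutive elements from [Xn|SP] and replicates
+# the first element across every lane of Vt, the second across every lane of
+# Vt2, and the third across every lane of Vt3.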
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x0d40e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:1 = 0;
+ tmpv = *:1 tmp_ldXn;
+ Rt_VPR64[0,8] = tmpv;
+ Rt_VPR64[8,8] = tmpv;
+ Rt_VPR64[16,8] = tmpv;
+ Rt_VPR64[24,8] = tmpv;
+ Rt_VPR64[32,8] = tmpv;
+ Rt_VPR64[40,8] = tmpv;
+ Rt_VPR64[48,8] = tmpv;
+ Rt_VPR64[56,8] = tmpv;
+ tmp_ldXn = tmp_ldXn + 1;
+ tmpv = *:1 tmp_ldXn;
+ Rtt_VPR64[0,8] = tmpv;
+ Rtt_VPR64[8,8] = tmpv;
+ Rtt_VPR64[16,8] = tmpv;
+ Rtt_VPR64[24,8] = tmpv;
+ Rtt_VPR64[32,8] = tmpv;
+ Rtt_VPR64[40,8] = tmpv;
+ Rtt_VPR64[48,8] = tmpv;
+ Rtt_VPR64[56,8] = tmpv;
+ tmp_ldXn = tmp_ldXn + 1;
+ tmpv = *:1 tmp_ldXn;
+ Rttt_VPR64[0,8] = tmpv;
+ Rttt_VPR64[8,8] = tmpv;
+ Rttt_VPR64[16,8] = tmpv;
+ Rttt_VPR64[24,8] = tmpv;
+ Rttt_VPR64[32,8] = tmpv;
+ Rttt_VPR64[40,8] = tmpv;
+ Rttt_VPR64[48,8] = tmpv;
+ Rttt_VPR64[56,8] = tmpv;
+ tmp_ldXn = tmp_ldXn + 1;
+ # neglected zexts
+ build ldst_wback;
+}
+
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x0d40e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:2 = 0;
+ tmpv = *:2 tmp_ldXn;
+ Rt_VPR64[0,16] = tmpv;
+ Rt_VPR64[16,16] = tmpv;
+ Rt_VPR64[32,16] = tmpv;
+ Rt_VPR64[48,16] = tmpv;
+ tmp_ldXn = tmp_ldXn + 2;
+ tmpv = *:2 tmp_ldXn;
+ Rtt_VPR64[0,16] = tmpv;
+ Rtt_VPR64[16,16] = tmpv;
+ Rtt_VPR64[32,16] = tmpv;
+ Rtt_VPR64[48,16] = tmpv;
+ tmp_ldXn = tmp_ldXn + 2;
+ tmpv = *:2 tmp_ldXn;
+ Rttt_VPR64[0,16] = tmpv;
+ Rttt_VPR64[16,16] = tmpv;
+ Rttt_VPR64[32,16] = tmpv;
+ Rttt_VPR64[48,16] = tmpv;
+ tmp_ldXn = tmp_ldXn + 2;
+ # neglected zexts
+ build ldst_wback;
+}
+
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x0d40e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:4 = 0;
+ tmpv = *:4 tmp_ldXn;
+ Rt_VPR64[0,32] = tmpv;
+ Rt_VPR64[32,32] = tmpv;
+ tmp_ldXn = tmp_ldXn + 4;
+ tmpv = *:4 tmp_ldXn;
+ Rtt_VPR64[0,32] = tmpv;
+ Rtt_VPR64[32,32] = tmpv;
+ tmp_ldXn = tmp_ldXn + 4;
+ tmpv = *:4 tmp_ldXn;
+ Rttt_VPR64[0,32] = tmpv;
+ Rttt_VPR64[32,32] = tmpv;
+ tmp_ldXn = tmp_ldXn + 4;
+ # neglected zexts
+ build ldst_wback;
+}
+
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x0d40ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.1D, Vt2.1D, Vt3.1D}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".1D", vVtt^".1D", vVttt^".1D"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:8 = 0;
+ tmpv = *:8 tmp_ldXn;
+ Rt_VPR64[0,64] = tmpv;
+ tmp_ldXn = tmp_ldXn + 8;
+ tmpv = *:8 tmp_ldXn;
+ Rtt_VPR64[0,64] = tmpv;
+ tmp_ldXn = tmp_ldXn + 8;
+ tmpv = *:8 tmp_ldXn;
+ Rttt_VPR64[0,64] = tmpv;
+ tmp_ldXn = tmp_ldXn + 8;
+ # neglected zexts
+ build ldst_wback;
+}
+
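+# In the LD3R constructors below, b_30 is the Q bit: Q=1 selects the 128-bit
+# register forms (.16B/.8H/.4S/.2D), doubling the number of lanes replicated.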
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x4d40e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:1 = 0;
+ tmpv = *:1 tmp_ldXn;
+ Rt_VPR128[0,8] = tmpv;
+ Rt_VPR128[8,8] = tmpv;
+ Rt_VPR128[16,8] = tmpv;
+ Rt_VPR128[24,8] = tmpv;
+ Rt_VPR128[32,8] = tmpv;
+ Rt_VPR128[40,8] = tmpv;
+ Rt_VPR128[48,8] = tmpv;
+ Rt_VPR128[56,8] = tmpv;
+ Rt_VPR128[64,8] = tmpv;
+ Rt_VPR128[72,8] = tmpv;
+ Rt_VPR128[80,8] = tmpv;
+ Rt_VPR128[88,8] = tmpv;
+ Rt_VPR128[96,8] = tmpv;
+ Rt_VPR128[104,8] = tmpv;
+ Rt_VPR128[112,8] = tmpv;
+ Rt_VPR128[120,8] = tmpv;
+ tmp_ldXn = tmp_ldXn + 1;
+ tmpv = *:1 tmp_ldXn;
+ Rtt_VPR128[0,8] = tmpv;
+ Rtt_VPR128[8,8] = tmpv;
+ Rtt_VPR128[16,8] = tmpv;
+ Rtt_VPR128[24,8] = tmpv;
+ Rtt_VPR128[32,8] = tmpv;
+ Rtt_VPR128[40,8] = tmpv;
+ Rtt_VPR128[48,8] = tmpv;
+ Rtt_VPR128[56,8] = tmpv;
+ Rtt_VPR128[64,8] = tmpv;
+ Rtt_VPR128[72,8] = tmpv;
+ Rtt_VPR128[80,8] = tmpv;
+ Rtt_VPR128[88,8] = tmpv;
+ Rtt_VPR128[96,8] = tmpv;
+ Rtt_VPR128[104,8] = tmpv;
+ Rtt_VPR128[112,8] = tmpv;
+ Rtt_VPR128[120,8] = tmpv;
+ tmp_ldXn = tmp_ldXn + 1;
+ tmpv = *:1 tmp_ldXn;
+ Rttt_VPR128[0,8] = tmpv;
+ Rttt_VPR128[8,8] = tmpv;
+ Rttt_VPR128[16,8] = tmpv;
+ Rttt_VPR128[24,8] = tmpv;
+ Rttt_VPR128[32,8] = tmpv;
+ Rttt_VPR128[40,8] = tmpv;
+ Rttt_VPR128[48,8] = tmpv;
+ Rttt_VPR128[56,8] = tmpv;
+ Rttt_VPR128[64,8] = tmpv;
+ Rttt_VPR128[72,8] = tmpv;
+ Rttt_VPR128[80,8] = tmpv;
+ Rttt_VPR128[88,8] = tmpv;
+ Rttt_VPR128[96,8] = tmpv;
+ Rttt_VPR128[104,8] = tmpv;
+ Rttt_VPR128[112,8] = tmpv;
+ Rttt_VPR128[120,8] = tmpv;
+ tmp_ldXn = tmp_ldXn + 1;
+ # neglected zexts
+ build ldst_wback;
+}
+
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x4d40e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:2 = 0;
+ tmpv = *:2 tmp_ldXn;
+ Rt_VPR128[0,16] = tmpv;
+ Rt_VPR128[16,16] = tmpv;
+ Rt_VPR128[32,16] = tmpv;
+ Rt_VPR128[48,16] = tmpv;
+ Rt_VPR128[64,16] = tmpv;
+ Rt_VPR128[80,16] = tmpv;
+ Rt_VPR128[96,16] = tmpv;
+ Rt_VPR128[112,16] = tmpv;
+ tmp_ldXn = tmp_ldXn + 2;
+ tmpv = *:2 tmp_ldXn;
+ Rtt_VPR128[0,16] = tmpv;
+ Rtt_VPR128[16,16] = tmpv;
+ Rtt_VPR128[32,16] = tmpv;
+ Rtt_VPR128[48,16] = tmpv;
+ Rtt_VPR128[64,16] = tmpv;
+ Rtt_VPR128[80,16] = tmpv;
+ Rtt_VPR128[96,16] = tmpv;
+ Rtt_VPR128[112,16] = tmpv;
+ tmp_ldXn = tmp_ldXn + 2;
+ tmpv = *:2 tmp_ldXn;
+ Rttt_VPR128[0,16] = tmpv;
+ Rttt_VPR128[16,16] = tmpv;
+ Rttt_VPR128[32,16] = tmpv;
+ Rttt_VPR128[48,16] = tmpv;
+ Rttt_VPR128[64,16] = tmpv;
+ Rttt_VPR128[80,16] = tmpv;
+ Rttt_VPR128[96,16] = tmpv;
+ Rttt_VPR128[112,16] = tmpv;
+ tmp_ldXn = tmp_ldXn + 2;
+ # neglected zexts
+ build ldst_wback;
+}
+
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x4d40e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:4 = 0;
+ tmpv = *:4 tmp_ldXn;
+ Rt_VPR128[0,32] = tmpv;
+ Rt_VPR128[32,32] = tmpv;
+ Rt_VPR128[64,32] = tmpv;
+ Rt_VPR128[96,32] = tmpv;
+ tmp_ldXn = tmp_ldXn + 4;
+ tmpv = *:4 tmp_ldXn;
+ Rtt_VPR128[0,32] = tmpv;
+ Rtt_VPR128[32,32] = tmpv;
+ Rtt_VPR128[64,32] = tmpv;
+ Rtt_VPR128[96,32] = tmpv;
+ tmp_ldXn = tmp_ldXn + 4;
+ tmpv = *:4 tmp_ldXn;
+ Rttt_VPR128[0,32] = tmpv;
+ Rttt_VPR128[32,32] = tmpv;
+ Rttt_VPR128[64,32] = tmpv;
+ Rttt_VPR128[96,32] = tmpv;
+ tmp_ldXn = tmp_ldXn + 4;
+ # neglected zexts
+ build ldst_wback;
+}
+
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0d40e000/mask=xbffff000
+# C7.2.170 LD3R page C7-1387 line 80704 MATCH x0dc0e000/mask=xbfe0f000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0d402000/mask=xbfff2000
+# C7.2.169 LD3 (single structure) page C7-1383 line 80459 MATCH x0dc02000/mask=xbfe02000
+# CONSTRUCT x4d40ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES
+# ld3r {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback]
+
+:ld3r {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ local tmpv:8 = 0;
+ tmpv = *:8 tmp_ldXn;
+ Rt_VPR128[0,64] = tmpv;
+ Rt_VPR128[64,64] = tmpv;
+ tmp_ldXn = tmp_ldXn + 8;
+ tmpv = *:8 tmp_ldXn;
+ Rtt_VPR128[0,64] = tmpv;
+ Rtt_VPR128[64,64] = tmpv;
+ tmp_ldXn = tmp_ldXn + 8;
+ tmpv = *:8 tmp_ldXn;
+ Rttt_VPR128[0,64] = tmpv;
+ Rttt_VPR128[64,64] = tmpv;
+ tmp_ldXn = tmp_ldXn + 8;
+ # neglected zexts
+ build ldst_wback;
+}
+
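+# LD4 (multiple structures) de-interleaves 4-element structures from memory:
+# the four elements of structure j are written to lane j of Vt, Vt2, Vt3 and
+# Vt4 respectively, so consecutive memory elements rotate through the four
+# destination registers.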
+# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000
+# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000
+# CONSTRUCT x0c400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
+# ld4 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback]
+
+:ld4 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback
+is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
+{
+ tmp_ldXn = Rn_GPR64xsp;
+ Rt_VPR64[0,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[0,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[0,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[0,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[8,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[8,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[8,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[8,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[16,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[16,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[16,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[16,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[24,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[24,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[24,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[24,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[32,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[32,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[32,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[32,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[40,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[40,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[40,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[40,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[48,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[48,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rttt_VPR64[48,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtttt_VPR64[48,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rt_VPR64[56,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1;
+ Rtt_VPR64[56,8] = *:1 tmp_ldXn;
+ tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR64[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000 +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000 +# CONSTRUCT x0c400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] + +:ld4 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR64[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000 +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000 +# CONSTRUCT x0c400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] + +:ld4 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR64[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR64[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000 +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000 +# CONSTRUCT x4c400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] + +:ld4 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & 
b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + 
tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000 +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000 +# CONSTRUCT x4c400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] + +:ld4 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000 +# C7.2.171 LD4 
(multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000 +# CONSTRUCT x4c400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] + +:ld4 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0c400000/mask=xbffff000 +# C7.2.171 LD4 (multiple structures) page C7-1390 line 80894 MATCH x0cc00000/mask=xbfe0f000 +# CONSTRUCT x4c400c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] + +:ld4 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d602000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[0], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[0,8] = 
*:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[0,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d602400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[1], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[8,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d602800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[2], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[16,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d602c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[3], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[24,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d603000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[4], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & 
vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[32,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d603400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[5], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[40,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d603800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[6], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[48,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d603c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[7], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[56,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d602000/mask=xff60fc00 
MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[8], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[64,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d602400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[9], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[72,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d602800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[10], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[80,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d602c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[11], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[88,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 
1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d603000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[12], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[96,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d603400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[13], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[104,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d603800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[14], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[112,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d603c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[15], [Xn|SP] [, wback] + +:ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & 
Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + Rtttt_VPR128[120,8] = *:1 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d606000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[0], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[0,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d606800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[1], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[16,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d607000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[2], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[32,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d607800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[3], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", 
vVtttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[48,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d606000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[4], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[64,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d606800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[5], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[80,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d607000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[6], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[96,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH 
x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d607800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[7], [Xn|SP] [, wback] + +:ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + Rtttt_VPR128[112,16] = *:2 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[0], [Xn|SP] [, wback] + +:ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[0,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[1], [Xn|SP] [, wback] + +:ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[32,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[2], [Xn|SP] [, wback] + +:ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[64,32] = 
*:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[64,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[3], [Xn|SP] [, wback] + +:ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + Rtttt_VPR128[96,32] = *:4 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[0], [Xn|SP] [, wback] + +:ld4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR128[0,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# ld4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[1], [Xn|SP] [, wback] + +:ld4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + Rt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + Rtttt_VPR128[64,64] = *:8 tmp_ldXn; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] + +:ld4r 
{vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:1 = 0; + tmpv = *:1 tmp_ldXn; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] + +:ld4r {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:2 = 0; + tmpv = *:2 tmp_ldXn; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] + +:ld4r {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, 
[Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:4 = 0; + tmpv = *:4 tmp_ldXn; + Rt_VPR64[0,32] = tmpv; + Rt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = tmpv; + Rtt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rttt_VPR64[0,32] = tmpv; + Rttt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rtttt_VPR64[0,32] = tmpv; + Rtttt_VPR64[32,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x0d60ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.1D, Vt2.1D, Vt3.1D, Vt4.1D}, [Xn|SP] [, wback] + +:ld4r {vVt^".1D", vVtt^".1D", vVttt^".1D", vVtttt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:8 = 0; + tmpv = *:8 tmp_ldXn; + Rt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rtt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rttt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rtttt_VPR64[0,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] + +:ld4r {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:1 = 0; + tmpv = *:1 tmp_ldXn; + Rt_VPR128[0,8] = tmpv; + Rt_VPR128[8,8] = tmpv; + Rt_VPR128[16,8] = tmpv; + Rt_VPR128[24,8] = tmpv; + Rt_VPR128[32,8] = tmpv; + Rt_VPR128[40,8] = tmpv; + Rt_VPR128[48,8] = tmpv; + Rt_VPR128[56,8] = tmpv; + Rt_VPR128[64,8] = tmpv; + Rt_VPR128[72,8] = tmpv; + Rt_VPR128[80,8] = tmpv; + Rt_VPR128[88,8] = tmpv; + Rt_VPR128[96,8] = tmpv; + Rt_VPR128[104,8] = tmpv; + Rt_VPR128[112,8] = tmpv; + Rt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = tmpv; + Rtt_VPR128[8,8] = tmpv; + Rtt_VPR128[16,8] = tmpv; + Rtt_VPR128[24,8] = tmpv; + Rtt_VPR128[32,8] = tmpv; + Rtt_VPR128[40,8] = tmpv; + Rtt_VPR128[48,8] = tmpv; + 
Rtt_VPR128[56,8] = tmpv; + Rtt_VPR128[64,8] = tmpv; + Rtt_VPR128[72,8] = tmpv; + Rtt_VPR128[80,8] = tmpv; + Rtt_VPR128[88,8] = tmpv; + Rtt_VPR128[96,8] = tmpv; + Rtt_VPR128[104,8] = tmpv; + Rtt_VPR128[112,8] = tmpv; + Rtt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = tmpv; + Rttt_VPR128[8,8] = tmpv; + Rttt_VPR128[16,8] = tmpv; + Rttt_VPR128[24,8] = tmpv; + Rttt_VPR128[32,8] = tmpv; + Rttt_VPR128[40,8] = tmpv; + Rttt_VPR128[48,8] = tmpv; + Rttt_VPR128[56,8] = tmpv; + Rttt_VPR128[64,8] = tmpv; + Rttt_VPR128[72,8] = tmpv; + Rttt_VPR128[80,8] = tmpv; + Rttt_VPR128[88,8] = tmpv; + Rttt_VPR128[96,8] = tmpv; + Rttt_VPR128[104,8] = tmpv; + Rttt_VPR128[112,8] = tmpv; + Rttt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + tmpv = *:1 tmp_ldXn; + Rtttt_VPR128[0,8] = tmpv; + Rtttt_VPR128[8,8] = tmpv; + Rtttt_VPR128[16,8] = tmpv; + Rtttt_VPR128[24,8] = tmpv; + Rtttt_VPR128[32,8] = tmpv; + Rtttt_VPR128[40,8] = tmpv; + Rtttt_VPR128[48,8] = tmpv; + Rtttt_VPR128[56,8] = tmpv; + Rtttt_VPR128[64,8] = tmpv; + Rtttt_VPR128[72,8] = tmpv; + Rtttt_VPR128[80,8] = tmpv; + Rtttt_VPR128[88,8] = tmpv; + Rtttt_VPR128[96,8] = tmpv; + Rtttt_VPR128[104,8] = tmpv; + Rtttt_VPR128[112,8] = tmpv; + Rtttt_VPR128[120,8] = tmpv; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] + +:ld4r {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:2 = 0; + tmpv = *:2 tmp_ldXn; + Rt_VPR128[0,16] = tmpv; + Rt_VPR128[16,16] = tmpv; + Rt_VPR128[32,16] = tmpv; + Rt_VPR128[48,16] = tmpv; + Rt_VPR128[64,16] = tmpv; + Rt_VPR128[80,16] = tmpv; + Rt_VPR128[96,16] = tmpv; + Rt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = tmpv; + Rtt_VPR128[16,16] = tmpv; + Rtt_VPR128[32,16] = tmpv; + Rtt_VPR128[48,16] = tmpv; + Rtt_VPR128[64,16] = tmpv; + Rtt_VPR128[80,16] = tmpv; + Rtt_VPR128[96,16] = tmpv; + Rtt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = tmpv; + Rttt_VPR128[16,16] = tmpv; + Rttt_VPR128[32,16] = tmpv; + Rttt_VPR128[48,16] = tmpv; + Rttt_VPR128[64,16] = tmpv; + Rttt_VPR128[80,16] = tmpv; + Rttt_VPR128[96,16] = tmpv; + Rttt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + tmpv = *:2 tmp_ldXn; + Rtttt_VPR128[0,16] = tmpv; + Rtttt_VPR128[16,16] = tmpv; + Rtttt_VPR128[32,16] = tmpv; + Rtttt_VPR128[48,16] = tmpv; + Rtttt_VPR128[64,16] = tmpv; + Rtttt_VPR128[80,16] = tmpv; + Rtttt_VPR128[96,16] = tmpv; + Rtttt_VPR128[112,16] = tmpv; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 
LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] + +:ld4r {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:4 = 0; + tmpv = *:4 tmp_ldXn; + Rt_VPR128[0,32] = tmpv; + Rt_VPR128[32,32] = tmpv; + Rt_VPR128[64,32] = tmpv; + Rt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = tmpv; + Rtt_VPR128[32,32] = tmpv; + Rtt_VPR128[64,32] = tmpv; + Rtt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = tmpv; + Rttt_VPR128[32,32] = tmpv; + Rttt_VPR128[64,32] = tmpv; + Rttt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + tmpv = *:4 tmp_ldXn; + Rtttt_VPR128[0,32] = tmpv; + Rtttt_VPR128[32,32] = tmpv; + Rtttt_VPR128[64,32] = tmpv; + Rtttt_VPR128[96,32] = tmpv; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0d60e000/mask=xbffff000 +# C7.2.173 LD4R page C7-1397 line 81315 MATCH x0de0e000/mask=xbfe0f000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0d602000/mask=xbfff2000 +# C7.2.172 LD4 (single structure) page C7-1393 line 81068 MATCH x0de02000/mask=xbfe02000 +# CONSTRUCT x4d60ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES +# ld4r {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] + +:ld4r {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + local tmpv:8 = 0; + tmpv = *:8 tmp_ldXn; + Rt_VPR128[0,64] = tmpv; + Rt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = tmpv; + Rtt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = tmpv; + Rttt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + tmpv = *:8 tmp_ldXn; + Rtttt_VPR128[0,64] = tmpv; + Rtttt_VPR128[64,64] = tmpv; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] + +:st1 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = 
tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] + +:st1 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# 
C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] + +:st1 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.1D, Vt2.1D, Vt3.1D, Vt4.1D}, [Xn|SP] [, wback] + +:st1 {vVt^".1D", vVtt^".1D", vVttt^".1D", vVtttt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtttt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] + +:st1 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + 
*:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] + +:st1 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & 
b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] + +:st1 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + 
*:4 tmp_ldXn = Rttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] + +:st1 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] + +:st1 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = 
Rttt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c006400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] + +:st1 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] + +:st1 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c006c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.1D, Vt2.1D, Vt3.1D}, [Xn|SP] [, wback] + +:st1 {vVt^".1D", vVtt^".1D", vVttt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT 
x4c006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] + +:st1 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page 
C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c006400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] + +:st1 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] + +:st1 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH 
x0c802000/mask=xbfe02000 +# CONSTRUCT x4c006c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] + +:st1 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8B}, [Xn|SP] [, wback] + +:st1 {vVt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c007400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4H}, [Xn|SP] [, wback] + +:st1 {vVt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2S}, [Xn|SP] [, wback] + +:st1 {vVt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT 
x0c007c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.1D}, [Xn|SP] [, wback] + +:st1 {vVt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.16B}, [Xn|SP] [, wback] + +:st1 {vVt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c007400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8H}, [Xn|SP] [, wback] + +:st1 {vVt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4S}, [Xn|SP] [, wback] + +:st1 {vVt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 
tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c007c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2D}, [Xn|SP] [, wback] + +:st1 {vVt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] + +:st1 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] + +:st1 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple 
structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c00a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] + +:st1 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x0c00ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.1D, Vt2.1D}, [Xn|SP] [, wback] + +:st1 {vVt^".1D", vVtt^".1D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR64[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] + +:st1 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = 
Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] + +:st1 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c00a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] + +:st1 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c002000/mask=xbfff2000 +# C7.2.305 ST1 (multiple structures) page C7-1666 line 96374 MATCH x0c802000/mask=xbfe02000 +# CONSTRUCT x4c00ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] + +:st1 {vVt^".2D", vVtt^".2D"}, 
[Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# Note: the single-structure forms below store one vector lane instead of whole registers; the lane index is encoded in the Q, S and size fields (see the varying b_30/b_12/b_1011 constraints below). +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[0], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[1], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[2], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d000c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[3], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d001000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[4], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + # 
neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d001400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[5], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d001800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[6], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d001c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[7], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[8], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[9], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[10], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt 
& Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d000c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[11], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d001000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[12], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d001400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[13], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d001800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[14], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d001c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.B}[15], [Xn|SP] [, wback] + +:st1 {vVt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[0], [Xn|SP] [, 
wback] + +:st1 {vVt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[1], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d005000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[2], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d005800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[3], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[4], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[5], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page 
C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d005000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[6], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d005800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.H}[7], [Xn|SP] [, wback] + +:st1 {vVt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.S}[0], [Xn|SP] [, wback] + +:st1 {vVt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d009000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.S}[1], [Xn|SP] [, wback] + +:st1 {vVt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.S}[2], [Xn|SP] [, wback] + +:st1 {vVt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d009000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.S}[3], [Xn|SP] [, wback] + +:st1 {vVt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build 
ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x0d008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.D}[0], [Xn|SP] [, wback] + +:st1 {vVt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d000000/mask=xbfff2000 +# C7.2.306 ST1 (single structure) page C7-1670 line 96624 MATCH x0d800000/mask=xbfe02000 +# CONSTRUCT x4d008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st1 {Vt.D}[1], [Xn|SP] [, wback] + +:st1 {vVt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# Note: unlike ST1, which stores each register's elements back to back, the ST2 forms below interleave the two registers element by element (Vt[i], Vt2[i], Vt[i+1], Vt2[i+1], ...). +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x0c008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] + +:st2 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x0c008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] + +:st2 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = 
Rtt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x0c008800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] + +:st2 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x4c008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] + +:st2 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn 
+ 1; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x4c008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] + +:st2 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x4c008800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] + +:st2 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c008000/mask=xbffff000 +# C7.2.307 ST2 (multiple structures) page C7-1674 line 96864 MATCH x0c808000/mask=xbfe0f000 +# CONSTRUCT x4c008c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] + +:st2 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + 
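# Vt2's high doubleword follows, completing the two-register element interleave +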
tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d200000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[0], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d200400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[1], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d200800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[2], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d200c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[3], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d201000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[4], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 
tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d201400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[5], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d201800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[6], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d201c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[7], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d200000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[8], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d200400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[9], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + 
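# single-structure form: store byte lane 9 (bit offset 72) of Vt, then the matching lane of Vt2 +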
tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d200800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[10], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d200c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[11], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d201000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[12], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d201400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[13], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d201800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[14], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & 
Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d201c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.B, Vt2.B}[15], [Xn|SP] [, wback] + +:st2 {vVt^".B", vVtt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d204000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[0], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d204800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[1], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d205000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[2], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d205800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[3], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & 
vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d204000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[4], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d204800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[5], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d205000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[6], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d205800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.H, Vt2.H}[7], [Xn|SP] [, wback] + +:st2 {vVt^".H", vVtt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d208000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.S, Vt2.S}[0], [Xn|SP] [, wback] + +:st2 {vVt^".S", vVtt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & 
b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d209000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.S, Vt2.S}[1], [Xn|SP] [, wback] + +:st2 {vVt^".S", vVtt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d208000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.S, Vt2.S}[2], [Xn|SP] [, wback] + +:st2 {vVt^".S", vVtt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d209000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.S, Vt2.S}[3], [Xn|SP] [, wback] + +:st2 {vVt^".S", vVtt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x0d208400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.D, Vt2.D}[0], [Xn|SP] [, wback] + +:st2 {vVt^".D", vVtt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0d200000/mask=xbfff2000 +# C7.2.308 ST2 (single structure) page C7-1677 line 97032 MATCH x0da00000/mask=xbfe02000 +# CONSTRUCT x4d208400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st2 {Vt.D, Vt2.D}[1], [Xn|SP] [, wback] + +:st2 {vVt^".D", vVtt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & 
b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x0c004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] + +:st3 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x0c004400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] + +:st3 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; + 
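# three-way interleave: for each 16-bit lane i the unrolled sequence stores Rt[i], Rtt[i], Rttt[i] to consecutive halfwords before moving to lane i+1 +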
tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x0c004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] + +:st3 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x4c004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] + +:st3 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + 
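# the 128-bit (.16B) form repeats the same interleave over all sixteen byte lanes +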
tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x4c004400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] + +:st3 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH 
x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x4c004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] + +:st3 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c004000/mask=xbffff000 +# C7.2.309 ST3 (multiple structures) page C7-1681 line 97274 MATCH x0c804000/mask=xbfe0f000 +# CONSTRUCT x4c004c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] + +:st3 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[0], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[1], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & 
b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[2], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[3], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d003000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[4], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d003400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[5], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; 
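+ # single-structure form: lane 5 of each source register (bit offset 5*8 = 40) is stored, the address advancing one byte per register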
+ tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d003800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[6], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d003c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[7], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[8], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[9], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 
line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[10], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[11], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d003000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[12], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d003400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[13], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d003800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[14], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & 
b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d003c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.B, Vt2.B, Vt3.B}[15], [Xn|SP] [, wback] + +:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[0], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[1], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[2], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + 
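# an H-element lane index maps to bit offset index*16, so lane 2 is the 16-bit field at offset 32 in each register +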
*:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[3], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[4], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[5], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[6], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# 
C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.H, Vt2.H, Vt3.H}[7], [Xn|SP] [, wback] + +:st3 {vVt^".H", vVtt^".H", vVttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.S, Vt2.S, Vt3.S}[0], [Xn|SP] [, wback] + +:st3 {vVt^".S", vVtt^".S", vVttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d00b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.S, Vt2.S, Vt3.S}[1], [Xn|SP] [, wback] + +:st3 {vVt^".S", vVtt^".S", vVttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.S, Vt2.S, Vt3.S}[2], [Xn|SP] [, wback] + +:st3 {vVt^".S", vVtt^".S", vVttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d00b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.S, Vt2.S, Vt3.S}[3], [Xn|SP] [, wback] + +:st3 {vVt^".S", vVtt^".S", vVttt^".S"}[3], 
[Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x0d00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.D, Vt2.D, Vt3.D}[0], [Xn|SP] [, wback] + +:st3 {vVt^".D", vVtt^".D", vVttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d002000/mask=xbfff2000 +# C7.2.310 ST3 (single structure) page C7-1684 line 97444 MATCH x0d802000/mask=xbfe02000 +# CONSTRUCT x4d00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st3 {Vt.D, Vt2.D, Vt3.D}[1], [Xn|SP] [, wback] + +:st3 {vVt^".D", vVtt^".D", vVttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x0c000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] + +:st4 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 
tmp_ldXn = Rtt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR64[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x0c000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] + +:st4 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR64[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x0c000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] + +:st4 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR64[0,32]; + tmp_ldXn = 
tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR64[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR64[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x4c000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] + +:st4 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; + tmp_ldXn = 
tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x4c000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] + +:st4 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = 
Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x4c000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] + +:st4 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c000000/mask=xbffff000 +# C7.2.311 ST4 (multiple structures) page C7-1688 line 97688 MATCH x0c800000/mask=xbfe0f000 +# CONSTRUCT x4c000c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] + +:st4 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 
8; + *:8 tmp_ldXn = Rtttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d202000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[0], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[0,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d202400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[1], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[8,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d202800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[2], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[16,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d202c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[3], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = 
Rt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[24,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d203000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[4], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[32,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d203400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[5], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[40,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d203800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[6], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[48,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d203c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[7], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & 
Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[56,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d202000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[8], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[64,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d202400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[9], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[72,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d202800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[10], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[80,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d202c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[11], [Xn|SP] [, wback] + +:st4 {vVt^".B", 
vVtt^".B", vVttt^".B", vVtttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[88,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d203000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[12], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[96,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d203400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[13], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[104,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d203800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[14], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[112,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 
MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d203c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[15], [Xn|SP] [, wback] + +:st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:1 tmp_ldXn = Rt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + *:1 tmp_ldXn = Rtttt_VPR128[120,8]; + tmp_ldXn = tmp_ldXn + 1; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d206000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[0], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[0,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d206800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[1], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[16,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d207000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[2], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[32,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + 
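+# 'build ldst_wback' emits the pcode of the ldst_wback subconstructor, which
+# implements the optional "[, wback]" post-index form shown in the header
+# comments (presumably bumping Rn_GPR64xsp by the bytes stored, or by Rm).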
build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d207800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[3], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[48,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d206000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[4], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[64,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d206800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[5], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[80,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d207000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[6], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; + 
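+# Store idiom: '*:2 tmp_ldXn = x' writes a 2-byte value to the address held
+# in tmp_ldXn, and the pointer then advances by the element size, so lane 6
+# of a .H vector (bit offset 6*16 = 96, hence [96,16]) is stored from each
+# of Vt..Vt4 in turn, fully interleaved in memory.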
tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[96,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d207800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[7], [Xn|SP] [, wback] + +:st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:2 tmp_ldXn = Rt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + *:2 tmp_ldXn = Rtttt_VPR128[112,16]; + tmp_ldXn = tmp_ldXn + 2; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d20a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[0], [Xn|SP] [, wback] + +:st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[0,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d20b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[1], [Xn|SP] [, wback] + +:st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[32,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d20a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[2], [Xn|SP] [, wback] + +:st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & 
Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[64,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d20b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[3], [Xn|SP] [, wback] + +:st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:4 tmp_ldXn = Rt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + *:4 tmp_ldXn = Rtttt_VPR128[96,32]; + tmp_ldXn = tmp_ldXn + 4; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x0d20a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[0], [Xn|SP] [, wback] + +:st4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtttt_VPR128[0,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0d202000/mask=xbfff2000 +# C7.2.312 ST4 (single structure) page C7-1691 line 97859 MATCH x0da02000/mask=xbfe02000 +# CONSTRUCT x4d20a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES +# st4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[1], [Xn|SP] [, wback] + +:st4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback +is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 +{ + tmp_ldXn = Rn_GPR64xsp; + *:8 tmp_ldXn = Rt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + *:8 tmp_ldXn = Rtttt_VPR128[64,64]; + tmp_ldXn = tmp_ldXn + 8; + # neglected zexts + build ldst_wback; +} + + diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64neon.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64neon.sinc new file mode 100644 index 00000000..c77d861f --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64neon.sinc @@ -0,0 +1,29911 @@ +# C7.2.1 ABS page C7-1009 line 58362 KEEPWITH +# +# The 
semantics in this file are auto-generated with armit.py script +# in the andre directory (capture output and replace file): +# +# python ../../../ProcessorTest/test/andre/scrape/armit.py --arch a64 --sort --refurb --smacro primitive --sinc languages/AARCH64neon.sinc +# +# The AUNIT tests are run using the command line options from the +# comment with the python script aunit.py in the cunit directory: +# +# (cd ../../../ProcessorTest/test/cunit; python aunit.py OPTIONS) +# +# (aunit.py may require a local copy of a current andre exhaust). + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x5e20b800/mask=xff3ffc00 +# CONSTRUCT x5ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =abs +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1 +# AUNIT --inst x5ee0b800/mask=xfffffc00 --status pass +# ABS Scalar + +:abs Rd_FPR64, Rn_FPR64 +is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = MP_INT_ABS(Rn_FPR64); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x0e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@1 +# AUNIT --inst x0e20b800/mask=xfffffc00 --status pass +# ABS Vector 8B when size = 00 , Q = 0 + +:abs Rd_VPR64.8B, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd +{ + # simd unary Rd_VPR64.8B = MP_INT_ABS(Rn_VPR64.8B) on lane size 1 + Rd_VPR64.8B[0,8] = MP_INT_ABS(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = MP_INT_ABS(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = MP_INT_ABS(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = MP_INT_ABS(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = MP_INT_ABS(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = MP_INT_ABS(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = MP_INT_ABS(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = MP_INT_ABS(Rn_VPR64.8B[56,8]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x4e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@1 +# AUNIT --inst x4e20b800/mask=xfffffc00 --status pass +# ABS Vector SIMD 16B when size = 00 , Q = 1 + +:abs Rd_VPR128.16B, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd +{ + # simd unary Rd_VPR128.16B = MP_INT_ABS(Rn_VPR128.16B) on lane size 1 + Rd_VPR128.16B[0,8] = MP_INT_ABS(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = MP_INT_ABS(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = MP_INT_ABS(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = MP_INT_ABS(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = MP_INT_ABS(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = MP_INT_ABS(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = MP_INT_ABS(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = MP_INT_ABS(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = MP_INT_ABS(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = MP_INT_ABS(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = MP_INT_ABS(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = MP_INT_ABS(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = MP_INT_ABS(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = MP_INT_ABS(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = MP_INT_ABS(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = MP_INT_ABS(Rn_VPR128.16B[120,8]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH 
x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x0e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@2 +# AUNIT --inst x0e60b800/mask=xfffffc00 --status pass +# ABS Vector SIMD 4H when size = 01 , Q = 0 + +:abs Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + # simd unary Rd_VPR64.4H = MP_INT_ABS(Rn_VPR64.4H) on lane size 2 + Rd_VPR64.4H[0,16] = MP_INT_ABS(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = MP_INT_ABS(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = MP_INT_ABS(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = MP_INT_ABS(Rn_VPR64.4H[48,16]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x4e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@2 +# AUNIT --inst x4e60b800/mask=xfffffc00 --status pass +# ABS Vector SIMD 8H when size = 01 , Q = 1 + +:abs Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + # simd unary Rd_VPR128.8H = MP_INT_ABS(Rn_VPR128.8H) on lane size 2 + Rd_VPR128.8H[0,16] = MP_INT_ABS(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(Rn_VPR128.8H[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x0ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@4 +# AUNIT --inst x0ea0b800/mask=xfffffc00 --status pass +# ABS Vector SIMD 2S when size = 10 , Q = 0 + +:abs Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + # simd unary Rd_VPR64.2S = MP_INT_ABS(Rn_VPR64.2S) on lane size 4 + Rd_VPR64.2S[0,32] = MP_INT_ABS(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = MP_INT_ABS(Rn_VPR64.2S[32,32]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x4ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@4 +# AUNIT --inst x4ea0b800/mask=xfffffc00 --status pass +# ABS Vector SIMD 4S when size = 10 , Q = 1 + +:abs Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + # simd unary Rd_VPR128.4S = MP_INT_ABS(Rn_VPR128.4S) on lane size 4 + Rd_VPR128.4S[0,32] = MP_INT_ABS(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(Rn_VPR128.4S[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 +# CONSTRUCT x4ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@8 +# SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@8 +# AUNIT --inst x4ee0b800/mask=xfffffc00 --status pass +# ABS Vector SIMD 2D when size = 11 , Q = 1 + 
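+# Only the .2D arrangement appears for 64-bit lanes: the size=11, Q=0
+# encoding (a hypothetical .1D form) is reserved in the ARM ARM, and the
+# plain 64-bit case is already covered by the scalar D-register ABS above.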
+:abs Rd_VPR128.2D, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + # simd unary Rd_VPR128.2D = MP_INT_ABS(Rn_VPR128.2D) on lane size 8 + Rd_VPR128.2D[0,64] = MP_INT_ABS(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(Rn_VPR128.2D[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x5e208400/mask=xff20fc00 +# CONSTRUCT x5ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2 +# AUNIT --inst x5ee08400/mask=xffe0fc00 --status pass + +:add Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x10 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64 + Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x4e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2 ARG3 =$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@1 +# AUNIT --inst x4e208400/mask=xffe0fc00 --status pass + +:add Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x10 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rn_VPR128.16B + Rm_VPR128.16B on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] + Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] + Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] + Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] + Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] + Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] + Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] + Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] + Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] + Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] + Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] + Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] + Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] + Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] + Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] + Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] + Rm_VPR128.16B[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x4e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@2 +# AUNIT --inst x4e608400/mask=xffe0fc00 --status pass + +:add Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + 
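+# Lane i of an .8H vector occupies bits [16*i,16] (e.g. lane 5 is [80,16]),
+# and pcode has no native vector add, so the generator unrolls all eight
+# 16-bit lane additions explicitly; each lane wraps independently.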
Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x4ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2 ARG3 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@4 +# AUNIT --inst x4ea08400/mask=xffe0fc00 --status pass + +:add Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x4ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@8 +# AUNIT --inst x4ee08400/mask=xffe0fc00 --status pass + +:add Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd infix Rd_VPR128.2D = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x0e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@1 +# AUNIT --inst x0e208400/mask=xffe0fc00 --status pass + +:add Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x10 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix Rd_VPR64.8B = Rn_VPR64.8B + Rm_VPR64.8B on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] + Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] + Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] + Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] + Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] + Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] + Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] + Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] + Rm_VPR64.8B[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x0e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@2 +# AUNIT --inst x0e608400/mask=xffe0fc00 --status pass + +:add Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x10 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H + Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] + Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] + Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] + 
Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] + Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 +# CONSTRUCT x0ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2 ARG3 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@4 +# AUNIT --inst x0ea08400/mask=xffe0fc00 --status pass + +:add Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x10 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rn_VPR64.2S + Rm_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] + Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] + Rm_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 +# CONSTRUCT x0ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@8 &=$shuffle@1-0@3-1:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn/3@8 +# AUNIT --inst x0ea04000/mask=xffe0fc00 --status pass + +:addhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Rd_VPR128 & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; + # simd shuffle Rd_VPR64.2S = TMPQ1 (@1-0@3-1) lane size 4 + Rd_VPR64.2S[0,32] = TMPQ1[32,32]; + Rd_VPR64.2S[32,32] = TMPQ1[96,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 +# CONSTRUCT x0e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@4 &=$shuffle@1-0@3-1@5-2@7-3:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn/3@4 +# AUNIT --inst x0e604000/mask=xffe0fc00 --status pass + +:addhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Rd_VPR128 & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; + # simd shuffle Rd_VPR64.4H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 + Rd_VPR64.4H[0,16] = TMPQ1[16,16]; + Rd_VPR64.4H[16,16] = TMPQ1[48,16]; + Rd_VPR64.4H[32,16] = TMPQ1[80,16]; + Rd_VPR64.4H[48,16] = TMPQ1[112,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 +# CONSTRUCT x0e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@2 &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn/3@2 +# AUNIT --inst x0e204000/mask=xffe0fc00 --status pass + +:addhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Rd_VPR128 & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = 
Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; + # simd shuffle Rd_VPR64.8B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 + Rd_VPR64.8B[0,8] = TMPQ1[8,8]; + Rd_VPR64.8B[8,8] = TMPQ1[24,8]; + Rd_VPR64.8B[16,8] = TMPQ1[40,8]; + Rd_VPR64.8B[24,8] = TMPQ1[56,8]; + Rd_VPR64.8B[32,8] = TMPQ1[72,8]; + Rd_VPR64.8B[40,8] = TMPQ1[88,8]; + Rd_VPR64.8B[48,8] = TMPQ1[104,8]; + Rd_VPR64.8B[56,8] = TMPQ1[120,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 +# CONSTRUCT x4e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@2 &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn2/3@2 +# AUNIT --inst x4e204000/mask=xffe0fc00 --status pass + +:addhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Rd_VPR128 & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; + # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 + Rd_VPR128.16B[64,8] = TMPQ1[8,8]; + Rd_VPR128.16B[72,8] = TMPQ1[24,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[88,8] = TMPQ1[56,8]; + Rd_VPR128.16B[96,8] = TMPQ1[72,8]; + Rd_VPR128.16B[104,8] = TMPQ1[88,8]; + Rd_VPR128.16B[112,8] = TMPQ1[104,8]; + Rd_VPR128.16B[120,8] = TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 +# CONSTRUCT x4ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@8 &=$shuffle@1-2@3-3:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn2/3@8 +# AUNIT --inst x4ea04000/mask=xffe0fc00 --status pass + +:addhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Rd_VPR128 & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 +# CONSTRUCT x4e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@4 &=$shuffle@1-4@3-5@5-6@7-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn2/3@4 +# AUNIT --inst x4e604000/mask=xffe0fc00 --status pass + +:addhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & 
q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Rd_VPR128 & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 + Rd_VPR128.8H[64,16] = TMPQ1[16,16]; + Rd_VPR128.8H[80,16] = TMPQ1[48,16]; + Rd_VPR128.8H[96,16] = TMPQ1[80,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.4 ADDP (scalar) page C7-1405 line 77812 MATCH x5e31b800/mask=xff3ffc00 +# CONSTRUCT x5ef1b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =#+ +# SMACRO(pseudo) ARG1 ARG2 =NEON_addp/1@8 +# AUNIT --inst x5ef1b800/mask=xfffffc00 --status pass + +:addp Rd_FPR64, Rn_VPR128.2D +is b_3031=1 & u=0 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0x1b & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd +{ + # sipd infix Rd_FPR64 = +(Rn_VPR128.2D) on pairs lane size (8 to 8) + local tmp1 = Rn_VPR128.2D[0,64]; + local tmp2 = Rn_VPR128.2D[64,64]; + Rd_FPR64 = tmp1 + tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT x4e20bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@1 +# AUNIT --inst x4e20bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x17 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + TMPQ1 = 0; + # sipd infix TMPQ1 = +(Rn_VPR128.16B,Rm_VPR128.16B) on pairs lane size (1 to 1) + local tmp2 = Rn_VPR128.16B[0,8]; + local tmp3 = Rn_VPR128.16B[8,8]; + TMPQ1[0,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[16,8]; + tmp3 = Rn_VPR128.16B[24,8]; + TMPQ1[8,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[32,8]; + tmp3 = Rn_VPR128.16B[40,8]; + TMPQ1[16,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[48,8]; + tmp3 = Rn_VPR128.16B[56,8]; + TMPQ1[24,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[64,8]; + tmp3 = Rn_VPR128.16B[72,8]; + TMPQ1[32,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[80,8]; + tmp3 = Rn_VPR128.16B[88,8]; + TMPQ1[40,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[96,8]; + tmp3 = Rn_VPR128.16B[104,8]; + TMPQ1[48,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[112,8]; + tmp3 = Rn_VPR128.16B[120,8]; + TMPQ1[56,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[0,8]; + tmp3 = Rm_VPR128.16B[8,8]; + TMPQ1[64,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[16,8]; + tmp3 = Rm_VPR128.16B[24,8]; + TMPQ1[72,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[32,8]; + tmp3 = Rm_VPR128.16B[40,8]; + TMPQ1[80,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[48,8]; + tmp3 = Rm_VPR128.16B[56,8]; + TMPQ1[88,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[64,8]; + tmp3 = Rm_VPR128.16B[72,8]; + TMPQ1[96,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[80,8]; + tmp3 = Rm_VPR128.16B[88,8]; + TMPQ1[104,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[96,8]; + tmp3 = Rm_VPR128.16B[104,8]; + TMPQ1[112,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[112,8]; + tmp3 = Rm_VPR128.16B[120,8]; + TMPQ1[120,8] = tmp2 + tmp3; + Rd_VPR128.16B = TMPQ1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT 
x4ee0bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@8 +# AUNIT --inst x4ee0bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x17 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + TMPQ1 = 0; + # sipd infix TMPQ1 = +(Rn_VPR128.2D,Rm_VPR128.2D) on pairs lane size (8 to 8) + local tmp2 = Rn_VPR128.2D[0,64]; + local tmp3 = Rn_VPR128.2D[64,64]; + TMPQ1[0,64] = tmp2 + tmp3; + tmp2 = Rm_VPR128.2D[0,64]; + tmp3 = Rm_VPR128.2D[64,64]; + TMPQ1[64,64] = tmp2 + tmp3; + Rd_VPR128.2D = TMPQ1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT x0ea0bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@4 +# AUNIT --inst x0ea0bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x17 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + TMPD1 = 0; + # sipd infix TMPD1 = +(Rn_VPR64.2S,Rm_VPR64.2S) on pairs lane size (4 to 4) + local tmp2 = Rn_VPR64.2S[0,32]; + local tmp3 = Rn_VPR64.2S[32,32]; + TMPD1[0,32] = tmp2 + tmp3; + tmp2 = Rm_VPR64.2S[0,32]; + tmp3 = Rm_VPR64.2S[32,32]; + TMPD1[32,32] = tmp2 + tmp3; + Rd_VPR64.2S = TMPD1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT x0e60bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@2 +# AUNIT --inst x0e60bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x17 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + TMPD1 = 0; + # sipd infix TMPD1 = +(Rn_VPR64.4H,Rm_VPR64.4H) on pairs lane size (2 to 2) + local tmp2 = Rn_VPR64.4H[0,16]; + local tmp3 = Rn_VPR64.4H[16,16]; + TMPD1[0,16] = tmp2 + tmp3; + tmp2 = Rn_VPR64.4H[32,16]; + tmp3 = Rn_VPR64.4H[48,16]; + TMPD1[16,16] = tmp2 + tmp3; + tmp2 = Rm_VPR64.4H[0,16]; + tmp3 = Rm_VPR64.4H[16,16]; + TMPD1[32,16] = tmp2 + tmp3; + tmp2 = Rm_VPR64.4H[32,16]; + tmp3 = Rm_VPR64.4H[48,16]; + TMPD1[48,16] = tmp2 + tmp3; + Rd_VPR64.4H = TMPD1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT x4ea0bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@4 +# AUNIT --inst x4ea0bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x17 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPQ1 = 0; + # sipd infix TMPQ1 = +(Rn_VPR128.4S,Rm_VPR128.4S) on pairs lane size (4 to 4) + local tmp2 = Rn_VPR128.4S[0,32]; + local tmp3 = Rn_VPR128.4S[32,32]; + TMPQ1[0,32] = tmp2 + tmp3; + tmp2 = Rn_VPR128.4S[64,32]; + tmp3 = Rn_VPR128.4S[96,32]; + TMPQ1[32,32] = tmp2 + tmp3; + tmp2 = Rm_VPR128.4S[0,32]; + tmp3 = Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = tmp2 + tmp3; + tmp2 = Rm_VPR128.4S[64,32]; + tmp3 = Rm_VPR128.4S[96,32]; + TMPQ1[96,32] = tmp2 + tmp3; + Rd_VPR128.4S = TMPQ1; + zext_zq(Zd); 
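+# (The pairwise sums above are staged in TMPQ1 and copied to the destination
+# only at the end, presumably because Rd may alias Rn or Rm while their
+# lanes are still being read.)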
# zero upper 16 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT x0e20bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@1 +# AUNIT --inst x0e20bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x17 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + TMPD1 = 0; + # sipd infix TMPD1 = +(Rn_VPR64.8B,Rm_VPR64.8B) on pairs lane size (1 to 1) + local tmp2 = Rn_VPR64.8B[0,8]; + local tmp3 = Rn_VPR64.8B[8,8]; + TMPD1[0,8] = tmp2 + tmp3; + tmp2 = Rn_VPR64.8B[16,8]; + tmp3 = Rn_VPR64.8B[24,8]; + TMPD1[8,8] = tmp2 + tmp3; + tmp2 = Rn_VPR64.8B[32,8]; + tmp3 = Rn_VPR64.8B[40,8]; + TMPD1[16,8] = tmp2 + tmp3; + tmp2 = Rn_VPR64.8B[48,8]; + tmp3 = Rn_VPR64.8B[56,8]; + TMPD1[24,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[0,8]; + tmp3 = Rm_VPR64.8B[8,8]; + TMPD1[32,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[16,8]; + tmp3 = Rm_VPR64.8B[24,8]; + TMPD1[40,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[32,8]; + tmp3 = Rm_VPR64.8B[40,8]; + TMPD1[48,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[48,8]; + tmp3 = Rm_VPR64.8B[56,8]; + TMPD1[56,8] = tmp2 + tmp3; + Rd_VPR64.8B = TMPD1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 +# CONSTRUCT x4e60bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@2 +# AUNIT --inst x4e60bc00/mask=xffe0fc00 --status pass + +:addp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x17 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPQ1 = 0; + # sipd infix TMPQ1 = +(Rn_VPR128.8H,Rm_VPR128.8H) on pairs lane size (2 to 2) + local tmp2 = Rn_VPR128.8H[0,16]; + local tmp3 = Rn_VPR128.8H[16,16]; + TMPQ1[0,16] = tmp2 + tmp3; + tmp2 = Rn_VPR128.8H[32,16]; + tmp3 = Rn_VPR128.8H[48,16]; + TMPQ1[16,16] = tmp2 + tmp3; + tmp2 = Rn_VPR128.8H[64,16]; + tmp3 = Rn_VPR128.8H[80,16]; + TMPQ1[32,16] = tmp2 + tmp3; + tmp2 = Rn_VPR128.8H[96,16]; + tmp3 = Rn_VPR128.8H[112,16]; + TMPQ1[48,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[0,16]; + tmp3 = Rm_VPR128.8H[16,16]; + TMPQ1[64,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[32,16]; + tmp3 = Rm_VPR128.8H[48,16]; + TMPQ1[80,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[64,16]; + tmp3 = Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[96,16]; + tmp3 = Rm_VPR128.8H[112,16]; + TMPQ1[112,16] = tmp2 + tmp3; + Rd_VPR128.8H = TMPQ1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 +# CONSTRUCT x4e31b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@1 +# AUNIT --inst x4e31b800/mask=xfffffc00 --status nopcodeop + +:addv Rd_FPR8, Rn_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_addv(Rn_VPR128.16B, 1:1); +} + +# C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 +# CONSTRUCT x0e31b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@1 +# AUNIT --inst x0e31b800/mask=xfffffc00 --status nopcodeop + +:addv Rd_FPR8, Rn_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & 
Rn_VPR64.8B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_addv(Rn_VPR64.8B, 1:1); +} + +# C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 +# CONSTRUCT x0e71b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@2 +# AUNIT --inst x0e71b800/mask=xfffffc00 --status nopcodeop + +:addv Rd_FPR16, Rn_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_addv(Rn_VPR64.4H, 2:1); +} + +# C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 +# CONSTRUCT x4e71b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@2 +# AUNIT --inst x4e71b800/mask=xfffffc00 --status nopcodeop + +:addv Rd_FPR16, Rn_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_addv(Rn_VPR128.8H, 2:1); +} + +# C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 +# CONSTRUCT x4eb1b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2[0]:4 ARG2[1]:4 + ARG2[2]:4 ARG2[3]:4 + =+ +# SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@4 +# AUNIT --inst x4eb1b800/mask=xfffffc00 --status pass + +:addv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + local tmp1:4 = Rn_VPR128.4S[0,32]; + local tmp2:4 = Rn_VPR128.4S[32,32]; + local tmp3:4 = tmp1 + tmp2; + local tmp4:4 = Rn_VPR128.4S[64,32]; + local tmp5:4 = Rn_VPR128.4S[96,32]; + local tmp6:4 = tmp4 + tmp5; + Rd_FPR32 = tmp3 + tmp6; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.7 AESD page C7-1411 line 78085 MATCH x4e285800/mask=xfffffc00 +# CONSTRUCT x4e285800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_aesd/2 +# AUNIT --inst x4e285800/mask=xfffffc00 --status noqemu + +:aesd Rd_VPR128.16B, Rn_VPR128.16B +is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_aesd(Rd_VPR128.16B, Rn_VPR128.16B); +} + +# C7.2.8 AESE page C7-1412 line 78145 MATCH x4e284800/mask=xfffffc00 +# CONSTRUCT x4e284800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_aese/2 +# AUNIT --inst x4e284800/mask=xfffffc00 --status noqemu + +:aese Rd_VPR128.16B, Rn_VPR128.16B +is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=4 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_aese(Rd_VPR128.16B, Rn_VPR128.16B); +} + +# C7.2.9 AESIMC page C7-1413 line 78206 MATCH x4e287800/mask=xfffffc00 +# CONSTRUCT x4e287800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_aesimc/2 +# AUNIT --inst x4e287800/mask=xfffffc00 --status noqemu + +:aesimc Rd_VPR128.16B, Rn_VPR128.16B +is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=7 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Rd_VPR128 & Zd +{ + Rd_VPR128.16B = NEON_aesimc(Rd_VPR128.16B, Rn_VPR128.16B); +} + +# C7.2.10 AESMC page C7-1414 line 78264 MATCH x4e286800/mask=xfffffc00 +# CONSTRUCT x4e286800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_aesmc/2 +# AUNIT --inst x4e286800/mask=xfffffc00 --status noqemu + +:aesmc Rd_VPR128.16B, Rn_VPR128.16B +is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Rd_VPR128 & Zd +{ + Rd_VPR128.16B = 
NEON_aesmc(Rd_VPR128.16B, Rn_VPR128.16B); +} + +# C7.2.11 AND (vector) page C7-1415 line 78322 MATCH x0e201c00/mask=xbfe0fc00 +# CONSTRUCT x4e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$&@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_and/2@1 +# AUNIT --inst x4e201c00/mask=xffe0fc00 --status pass + +:and Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rn_VPR128.16B & Rm_VPR128.16B on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] & Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] & Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] & Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] & Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] & Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] & Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] & Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] & Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] & Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] & Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] & Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] & Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] & Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] & Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] & Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] & Rm_VPR128.16B[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.11 AND (vector) page C7-1415 line 78322 MATCH x0e201c00/mask=xbfe0fc00 +# CONSTRUCT x0e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =& +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_and/2@1 +# AUNIT --inst x0e201c00/mask=xffe0fc00 --status pass + +:and Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = Rn_VPR64.8B & Rm_VPR64.8B; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.12 BCAX page C7-1416 line 78391 MATCH xce200000/mask=xffe08000 +# CONSTRUCT xce200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 ARG4 $~@1 $&@1 =$|@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG3 =NEON_bcax/3@1 +# AUNIT --inst xce200000/mask=xffe08000 --status noqemu + +:bcax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B +is b_2131=0b11001110001 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Ra_VPR128.16B & Zd +{ + # simd unary TMPQ1 = ~(Ra_VPR128.16B) on lane size 1 + TMPQ1[0,8] = ~(Ra_VPR128.16B[0,8]); + TMPQ1[8,8] = ~(Ra_VPR128.16B[8,8]); + TMPQ1[16,8] = ~(Ra_VPR128.16B[16,8]); + TMPQ1[24,8] = ~(Ra_VPR128.16B[24,8]); + TMPQ1[32,8] = ~(Ra_VPR128.16B[32,8]); + TMPQ1[40,8] = ~(Ra_VPR128.16B[40,8]); + TMPQ1[48,8] = ~(Ra_VPR128.16B[48,8]); + TMPQ1[56,8] = ~(Ra_VPR128.16B[56,8]); + TMPQ1[64,8] = ~(Ra_VPR128.16B[64,8]); + TMPQ1[72,8] = ~(Ra_VPR128.16B[72,8]); + TMPQ1[80,8] = ~(Ra_VPR128.16B[80,8]); + TMPQ1[88,8] = ~(Ra_VPR128.16B[88,8]); + TMPQ1[96,8] = ~(Ra_VPR128.16B[96,8]); + TMPQ1[104,8] = ~(Ra_VPR128.16B[104,8]); + TMPQ1[112,8] = ~(Ra_VPR128.16B[112,8]); + TMPQ1[120,8] = ~(Ra_VPR128.16B[120,8]); + # simd infix TMPQ2 = Rm_VPR128.16B & TMPQ1 on lane size 1 + TMPQ2[0,8] = Rm_VPR128.16B[0,8] & TMPQ1[0,8]; + 
TMPQ2[8,8] = Rm_VPR128.16B[8,8] & TMPQ1[8,8]; + TMPQ2[16,8] = Rm_VPR128.16B[16,8] & TMPQ1[16,8]; + TMPQ2[24,8] = Rm_VPR128.16B[24,8] & TMPQ1[24,8]; + TMPQ2[32,8] = Rm_VPR128.16B[32,8] & TMPQ1[32,8]; + TMPQ2[40,8] = Rm_VPR128.16B[40,8] & TMPQ1[40,8]; + TMPQ2[48,8] = Rm_VPR128.16B[48,8] & TMPQ1[48,8]; + TMPQ2[56,8] = Rm_VPR128.16B[56,8] & TMPQ1[56,8]; + TMPQ2[64,8] = Rm_VPR128.16B[64,8] & TMPQ1[64,8]; + TMPQ2[72,8] = Rm_VPR128.16B[72,8] & TMPQ1[72,8]; + TMPQ2[80,8] = Rm_VPR128.16B[80,8] & TMPQ1[80,8]; + TMPQ2[88,8] = Rm_VPR128.16B[88,8] & TMPQ1[88,8]; + TMPQ2[96,8] = Rm_VPR128.16B[96,8] & TMPQ1[96,8]; + TMPQ2[104,8] = Rm_VPR128.16B[104,8] & TMPQ1[104,8]; + TMPQ2[112,8] = Rm_VPR128.16B[112,8] & TMPQ1[112,8]; + TMPQ2[120,8] = Rm_VPR128.16B[120,8] & TMPQ1[120,8]; + # simd infix Rd_VPR128.16B = Rn_VPR128.16B | TMPQ2 on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | TMPQ2[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | TMPQ2[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | TMPQ2[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | TMPQ2[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | TMPQ2[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | TMPQ2[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | TMPQ2[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | TMPQ2[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | TMPQ2[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | TMPQ2[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | TMPQ2[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | TMPQ2[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | TMPQ2[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | TMPQ2[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | TMPQ2[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | TMPQ2[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 +# CONSTRUCT x2f001400/mask=xfff89c00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 Imm_neon_uimm8Shift:4 ~ &=$& +# SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:4 &=NEON_bic/2@4 +# AUNIT --inst x2f001400/mask=xfff89c00 --status pass + +:bic Rd_VPR64.2S, abcdefgh +is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR64.2S & Zd +{ + local tmp1:4 = ~ Imm_neon_uimm8Shift:4; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S & tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] & tmp1; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] & tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x2f009400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 Imm_neon_uimm8Shift:2 ~ &=$& +# SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:2 &=NEON_bic/2@2 +# AUNIT --inst x2f009400/mask=xfff8dc00 --status 
pass + +:bic Rd_VPR64.4H, abcdefgh +is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & abcdefgh & b_1923=0x0 & b_1415=2 & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR64.4H & Zd +{ + local tmp1:2 = ~ Imm_neon_uimm8Shift:2; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H & tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] & tmp1; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] & tmp1; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] & tmp1; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] & tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 +# CONSTRUCT x6f001400/mask=xfff89c00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 Imm_neon_uimm8Shift:4 ~ &=$& +# SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:4 &=NEON_bic/2@4 +# AUNIT --inst x6f001400/mask=xfff89c00 --status pass + +:bic Rd_VPR128.4S, abcdefgh +is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR128.4S & Zd +{ + local tmp1:4 = ~ Imm_neon_uimm8Shift:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S & tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] & tmp1; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] & tmp1; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] & tmp1; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] & tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x6f009400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 Imm_neon_uimm8Shift:2 ~ &=$& +# SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:2 &=NEON_bic/2@2 +# AUNIT --inst x6f009400/mask=xfff8dc00 --status pass + +:bic Rd_VPR128.8H, abcdefgh +is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & abcdefgh & b_1415=2 & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR128.8H & Zd +{ + local tmp1:2 = ~ Imm_neon_uimm8Shift:2; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H & tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] & tmp1; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] & tmp1; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] & tmp1; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] & tmp1; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] & tmp1; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] & tmp1; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] & tmp1; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] & tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.21 BIC (vector, register) page C7-1430 line 79136 MATCH x0e601c00/mask=xbfe0fc00 +# CONSTRUCT x4e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $~@1 =$&@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_bic/2@1 +# AUNIT --inst x4e601c00/mask=xffe0fc00 --status pass + +:bic Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & 
Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd unary TMPQ1 = ~(Rm_VPR128.16B) on lane size 1 + TMPQ1[0,8] = ~(Rm_VPR128.16B[0,8]); + TMPQ1[8,8] = ~(Rm_VPR128.16B[8,8]); + TMPQ1[16,8] = ~(Rm_VPR128.16B[16,8]); + TMPQ1[24,8] = ~(Rm_VPR128.16B[24,8]); + TMPQ1[32,8] = ~(Rm_VPR128.16B[32,8]); + TMPQ1[40,8] = ~(Rm_VPR128.16B[40,8]); + TMPQ1[48,8] = ~(Rm_VPR128.16B[48,8]); + TMPQ1[56,8] = ~(Rm_VPR128.16B[56,8]); + TMPQ1[64,8] = ~(Rm_VPR128.16B[64,8]); + TMPQ1[72,8] = ~(Rm_VPR128.16B[72,8]); + TMPQ1[80,8] = ~(Rm_VPR128.16B[80,8]); + TMPQ1[88,8] = ~(Rm_VPR128.16B[88,8]); + TMPQ1[96,8] = ~(Rm_VPR128.16B[96,8]); + TMPQ1[104,8] = ~(Rm_VPR128.16B[104,8]); + TMPQ1[112,8] = ~(Rm_VPR128.16B[112,8]); + TMPQ1[120,8] = ~(Rm_VPR128.16B[120,8]); + # simd infix Rd_VPR128.16B = Rn_VPR128.16B & TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] & TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] & TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] & TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] & TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] & TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] & TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] & TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] & TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] & TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] & TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] & TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] & TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] & TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] & TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] & TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] & TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.21 BIC (vector, register) page C7-1430 line 79136 MATCH x0e601c00/mask=xbfe0fc00 +# CONSTRUCT x0e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $~@1 =$&@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_bic/2@1 +# AUNIT --inst x0e601c00/mask=xffe0fc00 --status pass + +:bic Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd unary TMPD1 = ~(Rm_VPR64.8B) on lane size 1 + TMPD1[0,8] = ~(Rm_VPR64.8B[0,8]); + TMPD1[8,8] = ~(Rm_VPR64.8B[8,8]); + TMPD1[16,8] = ~(Rm_VPR64.8B[16,8]); + TMPD1[24,8] = ~(Rm_VPR64.8B[24,8]); + TMPD1[32,8] = ~(Rm_VPR64.8B[32,8]); + TMPD1[40,8] = ~(Rm_VPR64.8B[40,8]); + TMPD1[48,8] = ~(Rm_VPR64.8B[48,8]); + TMPD1[56,8] = ~(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR64.8B = Rn_VPR64.8B & TMPD1 on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] & TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] & TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] & TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] & TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] & TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] & TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] & TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] & TMPD1[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.22 BIF page C7-1432 line 79219 MATCH x2ee01c00/mask=xbfe0fc00 +# CONSTRUCT x6ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bif/3@1 +# AUNIT --inst x6ee01c00/mask=xffe0fc00 --status nopcodeop + +:bif Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & 
Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_bif(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.22 BIF page C7-1432 line 79219 MATCH x2ee01c00/mask=xbfe0fc00 +# CONSTRUCT x2ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bif/3@1 +# AUNIT --inst x2ee01c00/mask=xffe0fc00 --status nopcodeop + +:bif Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_bif(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.23 BIT page C7-1434 line 79302 MATCH x2ea01c00/mask=xbfe0fc00 +# CONSTRUCT x6ea01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bit/3@1 +# AUNIT --inst x6ea01c00/mask=xffe0fc00 --status nopcodeop + +:bit Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_bit(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.23 BIT page C7-1434 line 79302 MATCH x2ea01c00/mask=xbfe0fc00 +# CONSTRUCT x2ea01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bit/3@1 +# AUNIT --inst x2ea01c00/mask=xffe0fc00 --status nopcodeop + +:bit Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_bit(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.24 BSL page C7-1436 line 79384 MATCH x2e601c00/mask=xbfe0fc00 +# CONSTRUCT x6e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bsl/3@1 +# AUNIT --inst x6e601c00/mask=xffe0fc00 --status nopcodeop + +:bsl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_bsl(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.24 BSL page C7-1436 line 79384 MATCH x2e601c00/mask=xbfe0fc00 +# CONSTRUCT x2e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bsl/3@1 +# AUNIT --inst x2e601c00/mask=xffe0fc00 --status nopcodeop + +:bsl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_bsl(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 +# CONSTRUCT x0e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@1 +# AUNIT --inst x0e204800/mask=xfffffc00 --status nopcodeop +# CLS (vector) SIMD 8B when size = 00 , Q = 0 + +:cls Rd_VPR64.8B, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR64.8B & Rn_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cls(Rn_VPR64.8B, 1:1); +} + +# C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 +# CONSTRUCT x4e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@1 +# AUNIT --inst x4e204800/mask=xfffffc00 --status nopcodeop +# CLS (vector) SIMD 16B when size = 00 , Q = 1 + +:cls Rd_VPR128.16B, 
Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR128.16B & Rn_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cls(Rn_VPR128.16B, 1:1); +} + +# C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 +# CONSTRUCT x0e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@2 +# AUNIT --inst x0e604800/mask=xfffffc00 --status nopcodeop +# CLS (vector) SIMD 4H when size = 01 , Q = 0 + +:cls Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cls(Rn_VPR64.4H, 2:1); +} + +# C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 +# CONSTRUCT x4e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@2 +# AUNIT --inst x4e604800/mask=xfffffc00 --status nopcodeop +# CLS (vector) SIMD 8H when size = 01 , Q = 1 + +:cls Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cls(Rn_VPR128.8H, 2:1); +} + +# C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 +# CONSTRUCT x0ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@4 +# AUNIT --inst x0ea04800/mask=xfffffc00 --status nopcodeop +# CLS (vector) SIMD 2S when size = 10 , Q = 0 + +:cls Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cls(Rn_VPR64.2S, 4:1); +} + +# C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 +# CONSTRUCT x4ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@4 +# AUNIT --inst x4ea04800/mask=xfffffc00 --status nopcodeop +# CLS (vector) SIMD 4S when size = 10 , Q = 1 + +:cls Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cls(Rn_VPR128.4S, 4:1); +} + +# C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 +# CONSTRUCT x2e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@1 +# AUNIT --inst x2e204800/mask=xfffffc00 --status nopcodeop +# CLZ (vector) SIMD 8B when size = 00 , Q = 0 + +:clz Rd_VPR64.8B, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR64.8B & Rn_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_clz(Rn_VPR64.8B, 1:1); +} + +# C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 +# CONSTRUCT x6e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@1 +# AUNIT --inst x6e204800/mask=xfffffc00 --status nopcodeop +# CLZ (vector) SIMD 16B when size = 00 , Q = 1 + +:clz Rd_VPR128.16B, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR128.16B & Rn_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_clz(Rn_VPR128.16B, 1:1); +} + +# C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 +# CONSTRUCT x2e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@2 +# AUNIT --inst x2e604800/mask=xfffffc00 --status nopcodeop +# CLZ (vector) SIMD 4H when size = 01 , Q = 0 + +:clz Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & 
Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_clz(Rn_VPR64.4H, 2:1); +} + +# C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 +# CONSTRUCT x6e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@2 +# AUNIT --inst x6e604800/mask=xfffffc00 --status nopcodeop +# CLZ (vector) SIMD 8H when size = 01 , Q = 1 + +:clz Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_clz(Rn_VPR128.8H, 2:1); +} + +# C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 +# CONSTRUCT x2ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@4 +# AUNIT --inst x2ea04800/mask=xfffffc00 --status nopcodeop +# CLZ (vector) SIMD 2S when size = 10 , Q = 0 + +:clz Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_clz(Rn_VPR64.2S, 4:1); +} + +# C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 +# CONSTRUCT x6ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@4 +# AUNIT --inst x6ea04800/mask=xfffffc00 --status nopcodeop +# CLZ (vector) SIMD 4S when size = 10 , Q = 1 + +:clz Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_clz(Rn_VPR128.4S, 4:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x7e208c00/mask=xff20fc00 +# CONSTRUCT x7ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 dup dup dup ARG2 ARG3 equal:1 zext:8 0:8 ~ =* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2 +# AUNIT --inst x7ee08c00/mask=xffe0fc00 --status pass +# CMEQ (register) Scalar + +:cmeq Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + local tmp1:1 = Rn_FPR64 == Rm_FPR64; + local tmp2:8 = zext(tmp1); + local tmp3:8 = ~ 0:8; + Rd_FPR64 = tmp2 * tmp3; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x2e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@1 +# AUNIT --inst x2e208c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 8B when size = 00 , Q = 0 + +:cmeq Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & Rd_VPR64.8B & Rn_VPR64.8B & Rm_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmeq(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x6e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@1 +# AUNIT --inst x6e208c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 16B when size = 00 , Q = 1 + +:cmeq Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmeq(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x2e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@2 +# AUNIT --inst 
x2e608c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 4H when size = 01 , Q = 0 + +:cmeq Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmeq(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x6e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@2 +# AUNIT --inst x6e608c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 8H when size = 01 , Q = 1 + +:cmeq Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmeq(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x2ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@4 +# AUNIT --inst x2ea08c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 2S when size = 10 , Q = 0 + +:cmeq Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmeq(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x6ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@4 +# AUNIT --inst x6ea08c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 4S when size = 10 , Q = 1 + +:cmeq Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmeq(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 +# CONSTRUCT x6ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@8 +# AUNIT --inst x6ee08c00/mask=xffe0fc00 --status nopcodeop +# CMEQ (register) SIMD 2D when size = 11 , Q = 1 + +:cmeq Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmeq(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x4e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmeq/2@1 +# AUNIT --inst x4e209800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR128.16B, Rn_VPR128.16B, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmeq(Rn_VPR128.16B, 0:1, 1:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x4ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmeq/2@8 +# AUNIT --inst x4ee09800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmeq(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 
line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x0ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmeq/2@4 +# AUNIT --inst x0ea09800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmeq(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x0e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmeq/2@2 +# AUNIT --inst x0e609800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR64.4H, Rn_VPR64.4H, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmeq(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x4ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmeq/2@4 +# AUNIT --inst x4ea09800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmeq(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x0e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmeq/2@1 +# AUNIT --inst x0e209800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR64.8B, Rn_VPR64.8B, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmeq(Rn_VPR64.8B, 0:1, 1:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 +# CONSTRUCT x4e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmeq/2@2 +# AUNIT --inst x4e609800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_VPR128.8H, Rn_VPR128.8H, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmeq(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x5e209800/mask=xff3ffc00 +# CONSTRUCT x5ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmeq/2 +# AUNIT --inst x5ee09800/mask=xfffffc00 --status nopcodeop + +:cmeq Rd_FPR64, Rn_FPR64, "#0" +is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmeq(Rn_FPR64, 0:4); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x4e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@1 +# AUNIT --inst x4e203c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x7 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmge(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x4ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@8 +# AUNIT --inst x4ee03c00/mask=xffe0fc00 --status 
nopcodeop + +:cmge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x7 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmge(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x0ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@4 +# AUNIT --inst x0ea03c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x7 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmge(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x0e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@2 +# AUNIT --inst x0e603c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x7 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmge(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x4ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@4 +# AUNIT --inst x4ea03c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x7 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmge(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x0e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@1 +# AUNIT --inst x0e203c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x7 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmge(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 +# CONSTRUCT x4e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@2 +# AUNIT --inst x4e603c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x7 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmge(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x5e203c00/mask=xff20fc00 +# CONSTRUCT x5ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2 +# AUNIT --inst x5ee03c00/mask=xffe0fc00 --status nopcodeop + +:cmge Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmge(Rn_FPR64, Rm_FPR64); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x6e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmge/2@1 +# AUNIT --inst x6e208800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR128.16B, 
Rn_VPR128.16B, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmge(Rn_VPR128.16B, 0:1, 1:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x6ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmge/2@8 +# AUNIT --inst x6ee08800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmge(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x2ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmge/2@4 +# AUNIT --inst x2ea08800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmge(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x2e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmge/2@2 +# AUNIT --inst x2e608800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR64.4H, Rn_VPR64.4H, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmge(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x6ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmge/2@4 +# AUNIT --inst x6ea08800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmge(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x2e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmge/2@1 +# AUNIT --inst x2e208800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR64.8B, Rn_VPR64.8B, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmge(Rn_VPR64.8B, 0:1, 1:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 +# CONSTRUCT x6e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmge/2@2 +# AUNIT --inst x6e608800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_VPR128.8H, Rn_VPR128.8H, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmge(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x7e208800/mask=xff3ffc00 +# CONSTRUCT x7ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmge/2 +# AUNIT --inst x7ee08800/mask=xfffffc00 --status nopcodeop + +:cmge Rd_FPR64, Rn_FPR64, "#0" +is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmge(Rn_FPR64, 0:4); +} + +# C7.2.31 CMGT (register) page C7-1452 
line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x4e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@1 +# AUNIT --inst x4e203400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmgt(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x4ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@8 +# AUNIT --inst x4ee03400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmgt(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x0ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@4 +# AUNIT --inst x0ea03400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmgt(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x0e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@2 +# AUNIT --inst x0e603400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmgt(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x4ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@4 +# AUNIT --inst x4ea03400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmgt(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x0e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@1 +# AUNIT --inst x0e203400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmgt(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 +# CONSTRUCT x4e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@2 +# AUNIT --inst x4e603400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmgt(Rn_VPR128.8H, Rm_VPR128.8H, 
2:1); +} + +# C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x5e203400/mask=xff20fc00 +# CONSTRUCT x5ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2 +# AUNIT --inst x5ee03400/mask=xffe0fc00 --status nopcodeop + +:cmgt Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmgt(Rn_FPR64, Rm_FPR64); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x4e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmgt/2@1 +# AUNIT --inst x4e208800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR128.16B, Rn_VPR128.16B, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmgt(Rn_VPR128.16B, 0:1, 1:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x4ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmgt/2@8 +# AUNIT --inst x4ee08800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmgt(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x0ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmgt/2@4 +# AUNIT --inst x0ea08800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmgt(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x0e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmgt/2@2 +# AUNIT --inst x0e608800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR64.4H, Rn_VPR64.4H, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmgt(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x4ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmgt/2@4 +# AUNIT --inst x4ea08800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmgt(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x0e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmgt/2@1 +# AUNIT --inst x0e208800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR64.8B, Rn_VPR64.8B, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmgt(Rn_VPR64.8B, 0:1, 1:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 +# CONSTRUCT x4e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmgt/2@2 +# AUNIT --inst 
x4e608800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_VPR128.8H, Rn_VPR128.8H, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmgt(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x5e208800/mask=xff3ffc00 +# CONSTRUCT x5ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmgt/2 +# AUNIT --inst x5ee08800/mask=xfffffc00 --status nopcodeop + +:cmgt Rd_FPR64, Rn_FPR64, "#0" +is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmgt(Rn_FPR64, 0:8); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x6e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@1 +# AUNIT --inst x6e203400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmhi(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x6ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@8 +# AUNIT --inst x6ee03400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmhi(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x2ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@4 +# AUNIT --inst x2ea03400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmhi(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x2e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@2 +# AUNIT --inst x2e603400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmhi(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x6ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@4 +# AUNIT --inst x6ea03400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmhi(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x2e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@1 +# AUNIT --inst x2e203400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR64.8B, 
Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmhi(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 +# CONSTRUCT x6e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@2 +# AUNIT --inst x6e603400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmhi(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x7e203400/mask=xff20fc00 +# CONSTRUCT x7ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2 +# AUNIT --inst x7ee03400/mask=xffe0fc00 --status nopcodeop + +:cmhi Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmhi(Rn_FPR64, Rm_FPR64); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x6e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@1 +# AUNIT --inst x6e203c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x7 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmhs(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x6ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@8 +# AUNIT --inst x6ee03c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x7 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmhs(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x2ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@4 +# AUNIT --inst x2ea03c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x7 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmhs(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x2e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@2 +# AUNIT --inst x2e603c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x7 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmhs(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x6ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@4 +# AUNIT --inst x6ea03c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR128.4S, Rn_VPR128.4S, 
Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x7 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmhs(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x2e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@1 +# AUNIT --inst x2e203c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x7 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmhs(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 +# CONSTRUCT x6e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@2 +# AUNIT --inst x6e603c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x7 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmhs(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x7e203c00/mask=xff20fc00 +# CONSTRUCT x7ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2 +# AUNIT --inst x7ee03c00/mask=xffe0fc00 --status nopcodeop + +:cmhs Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmhs(Rn_FPR64, Rm_FPR64); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x6e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmle/2@1 +# AUNIT --inst x6e209800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR128.16B, Rn_VPR128.16B, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmle(Rn_VPR128.16B, 0:1, 1:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x6ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmle/2@8 +# AUNIT --inst x6ee09800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmle(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x2ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmle/2@4 +# AUNIT --inst x2ea09800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmle(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x2e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmle/2@2 +# AUNIT --inst x2e609800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR64.4H, Rn_VPR64.4H, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.4H & 
Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmle(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x6ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmle/2@4 +# AUNIT --inst x6ea09800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmle(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x2e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmle/2@1 +# AUNIT --inst x2e209800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR64.8B, Rn_VPR64.8B, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmle(Rn_VPR64.8B, 0:1, 1:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 +# CONSTRUCT x6e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmle/2@2 +# AUNIT --inst x6e609800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_VPR128.8H, Rn_VPR128.8H, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmle(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x7e209800/mask=xff3ffc00 +# CONSTRUCT x7ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmle/2 +# AUNIT --inst x7ee09800/mask=xfffffc00 --status nopcodeop + +:cmle Rd_FPR64, Rn_FPR64, "#0" +is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmle(Rn_FPR64, 0:8); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x4e20a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmlt/2@1 +# AUNIT --inst x4e20a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR128.16B, Rn_VPR128.16B, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmlt(Rn_VPR128.16B, 0:1, 1:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x4ee0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmlt/2@8 +# AUNIT --inst x4ee0a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmlt(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x0ea0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmlt/2@4 +# AUNIT --inst x0ea0a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmlt(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x0e60a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 
ARG2 0:2 =NEON_cmlt/2@2 +# AUNIT --inst x0e60a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR64.4H, Rn_VPR64.4H, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmlt(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x4ea0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmlt/2@4 +# AUNIT --inst x4ea0a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmlt(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x0e20a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmlt/2@1 +# AUNIT --inst x0e20a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR64.8B, Rn_VPR64.8B, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmlt(Rn_VPR64.8B, 0:1, 1:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 +# CONSTRUCT x4e60a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmlt/2@2 +# AUNIT --inst x4e60a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_VPR128.8H, Rn_VPR128.8H, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmlt(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x5e20a800/mask=xff3ffc00 +# CONSTRUCT x5ee0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmlt/2 +# AUNIT --inst x5ee0a800/mask=xfffffc00 --status nopcodeop + +:cmlt Rd_FPR64, Rn_FPR64, "#0" +is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101010 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmlt(Rn_FPR64, 0:8); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x4e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@1 +# AUNIT --inst x4e208c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x11 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cmtst(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x4ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@8 +# AUNIT --inst x4ee08c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_cmtst(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x0ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@4 +# AUNIT --inst x0ea08c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & 
b_2121=1 & Rm_VPR64.2S & b_1115=0x11 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_cmtst(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x0e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@2 +# AUNIT --inst x0e608c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x11 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_cmtst(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x4ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@4 +# AUNIT --inst x4ea08c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_cmtst(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x0e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@1 +# AUNIT --inst x0e208c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x11 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cmtst(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 +# CONSTRUCT x4e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@2 +# AUNIT --inst x4e608c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_cmtst(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.37 CMTST page C7-1466 line 81106 MATCH x5e208c00/mask=xff20fc00 +# CONSTRUCT x5ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2 +# AUNIT --inst x5ee08c00/mask=xffe0fc00 --status nopcodeop + +:cmtst Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_cmtst(Rn_FPR64, Rm_FPR64); +} + +# C7.2.38 CNT page C7-1468 line 81245 MATCH x0e205800/mask=xbf3ffc00 +# CONSTRUCT x4e205800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cnt/1@1 +# AUNIT --inst x4e205800/mask=xfffffc00 --status nopcodeop + +:cnt Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_cnt(Rn_VPR128.16B, 1:1); +} + +# C7.2.38 CNT page C7-1468 line 81245 MATCH x0e205800/mask=xbf3ffc00 +# CONSTRUCT x0e205800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_cnt/1@1 +# AUNIT --inst x0e205800/mask=xfffffc00 --status nopcodeop + +:cnt Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_cnt(Rn_VPR64.8B, 1:1); +} + +# C7.2.39 DUP (element) 
page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# CONSTRUCT x4e010400/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 +# AUNIT --inst x4e010400/mask=xffe1fc00 --status pass + +:dup Rd_VPR128.16B, Rn_VPR128.B.imm_neon_uimm4 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.16B & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + # simd duplicate Rd_VPR128.16B = all elements tmp1 (lane size 1) + Rd_VPR128.16B[0,8] = tmp1; + Rd_VPR128.16B[8,8] = tmp1; + Rd_VPR128.16B[16,8] = tmp1; + Rd_VPR128.16B[24,8] = tmp1; + Rd_VPR128.16B[32,8] = tmp1; + Rd_VPR128.16B[40,8] = tmp1; + Rd_VPR128.16B[48,8] = tmp1; + Rd_VPR128.16B[56,8] = tmp1; + Rd_VPR128.16B[64,8] = tmp1; + Rd_VPR128.16B[72,8] = tmp1; + Rd_VPR128.16B[80,8] = tmp1; + Rd_VPR128.16B[88,8] = tmp1; + Rd_VPR128.16B[96,8] = tmp1; + Rd_VPR128.16B[104,8] = tmp1; + Rd_VPR128.16B[112,8] = tmp1; + Rd_VPR128.16B[120,8] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# CONSTRUCT x4e080400/mask=xffeffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@8 +# AUNIT --inst x4e080400/mask=xffeffc00 --status pass + +:dup Rd_VPR128.2D, Rn_VPR128.D.imm_neon_uimm1 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.2D & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 + local tmp1:8 = Rn_VPR128.D.imm_neon_uimm1; + # simd duplicate Rd_VPR128.2D = all elements tmp1 (lane size 8) + Rd_VPR128.2D[0,64] = tmp1; + Rd_VPR128.2D[64,64] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# CONSTRUCT x0e040400/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 +# AUNIT --inst x0e040400/mask=xffe7fc00 --status pass + +:dup Rd_VPR64.2S, Rn_VPR128.S.imm_neon_uimm2 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.2S & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) + Rd_VPR64.2S[0,32] = tmp1; + Rd_VPR64.2S[32,32] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# CONSTRUCT x0e020400/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 +# AUNIT --inst x0e020400/mask=xffe3fc00 --status pass + +:dup Rd_VPR64.4H, Rn_VPR128.H.imm_neon_uimm3 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.4H & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# 
CONSTRUCT x4e040400/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 +# AUNIT --inst x4e040400/mask=xffe7fc00 --status pass + +:dup Rd_VPR128.4S, Rn_VPR128.S.imm_neon_uimm2 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.4S & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) + Rd_VPR128.4S[0,32] = tmp1; + Rd_VPR128.4S[32,32] = tmp1; + Rd_VPR128.4S[64,32] = tmp1; + Rd_VPR128.4S[96,32] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# CONSTRUCT x0e010400/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 +# AUNIT --inst x0e010400/mask=xffe1fc00 --status pass + +:dup Rd_VPR64.8B, Rn_VPR128.B.imm_neon_uimm4 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.8B & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + # simd duplicate Rd_VPR64.8B = all elements tmp1 (lane size 1) + Rd_VPR64.8B[0,8] = tmp1; + Rd_VPR64.8B[8,8] = tmp1; + Rd_VPR64.8B[16,8] = tmp1; + Rd_VPR64.8B[24,8] = tmp1; + Rd_VPR64.8B[32,8] = tmp1; + Rd_VPR64.8B[40,8] = tmp1; + Rd_VPR64.8B[48,8] = tmp1; + Rd_VPR64.8B[56,8] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 +# CONSTRUCT x4e020400/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 +# AUNIT --inst x4e020400/mask=xffe3fc00 --status pass + +:dup Rd_VPR128.8H, Rn_VPR128.H.imm_neon_uimm3 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.8H & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 +# C7.2.199 MOV (scalar) page C7-1854 line 104019 MATCH x5e000400/mask=xffe0fc00 +# CONSTRUCT x5e010400/mask=xffe1fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 =ARG2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 +# AUNIT --inst x5e010400/mask=xffe1fc00 --status pass + +:dup Rd_FPR8, Rn_VPR128.B.imm_neon_uimm4 +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR8 & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 + Rd_FPR8 = Rn_VPR128.B.imm_neon_uimm4; + zext_zb(Zd); # zero upper 31 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 +# C7.2.199 MOV (scalar) page C7-1854 line 104019 MATCH x5e000400/mask=xffe0fc00 +# CONSTRUCT x5e080400/mask=xffeffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 =ARG2 +# SMACRO(pseudo) ARG1 ARG2 
&=NEON_dup/2@8 +# AUNIT --inst x5e080400/mask=xffeffc00 --status pass + +:dup Rd_FPR64, Rn_VPR128.D.imm_neon_uimm1 +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR64 & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 + Rd_FPR64 = Rn_VPR128.D.imm_neon_uimm1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 +# C7.2.199 MOV (scalar) page C7-1854 line 104019 MATCH x5e000400/mask=xffe0fc00 +# CONSTRUCT x5e020400/mask=xffe3fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 =ARG2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 +# AUNIT --inst x5e020400/mask=xffe3fc00 --status pass + +:dup Rd_FPR16, Rn_VPR128.H.imm_neon_uimm3 +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR16 & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 + Rd_FPR16 = Rn_VPR128.H.imm_neon_uimm3; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 +# C7.2.199 MOV (scalar) page C7-1854 line 104019 MATCH x5e000400/mask=xffe0fc00 +# CONSTRUCT x5e040400/mask=xffe7fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 =ARG2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 +# AUNIT --inst x5e040400/mask=xffe7fc00 --status pass + +:dup Rd_FPR32, Rn_VPR128.S.imm_neon_uimm2 +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR32 & Zd +{ + # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 + Rd_FPR32 = Rn_VPR128.S.imm_neon_uimm2; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x4e010c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2[0]:1 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 +# AUNIT --inst x4e010c00/mask=xffe1fc00 --status pass + +:dup Rd_VPR128.16B, Rn_GPR32 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.16B & Zd +{ + local tmp1:1 = Rn_GPR32[0,8]; + # simd duplicate Rd_VPR128.16B = all elements tmp1 (lane size 1) + Rd_VPR128.16B[0,8] = tmp1; + Rd_VPR128.16B[8,8] = tmp1; + Rd_VPR128.16B[16,8] = tmp1; + Rd_VPR128.16B[24,8] = tmp1; + Rd_VPR128.16B[32,8] = tmp1; + Rd_VPR128.16B[40,8] = tmp1; + Rd_VPR128.16B[48,8] = tmp1; + Rd_VPR128.16B[56,8] = tmp1; + Rd_VPR128.16B[64,8] = tmp1; + Rd_VPR128.16B[72,8] = tmp1; + Rd_VPR128.16B[80,8] = tmp1; + Rd_VPR128.16B[88,8] = tmp1; + Rd_VPR128.16B[96,8] = tmp1; + Rd_VPR128.16B[104,8] = tmp1; + Rd_VPR128.16B[112,8] = tmp1; + Rd_VPR128.16B[120,8] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x4e080c00/mask=xffeffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@8 +# AUNIT --inst x4e080c00/mask=xffeffc00 --status pass + +:dup Rd_VPR128.2D, Rn_GPR64 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1619=0b1000 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR64 & Rd_VPR128.2D & Zd +{ + # simd duplicate Rd_VPR128.2D = all elements Rn_GPR64 (lane size 8) + Rd_VPR128.2D[0,64] = Rn_GPR64; + Rd_VPR128.2D[64,64] = Rn_GPR64; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 
81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x0e040c00/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 +# AUNIT --inst x0e040c00/mask=xffe7fc00 --status pass + +:dup Rd_VPR64.2S, Rn_GPR32 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.2S & Zd +{ + # simd duplicate Rd_VPR64.2S = all elements Rn_GPR32 (lane size 4) + Rd_VPR64.2S[0,32] = Rn_GPR32; + Rd_VPR64.2S[32,32] = Rn_GPR32; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x0e020c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 +# AUNIT --inst x0e020c00/mask=xffe3fc00 --status pass + +:dup Rd_VPR64.4H, Rn_GPR32 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.4H & Zd +{ + local tmp1:2 = Rn_GPR32[0,16]; + # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x4e040c00/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 +# AUNIT --inst x4e040c00/mask=xffe7fc00 --status pass + +:dup Rd_VPR128.4S, Rn_GPR32 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.4S & Zd +{ + # simd duplicate Rd_VPR128.4S = all elements Rn_GPR32 (lane size 4) + Rd_VPR128.4S[0,32] = Rn_GPR32; + Rd_VPR128.4S[32,32] = Rn_GPR32; + Rd_VPR128.4S[64,32] = Rn_GPR32; + Rd_VPR128.4S[96,32] = Rn_GPR32; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x0e010c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:1 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 +# AUNIT --inst x0e010c00/mask=xffe1fc00 --status pass + +:dup Rd_VPR64.8B, Rn_GPR32 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.8B & Zd +{ + local tmp1:1 = Rn_GPR32[0,8]; + # simd duplicate Rd_VPR64.8B = all elements tmp1 (lane size 1) + Rd_VPR64.8B[0,8] = tmp1; + Rd_VPR64.8B[8,8] = tmp1; + Rd_VPR64.8B[16,8] = tmp1; + Rd_VPR64.8B[24,8] = tmp1; + Rd_VPR64.8B[32,8] = tmp1; + Rd_VPR64.8B[40,8] = tmp1; + Rd_VPR64.8B[48,8] = tmp1; + Rd_VPR64.8B[56,8] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 +# CONSTRUCT x4e020c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 +# AUNIT --inst x4e020c00/mask=xffe3fc00 --status pass + +:dup Rd_VPR128.8H, Rn_GPR32 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.8H & Zd +{ + local tmp1:2 = Rn_GPR32[0,16]; + # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; + 
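+ # Note: zext_zq/zext_zd/zext_zs/zext_zh/zext_zb are helper macros defined
+ # elsewhere in this spec; they clear the bytes of the 32-byte Zd register
+ # above the part just written, since a write to a V register zeroes the rest
+ # of the overlapping Z register. A minimal sketch of the Q form, assuming Zd
+ # names the full 256-bit register:
+ #   macro zext_zq(zreg) { zreg[128,128] = 0; }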
zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.41 EOR (vector) page C7-1475 line 81603 MATCH x2e201c00/mask=xbfe0fc00
+# CONSTRUCT x6e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$^@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_eor/2@1
+# AUNIT --inst x6e201c00/mask=xffe0fc00 --status pass
+
+:eor Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+ # simd infix Rd_VPR128.16B = Rn_VPR128.16B ^ Rm_VPR128.16B on lane size 1
+ Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] ^ Rm_VPR128.16B[0,8];
+ Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] ^ Rm_VPR128.16B[8,8];
+ Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] ^ Rm_VPR128.16B[16,8];
+ Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] ^ Rm_VPR128.16B[24,8];
+ Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] ^ Rm_VPR128.16B[32,8];
+ Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] ^ Rm_VPR128.16B[40,8];
+ Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] ^ Rm_VPR128.16B[48,8];
+ Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] ^ Rm_VPR128.16B[56,8];
+ Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] ^ Rm_VPR128.16B[64,8];
+ Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] ^ Rm_VPR128.16B[72,8];
+ Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] ^ Rm_VPR128.16B[80,8];
+ Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] ^ Rm_VPR128.16B[88,8];
+ Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] ^ Rm_VPR128.16B[96,8];
+ Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] ^ Rm_VPR128.16B[104,8];
+ Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] ^ Rm_VPR128.16B[112,8];
+ Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] ^ Rm_VPR128.16B[120,8];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.41 EOR (vector) page C7-1475 line 81603 MATCH x2e201c00/mask=xbfe0fc00
+# CONSTRUCT x2e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$^@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_eor/2@1
+# AUNIT --inst x2e201c00/mask=xffe0fc00 --status pass
+
+:eor Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+ # simd infix Rd_VPR64.8B = Rn_VPR64.8B ^ Rm_VPR64.8B on lane size 1
+ Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] ^ Rm_VPR64.8B[0,8];
+ Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] ^ Rm_VPR64.8B[8,8];
+ Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] ^ Rm_VPR64.8B[16,8];
+ Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] ^ Rm_VPR64.8B[24,8];
+ Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] ^ Rm_VPR64.8B[32,8];
+ Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] ^ Rm_VPR64.8B[40,8];
+ Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] ^ Rm_VPR64.8B[48,8];
+ Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] ^ Rm_VPR64.8B[56,8];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.42 EOR3 page C7-1477 line 81685 MATCH xce000000/mask=xffe08000
+# CONSTRUCT xce000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 ARG4 $^@1 =$^@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_eor3/3@1
+# AUNIT --inst xce000000/mask=xffe08000 --status noqemu
+
+:eor3 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B
+is b_2131=0b11001110000 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Ra_VPR128.16B & Zd
+{
+ # simd infix TMPQ1 = Rm_VPR128.16B ^ Ra_VPR128.16B on lane size 1
+ TMPQ1[0,8] = Rm_VPR128.16B[0,8] ^ Ra_VPR128.16B[0,8];
+ TMPQ1[8,8] = Rm_VPR128.16B[8,8] ^ Ra_VPR128.16B[8,8];
+ TMPQ1[16,8] = Rm_VPR128.16B[16,8] ^ Ra_VPR128.16B[16,8];
+ TMPQ1[24,8] = Rm_VPR128.16B[24,8] ^ Ra_VPR128.16B[24,8];
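+ # EOR3 (FEAT_SHA3) is a three-way exclusive OR: Vd = Vn EOR Vm EOR Va.
+ # Since EOR is bitwise, the per-lane expansion here is equivalent to the
+ # whole-register form (a sketch, not generated output):
+ #   Rd_VPR128.16B = Rn_VPR128.16B ^ Rm_VPR128.16B ^ Ra_VPR128.16B;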
+ TMPQ1[32,8] = Rm_VPR128.16B[32,8] ^ Ra_VPR128.16B[32,8];
+ TMPQ1[40,8] = Rm_VPR128.16B[40,8] ^ Ra_VPR128.16B[40,8];
+ TMPQ1[48,8] = Rm_VPR128.16B[48,8] ^ Ra_VPR128.16B[48,8];
+ TMPQ1[56,8] = Rm_VPR128.16B[56,8] ^ Ra_VPR128.16B[56,8];
+ TMPQ1[64,8] = Rm_VPR128.16B[64,8] ^ Ra_VPR128.16B[64,8];
+ TMPQ1[72,8] = Rm_VPR128.16B[72,8] ^ Ra_VPR128.16B[72,8];
+ TMPQ1[80,8] = Rm_VPR128.16B[80,8] ^ Ra_VPR128.16B[80,8];
+ TMPQ1[88,8] = Rm_VPR128.16B[88,8] ^ Ra_VPR128.16B[88,8];
+ TMPQ1[96,8] = Rm_VPR128.16B[96,8] ^ Ra_VPR128.16B[96,8];
+ TMPQ1[104,8] = Rm_VPR128.16B[104,8] ^ Ra_VPR128.16B[104,8];
+ TMPQ1[112,8] = Rm_VPR128.16B[112,8] ^ Ra_VPR128.16B[112,8];
+ TMPQ1[120,8] = Rm_VPR128.16B[120,8] ^ Ra_VPR128.16B[120,8];
+ # simd infix Rd_VPR128.16B = Rn_VPR128.16B ^ TMPQ1 on lane size 1
+ Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] ^ TMPQ1[0,8];
+ Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] ^ TMPQ1[8,8];
+ Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] ^ TMPQ1[16,8];
+ Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] ^ TMPQ1[24,8];
+ Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] ^ TMPQ1[32,8];
+ Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] ^ TMPQ1[40,8];
+ Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] ^ TMPQ1[48,8];
+ Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] ^ TMPQ1[56,8];
+ Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] ^ TMPQ1[64,8];
+ Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] ^ TMPQ1[72,8];
+ Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] ^ TMPQ1[80,8];
+ Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] ^ TMPQ1[88,8];
+ Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] ^ TMPQ1[96,8];
+ Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] ^ TMPQ1[104,8];
+ Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] ^ TMPQ1[112,8];
+ Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] ^ TMPQ1[120,8];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.43 EXT page C7-1478 line 81756 MATCH x2e000000/mask=xbfe08400
+# CONSTRUCT x6e000000/mask=xffe08400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 imm4:1 =NEON_ext/3@1
+# AUNIT --inst x6e000000/mask=xffe08400 --status nopcodeop
+
+:ext Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, imm4
+is b_3131=0 & q=1 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & imm4 & b_1010=0 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+ Rd_VPR128.16B = NEON_ext(Rn_VPR128.16B, Rm_VPR128.16B, imm4:1, 1:1);
+}
+
+# C7.2.43 EXT page C7-1478 line 81756 MATCH x2e000000/mask=xbfe08400
+# CONSTRUCT x2e000000/mask=xffe0c400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 imm4:1 =NEON_ext/3@1
+# AUNIT --inst x2e000000/mask=xffe0c400 --status nopcodeop
+
+:ext Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, imm4
+is b_3131=0 & q=0 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR64.8B & b_1415=0 & imm4 & b_1010=0 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+ Rd_VPR64.8B = NEON_ext(Rn_VPR64.8B, Rm_VPR64.8B, imm4:1, 1:1);
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x2ec01400/mask=xbfe0fc00
+# CONSTRUCT x2ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f-@2 =$fabs@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@2
+# AUNIT --inst x2ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant when Q=0 sz=1 bb=0 cc=00 F=VPR64.4H
+
+:fabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+ # simd infix TMPD1 = Rn_VPR64.4H f- Rm_VPR64.4H on lane size 2
+ TMPD1[0,16] = Rn_VPR64.4H[0,16] f- Rm_VPR64.4H[0,16];
+ TMPD1[16,16] = Rn_VPR64.4H[16,16] f- Rm_VPR64.4H[16,16];
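+ # FABD is the per-lane absolute difference: each lane is computed as
+ # fabs(Rn[i] f- Rm[i]), a p-code float subtract followed by abs()
+ # (FLOAT_ABS), exactly as the scalar form further below does in one step:
+ #   local tmp1:2 = Rn_FPR16 f- Rm_FPR16; Rd_FPR16 = abs(tmp1);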
+ TMPD1[32,16] = Rn_VPR64.4H[32,16] f- Rm_VPR64.4H[32,16];
+ TMPD1[48,16] = Rn_VPR64.4H[48,16] f- Rm_VPR64.4H[48,16];
+ # simd unary Rd_VPR64.4H = abs(TMPD1) on lane size 2
+ Rd_VPR64.4H[0,16] = abs(TMPD1[0,16]);
+ Rd_VPR64.4H[16,16] = abs(TMPD1[16,16]);
+ Rd_VPR64.4H[32,16] = abs(TMPD1[32,16]);
+ Rd_VPR64.4H[48,16] = abs(TMPD1[48,16]);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x2ec01400/mask=xbfe0fc00
+# CONSTRUCT x6ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f-@2 =$fabs@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@2
+# AUNIT --inst x6ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant when Q=1 sz=1 bb=0 cc=00 F=VPR128.8H
+
+:fabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.8H f- Rm_VPR128.8H on lane size 2
+ TMPQ1[0,16] = Rn_VPR128.8H[0,16] f- Rm_VPR128.8H[0,16];
+ TMPQ1[16,16] = Rn_VPR128.8H[16,16] f- Rm_VPR128.8H[16,16];
+ TMPQ1[32,16] = Rn_VPR128.8H[32,16] f- Rm_VPR128.8H[32,16];
+ TMPQ1[48,16] = Rn_VPR128.8H[48,16] f- Rm_VPR128.8H[48,16];
+ TMPQ1[64,16] = Rn_VPR128.8H[64,16] f- Rm_VPR128.8H[64,16];
+ TMPQ1[80,16] = Rn_VPR128.8H[80,16] f- Rm_VPR128.8H[80,16];
+ TMPQ1[96,16] = Rn_VPR128.8H[96,16] f- Rm_VPR128.8H[96,16];
+ TMPQ1[112,16] = Rn_VPR128.8H[112,16] f- Rm_VPR128.8H[112,16];
+ # simd unary Rd_VPR128.8H = abs(TMPQ1) on lane size 2
+ Rd_VPR128.8H[0,16] = abs(TMPQ1[0,16]);
+ Rd_VPR128.8H[16,16] = abs(TMPQ1[16,16]);
+ Rd_VPR128.8H[32,16] = abs(TMPQ1[32,16]);
+ Rd_VPR128.8H[48,16] = abs(TMPQ1[48,16]);
+ Rd_VPR128.8H[64,16] = abs(TMPQ1[64,16]);
+ Rd_VPR128.8H[80,16] = abs(TMPQ1[80,16]);
+ Rd_VPR128.8H[96,16] = abs(TMPQ1[96,16]);
+ Rd_VPR128.8H[112,16] = abs(TMPQ1[112,16]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x2ea0d400/mask=xbfa0fc00
+# CONSTRUCT x2ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f-@4 =$fabs@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@4
+# AUNIT --inst x2ea0d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant when Q=0 sz=0 bb=1 cc=11 F=VPR64.2S
+
+:fabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd
+{
+ # simd infix TMPD1 = Rn_VPR64.2S f- Rm_VPR64.2S on lane size 4
+ TMPD1[0,32] = Rn_VPR64.2S[0,32] f- Rm_VPR64.2S[0,32];
+ TMPD1[32,32] = Rn_VPR64.2S[32,32] f- Rm_VPR64.2S[32,32];
+ # simd unary Rd_VPR64.2S = abs(TMPD1) on lane size 4
+ Rd_VPR64.2S[0,32] = abs(TMPD1[0,32]);
+ Rd_VPR64.2S[32,32] = abs(TMPD1[32,32]);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x2ea0d400/mask=xbfa0fc00
+# CONSTRUCT x6ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f-@4 =$fabs@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@4
+# AUNIT --inst x6ea0d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"
+# Vector single-precision and double-precision variant when Q=1 sz=0 bb=1 cc=11 F=VPR128.4S
+
+:fabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.4S f- Rm_VPR128.4S on lane size 4
+ TMPQ1[0,32] = Rn_VPR128.4S[0,32] f- Rm_VPR128.4S[0,32];
+ TMPQ1[32,32] = Rn_VPR128.4S[32,32] f- Rm_VPR128.4S[32,32];
+ TMPQ1[64,32] = Rn_VPR128.4S[64,32] f- Rm_VPR128.4S[64,32];
+ TMPQ1[96,32] = Rn_VPR128.4S[96,32] f- Rm_VPR128.4S[96,32];
+ # simd unary Rd_VPR128.4S = abs(TMPQ1) on lane size 4
+ Rd_VPR128.4S[0,32] = abs(TMPQ1[0,32]);
+ Rd_VPR128.4S[32,32] = abs(TMPQ1[32,32]);
+ Rd_VPR128.4S[64,32] = abs(TMPQ1[64,32]);
+ Rd_VPR128.4S[96,32] = abs(TMPQ1[96,32]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x2ea0d400/mask=xbfa0fc00
+# CONSTRUCT x6ee0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f-@8 =$fabs@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@8
+# AUNIT --inst x6ee0d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"
+# Vector single-precision and double-precision variant when Q=1 sz=1 bb=1 cc=11 F=VPR128.2D
+
+:fabd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=1 & b_1015=0b110101 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.2D f- Rm_VPR128.2D on lane size 8
+ TMPQ1[0,64] = Rn_VPR128.2D[0,64] f- Rm_VPR128.2D[0,64];
+ TMPQ1[64,64] = Rn_VPR128.2D[64,64] f- Rm_VPR128.2D[64,64];
+ # simd unary Rd_VPR128.2D = abs(TMPQ1) on lane size 8
+ Rd_VPR128.2D[0,64] = abs(TMPQ1[0,64]);
+ Rd_VPR128.2D[64,64] = abs(TMPQ1[64,64]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x7ec01400/mask=xffe0fc00
+# CONSTRUCT x7ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 f- =fabs
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2
+# AUNIT --inst x7ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Scalar half precision variant when sz=1 bb=0 cc=00 F=FPR16
+
+:fabd Rd_FPR16, Rn_FPR16, Rm_FPR16
+is b_2131=0b01111110110 & b_1015=0b000101 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd
+{
+ local tmp1:2 = Rn_FPR16 f- Rm_FPR16;
+ Rd_FPR16 = abs(tmp1);
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x7ea0d400/mask=xffa0fc00
+# CONSTRUCT x7ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 f- =fabs
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2
+# AUNIT --inst x7ea0d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+# Scalar single-precision and double-precision variant when sz=0 bb=1 cc=11 F=FPR32
+
+:fabd Rd_FPR32, Rn_FPR32, Rm_FPR32
+is b_2131=0b01111110101 & b_1015=0b110101 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd
+{
+ local tmp1:4 = Rn_FPR32 f- Rm_FPR32;
+ Rd_FPR32 = abs(tmp1);
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.44 FABD page C7-1480 line 81859 MATCH x7ea0d400/mask=xffa0fc00
+# CONSTRUCT x7ee0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 f- =fabs
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2
+# AUNIT --inst x7ee0d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"
+# Scalar single-precision and double-precision variant when sz=1 bb=1 cc=11 F=FPR64
+
+:fabd Rd_FPR64, Rn_FPR64, Rm_FPR64
+is b_2131=0b01111110111 & b_1015=0b110101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd
+{
+ local tmp1:8 = Rn_FPR64 f- Rm_FPR64;
+ Rd_FPR64 = abs(tmp1);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ea0f800/mask=xbfbffc00
+# CONSTRUCT x4ee0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$fabs@8
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@8
+# AUNIT --inst x4ee0f800/mask=xfffffc00 --rand dfp --status pass
+
+:fabs Rd_VPR128.2D, Rn_VPR128.2D
+is b_3131=0 &
q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd unary Rd_VPR128.2D = abs(Rn_VPR128.2D) on lane size 8 + Rd_VPR128.2D[0,64] = abs(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = abs(Rn_VPR128.2D[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ea0f800/mask=xbfbffc00 +# CONSTRUCT x0ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fabs@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@4 +# AUNIT --inst x0ea0f800/mask=xfffffc00 --rand sfp --status pass + +:fabs Rd_VPR64.2S, Rn_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd unary Rd_VPR64.2S = abs(Rn_VPR64.2S) on lane size 4 + Rd_VPR64.2S[0,32] = abs(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = abs(Rn_VPR64.2S[32,32]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ea0f800/mask=xbfbffc00 +# CONSTRUCT x4ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fabs@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@4 +# AUNIT --inst x4ea0f800/mask=xfffffc00 --rand sfp --status pass + +:fabs Rd_VPR128.4S, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd unary Rd_VPR128.4S = abs(Rn_VPR128.4S) on lane size 4 + Rd_VPR128.4S[0,32] = abs(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = abs(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = abs(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = abs(Rn_VPR128.4S[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ef8f800/mask=xbffffc00 +# CONSTRUCT x0ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fabs@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@2 +# AUNIT --inst x0ef8f800/mask=xfffffc00 --rand hfp --status noqemu +# FABS (vector) SIMD 4H when size=0 Q=0 + +:fabs Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + # simd unary Rd_VPR64.4H = abs(Rn_VPR64.4H) on lane size 2 + Rd_VPR64.4H[0,16] = abs(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = abs(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = abs(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = abs(Rn_VPR64.4H[48,16]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ef8f800/mask=xbffffc00 +# CONSTRUCT x4ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fabs@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@2 +# AUNIT --inst x4ef8f800/mask=xfffffc00 --rand hfp --status noqemu +# FABS (vector) SIMD 8H when size=0 Q=1 + +:fabs Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b00111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + # simd unary Rd_VPR128.8H = abs(Rn_VPR128.8H) on lane size 2 + Rd_VPR128.8H[0,16] = abs(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = abs(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = abs(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = abs(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = abs(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = abs(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = abs(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = abs(Rn_VPR128.8H[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.46 FABS (scalar) page C7-1485 line 82158 MATCH x1e20c000/mask=xff3ffc00 +# CONSTRUCT 
x1ee0c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =fabs +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1 +# AUNIT --inst x1ee0c000/mask=xfffffc00 --rand hfp --status noqemu + +:fabs Rd_FPR16, Rn_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = abs(Rn_FPR16); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.46 FABS (scalar) page C7-1485 line 82158 MATCH x1e20c000/mask=xff3ffc00 +# CONSTRUCT x1e60c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =fabs +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1 +# AUNIT --inst x1e60c000/mask=xfffffc00 --rand dfp --status pass + +:fabs Rd_FPR64, Rn_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = abs(Rn_FPR64); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.46 FABS (scalar) page C7-1485 line 82158 MATCH x1e20c000/mask=xff3ffc00 +# CONSTRUCT x1e20c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =fabs +# SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1 +# AUNIT --inst x1e20c000/mask=xfffffc00 --rand sfp --status pass + +:fabs Rd_FPR32, Rn_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = abs(Rn_FPR32); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e20ec00/mask=xbfa0fc00 +# CONSTRUCT x6e60ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@8 +# AUNIT --inst x6e60ec00/mask=xffe0fc00 --rand dfp --status nopcodeop + +:facge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1d & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_facge(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e20ec00/mask=xbfa0fc00 +# CONSTRUCT x2e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@4 +# AUNIT --inst x2e20ec00/mask=xffe0fc00 --rand sfp --status nopcodeop + +:facge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1d & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_facge(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e20ec00/mask=xbfa0fc00 +# CONSTRUCT x6e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@4 +# AUNIT --inst x6e20ec00/mask=xffe0fc00 --rand sfp --status nopcodeop + +:facge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1d & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_facge(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x7e402c00/mask=xffe0fc00 +# CONSTRUCT x7e402c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2 +# AUNIT --inst x7e402c00/mask=xffe0fc00 --rand hfp --status noqemu +# Scalar half precision + +:facge Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01111110010 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = NEON_facge(Rn_FPR16, Rm_FPR16); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x7e20ec00/mask=xffa0fc00 +# CONSTRUCT x7e20ec00/mask=xffe0fc00 MATCHED 1 
DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2 +# AUNIT --inst x7e20ec00/mask=xffe0fc00 --rand sfp --status nopcodeop +# Scalar single-precision and double-precision sz=0 + +:facge Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd +{ + Rd_FPR32 = NEON_facge(Rn_FPR32, Rm_FPR32); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x7e20ec00/mask=xffa0fc00 +# CONSTRUCT x7e60ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2 +# AUNIT --inst x7e60ec00/mask=xffe0fc00 --rand dfp --status nopcodeop +# Scalar single-precision and double-precision sz=1 + +:facge Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_facge(Rn_FPR64, Rm_FPR64); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e402c00/mask=xbfe0fc00 +# CONSTRUCT x2e402c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@2 +# AUNIT --inst x2e402c00/mask=xffe0fc00 --rand hfp --status noqemu +# FACGE SIMD 4H when size=0 Q=0 + +:facge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_facge(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e402c00/mask=xbfe0fc00 +# CONSTRUCT x6e402c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@2 +# AUNIT --inst x6e402c00/mask=xffe0fc00 --rand hfp --status noqemu +# FACGE SIMD 8H when size=0 Q=1 + +:facge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_facge(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ea0ec00/mask=xbfa0fc00 +# CONSTRUCT x6ee0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@8 +# AUNIT --inst x6ee0ec00/mask=xffe0fc00 --rand dfp --status nopcodeop + +:facgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1d & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_facgt(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ea0ec00/mask=xbfa0fc00 +# CONSTRUCT x2ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@4 +# AUNIT --inst x2ea0ec00/mask=xffe0fc00 --rand sfp --status nopcodeop + +:facgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1d & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_facgt(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ea0ec00/mask=xbfa0fc00 +# CONSTRUCT x6ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@4 +# AUNIT --inst x6ea0ec00/mask=xffe0fc00 --rand sfp --status nopcodeop + +:facgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1d & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_facgt(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x7ec02c00/mask=xffe0fc00 +# CONSTRUCT 
x7ec02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2 +# AUNIT --inst x7ec02c00/mask=xffe0fc00 --rand hfp --status noqemu +# Scalar half precision + +:facgt Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01111110110 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = NEON_facgt(Rn_FPR16, Rm_FPR16); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x7ea0ec00/mask=xffa0fc00 +# CONSTRUCT x7ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2 +# AUNIT --inst x7ea0ec00/mask=xffe0fc00 --rand sfp --status nopcodeop +# Scalar single-precision and double-precision sz=0 + +:facgt Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd +{ + Rd_FPR32 = NEON_facgt(Rn_FPR32, Rm_FPR32); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x7ea0ec00/mask=xffa0fc00 +# CONSTRUCT x7ee0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2 +# AUNIT --inst x7ee0ec00/mask=xffe0fc00 --rand dfp --status nopcodeop +# Scalar single-precision and double-precision sz=1 + +:facgt Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_facgt(Rn_FPR64, Rm_FPR64); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ec02c00/mask=xbfe0fc00 +# CONSTRUCT x2ec02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@2 +# AUNIT --inst x2ec02c00/mask=xffe0fc00 --rand hfp --status noqemu +# Vector half-precision SIMD 4H when Q=0 + +:facgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_facgt(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ec02c00/mask=xbfe0fc00 +# CONSTRUCT x6ec02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@2 +# AUNIT --inst x6ec02c00/mask=xffe0fc00 --rand hfp --status noqemu +# Vector half-precision SIMD 8H when Q=1 + +:facgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_facgt(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e20d400/mask=xbfa0fc00 +# CONSTRUCT x4e60d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@8 +# AUNIT --inst x4e60d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd infix Rd_VPR128.2D = Rn_VPR128.2D f+ Rm_VPR128.2D on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f+ Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f+ Rm_VPR128.2D[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e20d400/mask=xbfa0fc00 +# CONSTRUCT x0e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@4 +# AUNIT --inst x0e20d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" + +:fadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & 
q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f+ Rm_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f+ Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f+ Rm_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e20d400/mask=xbfa0fc00 +# CONSTRUCT x4e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@4 +# AUNIT --inst x4e20d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" + +:fadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S f+ Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f+ Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f+ Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f+ Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f+ Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e401400/mask=xbfe0fc00 +# CONSTRUCT x0e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@2 +# AUNIT --inst x0e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision SIMD 4H when Q=0 + +:fadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f+ Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f+ Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f+ Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f+ Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f+ Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e401400/mask=xbfe0fc00 +# CONSTRUCT x4e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@2 +# AUNIT --inst x4e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision SIMD 8H when Q=1 + +:fadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f+ Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f+ Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f+ Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f+ Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f+ Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f+ Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f+ Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f+ Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f+ Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.50 FADD (scalar) page C7-1497 line 82859 MATCH x1e202800/mask=xff20fc00 +# CONSTRUCT x1e602800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 
=NEON_fadd/2
+# AUNIT --inst x1e602800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"
+
+:fadd Rd_FPR64, Rn_FPR64, Rm_FPR64
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x2 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd
+{
+ Rd_FPR64 = Rn_FPR64 f+ Rm_FPR64;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.50 FADD (scalar) page C7-1497 line 82859 MATCH x1e202800/mask=xff20fc00
+# CONSTRUCT x1ee02800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =f+
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2
+# AUNIT --inst x1ee02800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+
+:fadd Rd_FPR16, Rn_FPR16, Rm_FPR16
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x2 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
+{
+ Rd_FPR16 = Rn_FPR16 f+ Rm_FPR16;
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.50 FADD (scalar) page C7-1497 line 82859 MATCH x1e202800/mask=xff20fc00
+# CONSTRUCT x1e202800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =f+
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2
+# AUNIT --inst x1e202800/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"
+
+:fadd Rd_FPR32, Rn_FPR32, Rm_FPR32
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x2 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
+{
+ Rd_FPR32 = Rn_FPR32 f+ Rm_FPR32;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.51 FADDP (scalar) page C7-1499 line 82962 MATCH x7e30d800/mask=xffbffc00
+# CONSTRUCT x7e70d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =#f+
+# SMACRO(pseudo) ARG1 ARG2 =NEON_faddp/1@8
+# AUNIT --inst x7e70d800/mask=xfffffc00 --rand dfp --status pass --comment "nofpround"
+
+:faddp Rd_FPR64, Rn_VPR128.2D
+is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd
+{
+ # simd infix Rd_FPR64 = f+(Rn_VPR128.2D) on pairs lane size (8 to 8)
+ local tmp1 = Rn_VPR128.2D[0,64];
+ local tmp2 = Rn_VPR128.2D[64,64];
+ Rd_FPR64 = tmp1 f+ tmp2;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.51 FADDP (scalar) page C7-1499 line 82962 MATCH x7e30d800/mask=xffbffc00
+# CONSTRUCT x7e30d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =#f+
+# SMACRO(pseudo) ARG1 ARG2 =NEON_faddp/1@4
+# AUNIT --inst x7e30d800/mask=xfffffc00 --rand sfp --status pass --comment "nofpround"
+
+:faddp Rd_FPR32, Rn_VPR64.2S
+is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd
+{
+ # simd infix Rd_FPR32 = f+(Rn_VPR64.2S) on pairs lane size (4 to 4)
+ local tmp1 = Rn_VPR64.2S[0,32];
+ local tmp2 = Rn_VPR64.2S[32,32];
+ Rd_FPR32 = tmp1 f+ tmp2;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.51 FADDP (scalar) page C7-1499 line 82962 MATCH x5e30d800/mask=xfffffc00
+# CONSTRUCT x5e30d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 Rn_FPR32 =#f+@2
+# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_faddp/1@2
+# AUNIT --inst x5e30d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant
+
+:faddp Rd_FPR16, vRn_VPR128^".2H"
+is b_1031=0b0101111000110000110110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd
+{
+ # simd infix Rd_FPR16 = f+(Rn_FPR32) on pairs lane size (2 to 2)
+ local tmp1 = Rn_FPR32[0,16];
+ local tmp2 = Rn_FPR32[16,16];
+ Rd_FPR16 = tmp1 f+ tmp2;
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e20d400/mask=xbfa0fc00
+# CONSTRUCT x6e60d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 var:16 ARG2 ARG3 =#f+/2 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@8
+# AUNIT --inst x6e60d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"
+
+:faddp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+ TMPQ1 = 0;
+ # simd infix TMPQ1 = f+(Rn_VPR128.2D,Rm_VPR128.2D) on pairs lane size (8 to 8)
+ local tmp2 = Rn_VPR128.2D[0,64];
+ local tmp3 = Rn_VPR128.2D[64,64];
+ TMPQ1[0,64] = tmp2 f+ tmp3;
+ tmp2 = Rm_VPR128.2D[0,64];
+ tmp3 = Rm_VPR128.2D[64,64];
+ TMPQ1[64,64] = tmp2 f+ tmp3;
+ Rd_VPR128.2D = TMPQ1;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e20d400/mask=xbfa0fc00
+# CONSTRUCT x2e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 var:8 ARG2 ARG3 =#f+/2 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@4
+# AUNIT --inst x2e20d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"
+
+:faddp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ TMPD1 = 0;
+ # simd infix TMPD1 = f+(Rn_VPR64.2S,Rm_VPR64.2S) on pairs lane size (4 to 4)
+ local tmp2 = Rn_VPR64.2S[0,32];
+ local tmp3 = Rn_VPR64.2S[32,32];
+ TMPD1[0,32] = tmp2 f+ tmp3;
+ tmp2 = Rm_VPR64.2S[0,32];
+ tmp3 = Rm_VPR64.2S[32,32];
+ TMPD1[32,32] = tmp2 f+ tmp3;
+ Rd_VPR64.2S = TMPD1;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e20d400/mask=xbfa0fc00
+# CONSTRUCT x6e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 var:16 ARG2 ARG3 =#f+/2 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@4
+# AUNIT --inst x6e20d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"
+
+:faddp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+ TMPQ1 = 0;
+ # simd infix TMPQ1 = f+(Rn_VPR128.4S,Rm_VPR128.4S) on pairs lane size (4 to 4)
+ local tmp2 = Rn_VPR128.4S[0,32];
+ local tmp3 = Rn_VPR128.4S[32,32];
+ TMPQ1[0,32] = tmp2 f+ tmp3;
+ tmp2 = Rn_VPR128.4S[64,32];
+ tmp3 = Rn_VPR128.4S[96,32];
+ TMPQ1[32,32] = tmp2 f+ tmp3;
+ tmp2 = Rm_VPR128.4S[0,32];
+ tmp3 = Rm_VPR128.4S[32,32];
+ TMPQ1[64,32] = tmp2 f+ tmp3;
+ tmp2 = Rm_VPR128.4S[64,32];
+ tmp3 = Rm_VPR128.4S[96,32];
+ TMPQ1[96,32] = tmp2 f+ tmp3;
+ Rd_VPR128.4S = TMPQ1;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e401400/mask=xbfe0fc00
+# CONSTRUCT x2e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 var:8 ARG2 ARG3 =#f+/2 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@2
+# AUNIT --inst x2e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision SIMD 4H when Q = 0
+
+:faddp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+ TMPD1 = 0;
+ # simd infix TMPD1 = f+(Rn_VPR64.4H,Rm_VPR64.4H) on pairs lane size (2 to 2)
+ local tmp2 = Rn_VPR64.4H[0,16];
+ local tmp3 = Rn_VPR64.4H[16,16];
+ TMPD1[0,16] = tmp2 f+ tmp3;
+ tmp2 = Rn_VPR64.4H[32,16];
+ tmp3 = Rn_VPR64.4H[48,16];
+ TMPD1[16,16] = tmp2 f+ tmp3;
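+ # FADDP concatenates Vn:Vm and sums adjacent pairs, so the low half of the
+ # destination holds Vn's pair sums and the high half holds Vm's, as in the
+ # 2S/4S/8H forms of this constructor.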
+# C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e401400/mask=xbfe0fc00
+# CONSTRUCT x2e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 var:8 ARG2 ARG3 =#f+/2 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@2
+# AUNIT --inst x2e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision SIMD 4H when Q = 0
+
+:faddp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+    TMPD1 = 0;
+    # simd infix TMPD1 = f+(Rn_VPR64.4H,Rm_VPR64.4H) on pairs lane size (2 to 2)
+    # pairs from Rn fill the low half, pairs from Rm the high half
+    local tmp2 = Rn_VPR64.4H[0,16];
+    local tmp3 = Rn_VPR64.4H[16,16];
+    TMPD1[0,16] = tmp2 f+ tmp3;
+    tmp2 = Rn_VPR64.4H[32,16];
+    tmp3 = Rn_VPR64.4H[48,16];
+    TMPD1[16,16] = tmp2 f+ tmp3;
+    tmp2 = Rm_VPR64.4H[0,16];
+    tmp3 = Rm_VPR64.4H[16,16];
+    TMPD1[32,16] = tmp2 f+ tmp3;
+    tmp2 = Rm_VPR64.4H[32,16];
+    tmp3 = Rm_VPR64.4H[48,16];
+    TMPD1[48,16] = tmp2 f+ tmp3;
+    Rd_VPR64.4H = TMPD1;
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e401400/mask=xbfe0fc00
+# CONSTRUCT x6e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 var:16 ARG2 ARG3 =#f+/2 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@2
+# AUNIT --inst x6e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision SIMD 8H when Q = 1
+
+:faddp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
+{
+    TMPQ1 = 0;
+    # simd infix TMPQ1 = f+(Rn_VPR128.8H,Rm_VPR128.8H) on pairs lane size (2 to 2)
+    local tmp2 = Rn_VPR128.8H[0,16];
+    local tmp3 = Rn_VPR128.8H[16,16];
+    TMPQ1[0,16] = tmp2 f+ tmp3;
+    tmp2 = Rn_VPR128.8H[32,16];
+    tmp3 = Rn_VPR128.8H[48,16];
+    TMPQ1[16,16] = tmp2 f+ tmp3;
+    tmp2 = Rn_VPR128.8H[64,16];
+    tmp3 = Rn_VPR128.8H[80,16];
+    TMPQ1[32,16] = tmp2 f+ tmp3;
+    tmp2 = Rn_VPR128.8H[96,16];
+    tmp3 = Rn_VPR128.8H[112,16];
+    TMPQ1[48,16] = tmp2 f+ tmp3;
+    tmp2 = Rm_VPR128.8H[0,16];
+    tmp3 = Rm_VPR128.8H[16,16];
+    TMPQ1[64,16] = tmp2 f+ tmp3;
+    tmp2 = Rm_VPR128.8H[32,16];
+    tmp3 = Rm_VPR128.8H[48,16];
+    TMPQ1[80,16] = tmp2 f+ tmp3;
+    tmp2 = Rm_VPR128.8H[64,16];
+    tmp3 = Rm_VPR128.8H[80,16];
+    TMPQ1[96,16] = tmp2 f+ tmp3;
+    tmp2 = Rm_VPR128.8H[96,16];
+    tmp3 = Rm_VPR128.8H[112,16];
+    TMPQ1[112,16] = tmp2 f+ tmp3;
+    Rd_VPR128.8H = TMPQ1;
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.46 FCADD page C7-1090 line 63037 KEEPWITH
+
+fcadd_rotate: #90 is b_12=0 { export 90:1; }
+fcadd_rotate: #270 is b_12=1 { export 270:1; }
+
+# C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00
+# CONSTRUCT x2e40e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@2
+# AUNIT --inst x2e40e400/mask=xffe0ec00 --rand hfp --status noqemu --comment "nofpround"
+# FCADD SIMD 4H when size = 01 , Q = 0
+
+:fcadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, fcadd_rotate
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+    Rd_VPR64.4H = NEON_fcadd(Rn_VPR64.4H, Rm_VPR64.4H, fcadd_rotate, 2:1);
+}
+
+# C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00
+# CONSTRUCT x6e40e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@2
+# AUNIT --inst x6e40e400/mask=xffe0ec00 --rand hfp --status noqemu --comment "nofpround"
+# FCADD SIMD 8H when size = 01 , Q = 1
+
+:fcadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, fcadd_rotate
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
+{
+    Rd_VPR128.8H = NEON_fcadd(Rn_VPR128.8H, Rm_VPR128.8H, fcadd_rotate, 2:1);
+}
+
+# C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00
+# CONSTRUCT x2e80e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@4
+# AUNIT --inst x2e80e400/mask=xffe0ec00 --rand sfp --status noqemu --comment "nofpround"
+# FCADD SIMD 2S when size = 10 , Q = 0
+
+:fcadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, fcadd_rotate
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR64.2S &
Rn_VPR64.2S & Rm_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcadd(Rn_VPR64.2S, Rm_VPR64.2S, fcadd_rotate, 4:1); +} + +# C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00 +# CONSTRUCT x6e80e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@4 +# AUNIT --inst x6e80e400/mask=xffe0ec00 --rand sfp --status noqemu --comment "nofpround" +# FCADD SIMD 4S when size = 10 , Q = 1 + +:fcadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, fcadd_rotate +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcadd(Rn_VPR128.4S, Rm_VPR128.4S, fcadd_rotate, 4:1); +} + +# C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00 +# CONSTRUCT x6ec0e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@8 +# AUNIT --inst x6ec0e400/mask=xffe0ec00 --rand dfp --status noqemu --comment "nofpround" +# FCADD SIMD 2D when size = 11 , Q = 1 + +:fcadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, fcadd_rotate +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcadd(Rn_VPR128.2D, Rm_VPR128.2D, fcadd_rotate, 8:1); +} + +# C7.2.54 FCCMP page C7-1505 line 83301 MATCH x1e200400/mask=xff200c10 +# CONSTRUCT x1e600400/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmp/4 +# AUNIT --inst x1e600400/mask=xffe00c10 --rand dfp --status nodest --comment "flags" + +:fccmp Rn_FPR64, Rm_FPR64, NZCVImm_uimm4, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=1 & Rn_FPR64 & fpccmp.op=0 & NZCVImm_uimm4 +{ + setCC_NZCV(NZCVImm_uimm4:1); + local tmp1:1 = ! CondOp:1; + if (tmp1) goto inst_next; + fcomp(Rn_FPR64, Rm_FPR64); +} + +# C7.2.54 FCCMP page C7-1505 line 83301 MATCH x1e200400/mask=xff200c10 +# CONSTRUCT x1e200400/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmp/4 +# AUNIT --inst x1e200400/mask=xffe00c10 --rand sfp --status nodest --comment "flags" + +:fccmp Rn_FPR32, Rm_FPR32, NZCVImm_uimm4, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=1 & Rn_FPR32 & fpccmp.op=0 & NZCVImm_uimm4 +{ + setCC_NZCV(NZCVImm_uimm4:1); + local tmp1:1 = ! CondOp:1; + if (tmp1) goto inst_next; + fcomp(Rn_FPR32, Rm_FPR32); +} + +# C7.2.54 FCCMP page C7-1505 line 83301 MATCH x1e200400/mask=xff200c10 +# CONSTRUCT x1ee00400/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmp/4 +# AUNIT --inst x1ee00400/mask=xffe00c10 --rand hfp --status nodest --comment "flags" + +:fccmp Rn_FPR16, Rm_FPR16, NZCVImm_uimm4, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=1 & Rn_FPR16 & fpccmp.op=0 & NZCVImm_uimm4 +{ + setCC_NZCV(NZCVImm_uimm4:1); + local tmp1:1 = ! 
CondOp:1; + if (tmp1) goto inst_next; + fcomp(Rn_FPR16, Rm_FPR16); +} + +# C7.2.55 FCCMPE page C7-1507 line 83416 MATCH x1e200410/mask=xff200c10 +# CONSTRUCT x1e600410/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmpe/4 +# AUNIT --inst x1e600410/mask=xffe00c10 --rand dfp --status nodest --comment "flags" + +:fccmpe Rn_FPR64, Rm_FPR64, NZCVImm_uimm4, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=1 & Rn_FPR64 & fpccmp.op=1 & NZCVImm_uimm4 +{ + setCC_NZCV(NZCVImm_uimm4:1); + local tmp1:1 = ! CondOp:1; + if (tmp1) goto inst_next; + ftestNAN(Rn_FPR64, Rm_FPR64); + fcomp(Rn_FPR64, Rm_FPR64); +} + +# C7.2.55 FCCMPE page C7-1507 line 83416 MATCH x1e200410/mask=xff200c10 +# CONSTRUCT x1e200410/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmpe/4 +# AUNIT --inst x1e200410/mask=xffe00c10 --rand sfp --status nodest --comment "flags" + +:fccmpe Rn_FPR32, Rm_FPR32, NZCVImm_uimm4, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=1 & Rn_FPR32 & fpccmp.op=1 & NZCVImm_uimm4 +{ + setCC_NZCV(NZCVImm_uimm4:1); + local tmp1:1 = ! CondOp:1; + if (tmp1) goto inst_next; + ftestNAN(Rn_FPR32, Rm_FPR32); + fcomp(Rn_FPR32, Rm_FPR32); +} + +# C7.2.55 FCCMPE page C7-1507 line 83416 MATCH x1e200410/mask=xff200c10 +# CONSTRUCT x1ee00410/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmpe/4 +# AUNIT --inst x1ee00410/mask=xffe00c10 --rand hfp --status nodest --comment "flags" + +:fccmpe Rn_FPR16, Rm_FPR16, NZCVImm_uimm4, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=1 & Rn_FPR16 & fpccmp.op=1 & NZCVImm_uimm4 +{ + setCC_NZCV(NZCVImm_uimm4:1); + local tmp1:1 = ! 
CondOp:1; + if (tmp1) goto inst_next; + ftestNAN(Rn_FPR16, Rm_FPR16); + fcomp(Rn_FPR16, Rm_FPR16); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e20e400/mask=xbfa0fc00 +# CONSTRUCT x4e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@8 +# AUNIT --inst x4e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmeq Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmeq(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e20e400/mask=xbfa0fc00 +# CONSTRUCT x0e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@4 +# AUNIT --inst x0e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmeq Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmeq(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e20e400/mask=xbfa0fc00 +# CONSTRUCT x4e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@4 +# AUNIT --inst x4e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmeq Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmeq(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x5e402400/mask=xffe0fc00 +# CONSTRUCT x5e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2 +# AUNIT --inst x5e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmeq Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01011110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmeq(Rn_FPR16, Rm_FPR16); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x5e20e400/mask=xffa0fc00 +# CONSTRUCT x5e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2 +# AUNIT --inst x5e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision variant sz=0 + +:fcmeq Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_2331=0b010111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmeq(Rn_FPR32, Rm_FPR32); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x5e20e400/mask=xffa0fc00 +# CONSTRUCT x5e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2 +# AUNIT --inst x5e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision variant sz=1 + +:fcmeq Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2331=0b010111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmeq(Rn_FPR64, Rm_FPR64); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e402400/mask=xbfe0fc00 +# CONSTRUCT x0e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@2 +# AUNIT --inst x0e402400/mask=xffe0fc00 
--rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q=0 + +:fcmeq Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmeq(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e402400/mask=xbfe0fc00 +# CONSTRUCT x4e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@2 +# AUNIT --inst x4e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q=1 + +:fcmeq Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmeq(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ea0d800/mask=xbfbffc00 +# CONSTRUCT x4ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmeq/2@8 +# AUNIT --inst x4ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmeq Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmeq(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ea0d800/mask=xbfbffc00 +# CONSTRUCT x0ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmeq/2@4 +# AUNIT --inst x0ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmeq Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmeq(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ea0d800/mask=xbfbffc00 +# CONSTRUCT x4ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmeq/2@4 +# AUNIT --inst x4ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmeq Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmeq(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x5ef8d800/mask=xfffffc00 +# CONSTRUCT x5ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmeq/2 +# AUNIT --inst x5ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmeq Rd_FPR16, Rn_FPR16, "#0.0" +is b_1031=0b0101111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmeq(Rn_FPR16, 0:2); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x5ea0d800/mask=xffbffc00 +# CONSTRUCT x5ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmeq/2 +# AUNIT --inst x5ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=0 + +:fcmeq Rd_FPR32, Rn_FPR32, "#0.0" +is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmeq(Rn_FPR32, 0:4); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x5ea0d800/mask=xffbffc00 +# CONSTRUCT x5ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 
=NEON_fcmeq/2 +# AUNIT --inst x5ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=1 + +:fcmeq Rd_FPR64, Rn_FPR64, "#0.0" +is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmeq(Rn_FPR64, 0:8); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ef8d800/mask=xbffffc00 +# CONSTRUCT x0ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmeq/2@2 +# AUNIT --inst x0ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q = 0 + +:fcmeq Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" +is b_31=0 & b_30=0 & b_1029=0b00111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmeq(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ef8d800/mask=xbffffc00 +# CONSTRUCT x4ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmeq/2@2 +# AUNIT --inst x4ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q = 1 + +:fcmeq Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" +is b_31=0 & b_30=1 & b_1029=0b00111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmeq(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e20e400/mask=xbfa0fc00 +# CONSTRUCT x6e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@8 +# AUNIT --inst x6e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmge(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e20e400/mask=xbfa0fc00 +# CONSTRUCT x2e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@4 +# AUNIT --inst x2e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmge(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e20e400/mask=xbfa0fc00 +# CONSTRUCT x6e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@4 +# AUNIT --inst x6e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmge(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x7e402400/mask=xffe0fc00 +# CONSTRUCT x7e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2 +# AUNIT --inst x7e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmge Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01111110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmge(Rn_FPR16, Rm_FPR16); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 
83990 MATCH x7e20e400/mask=xffa0fc00 +# CONSTRUCT x7e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2 +# AUNIT --inst x7e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision variant sz=0 + +:fcmge Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmge(Rn_FPR32, Rm_FPR32); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x7e20e400/mask=xffa0fc00 +# CONSTRUCT x7e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2 +# AUNIT --inst x7e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision variant sz=1 + +:fcmge Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmge(Rn_FPR64, Rm_FPR64); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e402400/mask=xbfe0fc00 +# CONSTRUCT x2e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@2 +# AUNIT --inst x2e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q = 0 + +:fcmge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmge(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e402400/mask=xbfe0fc00 +# CONSTRUCT x6e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@2 +# AUNIT --inst x6e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q = 1 + +:fcmge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmge(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ea0c800/mask=xbfbffc00 +# CONSTRUCT x6ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmge/2@8 +# AUNIT --inst x6ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmge Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmge(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ea0c800/mask=xbfbffc00 +# CONSTRUCT x2ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmge/2@4 +# AUNIT --inst x2ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmge Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmge(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ea0c800/mask=xbfbffc00 +# CONSTRUCT x6ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmge/2@4 +# AUNIT --inst x6ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmge Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & 
b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmge(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x7ef8c800/mask=xfffffc00 +# CONSTRUCT x7ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmge/2 +# AUNIT --inst x7ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmge Rd_FPR16, Rn_FPR16, "#0.0" +is b_1031=0b0111111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmge(Rn_FPR16, 0:2); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x7ea0c800/mask=xffbffc00 +# CONSTRUCT x7ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmge/2 +# AUNIT --inst x7ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=0 + +:fcmge Rd_FPR32, Rn_FPR32, "#0.0" +is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110010 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmge(Rn_FPR32, 0:4); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x7ea0c800/mask=xffbffc00 +# CONSTRUCT x7ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmge/2 +# AUNIT --inst x7ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=1 + +:fcmge Rd_FPR64, Rn_FPR64, "#0.0" +is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmge(Rn_FPR64, 0:8); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ef8c800/mask=xbffffc00 +# CONSTRUCT x2ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmge/2@2 +# AUNIT --inst x2ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q = 0 + +:fcmge Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" +is b_31=0 & b_30=0 & b_1029=0b10111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmge(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ef8c800/mask=xbffffc00 +# CONSTRUCT x6ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmge/2@2 +# AUNIT --inst x6ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q = 1 + +:fcmge Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" +is b_31=0 & b_30=1 & b_1029=0b10111011111000110010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmge(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ea0e400/mask=xbfa0fc00 +# CONSTRUCT x6ee0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@8 +# AUNIT --inst x6ee0e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmgt(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ea0e400/mask=xbfa0fc00 +# CONSTRUCT x2ea0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@4 +# AUNIT --inst x2ea0e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & 
q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmgt(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ea0e400/mask=xbfa0fc00 +# CONSTRUCT x6ea0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@4 +# AUNIT --inst x6ea0e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmgt(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x7ec02400/mask=xffe0fc00 +# CONSTRUCT x7ec02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2 +# AUNIT --inst x7ec02400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmgt Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01111110110 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmgt(Rn_FPR16, Rm_FPR16); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x7ea0e400/mask=xffa0fc00 +# CONSTRUCT x7ea0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2 +# AUNIT --inst x7ea0e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision variant sz=0 + +:fcmgt Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmgt(Rn_FPR32, Rm_FPR32); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x7ea0e400/mask=xffa0fc00 +# CONSTRUCT x7ee0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2 +# AUNIT --inst x7ee0e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision variant sz=1 + +:fcmgt Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmgt(Rn_FPR64, Rm_FPR64); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ec02400/mask=xbfe0fc00 +# CONSTRUCT x2ec02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@2 +# AUNIT --inst x2ec02400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q = 0 + +:fcmgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmgt(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ec02400/mask=xbfe0fc00 +# CONSTRUCT x6ec02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@2 +# AUNIT --inst x6ec02400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q = 1 + +:fcmgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmgt(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ea0c800/mask=xbfbffc00 +# CONSTRUCT x4ee0c800/mask=xfffffc00 
MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmgt/2@8 +# AUNIT --inst x4ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmgt Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmgt(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ea0c800/mask=xbfbffc00 +# CONSTRUCT x0ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmgt/2@4 +# AUNIT --inst x0ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmgt Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmgt(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ea0c800/mask=xbfbffc00 +# CONSTRUCT x4ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmgt/2@4 +# AUNIT --inst x4ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmgt Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmgt(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x5ef8c800/mask=xfffffc00 +# CONSTRUCT x5ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmgt/2 +# AUNIT --inst x5ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmgt Rd_FPR16, Rn_FPR16, "#0.0" +is b_1031=0b0101111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmgt(Rn_FPR16, 0:2); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x5ea0c800/mask=xffbffc00 +# CONSTRUCT x5ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmgt/2 +# AUNIT --inst x5ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=0 + +:fcmgt Rd_FPR32, Rn_FPR32, "#0.0" +is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110010 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmgt(Rn_FPR32, 0:4); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x5ea0c800/mask=xffbffc00 +# CONSTRUCT x5ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmgt/2 +# AUNIT --inst x5ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=1 + +:fcmgt Rd_FPR64, Rn_FPR64, "#0.0" +is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmgt(Rn_FPR64, 0:8); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ef8c800/mask=xbffffc00 +# CONSTRUCT x0ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmgt/2@2 +# AUNIT --inst x0ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q = 0 + +:fcmgt Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" +is b_31=0 & b_30=0 & b_1029=0b00111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmgt(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ef8c800/mask=xbffffc00 +# CONSTRUCT x4ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmgt/2@2 +# AUNIT --inst x4ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q = 1 + +:fcmgt Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" +is b_31=0 & b_30=1 & b_1029=0b00111011111000110010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmgt(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.55 FCMLA (by element) page C7-1117 line 64749 KEEPWITH + +fcmla_rotate: #0 is b_15=0 & b_1314=0b00 { export 0:1; } +fcmla_rotate: #90 is b_15=0 & b_1314=0b01 { export 90:1; } +fcmla_rotate: #180 is b_15=0 & b_1314=0b10 { export 180:1; } +fcmla_rotate: #270 is b_15=0 & b_1314=0b11 { export 270:1; } +fcmla_rotate: #0 is b_15=1 & b_1112=0b00 { export 0:1; } +fcmla_rotate: #90 is b_15=1 & b_1112=0b01 { export 90:1; } +fcmla_rotate: #180 is b_15=1 & b_1112=0b10 { export 180:1; } +fcmla_rotate: #270 is b_15=1 & b_1112=0b11 { export 270:1; } + +# C7.2.62 FCMLA (by element) page C7-1530 line 84901 MATCH x2f001000/mask=xbf009400 +# CONSTRUCT x2f401000/mask=xffc09c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@2 +# AUNIT --inst x2f401000/mask=xffc09c00 --rand hfp --status noqemu --comment "noflags" +# The representation of Rm in the documentation as a 4 bit field +# extended by M actually makes it a standard 5 bit field. +# 4H variant when size = 01 , Q = 0 T=VPR64.4H imm=Re_VPR128.H.vIndexHL i1=Re_VPR128.H i2=vIndexHL +# NOTE: if size == '01' and H == '1' && Q == '0' then ReservedValue(); + +:fcmla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128.H.vIndexHL, fcmla_rotate +is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_11=0 & b_10=0 & fcmla_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128.H.vIndexHL & Re_VPR128.H & vIndexHL & Zd +{ + local tmp1:2 = SIMD_PIECE(Re_VPR128.H, vIndexHL:1); + Rd_VPR64.4H = NEON_fcmla(Rn_VPR64.4H, tmp1, fcmla_rotate, 2:1); +} + +# C7.2.62 FCMLA (by element) page C7-1530 line 84901 MATCH x2f001000/mask=xbf009400 +# CONSTRUCT x6f401000/mask=xffc09400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@2 +# AUNIT --inst x6f401000/mask=xffc09400 --rand hfp --status noqemu --comment "noflags" +# 8H variant when size = 01 , Q = 1 T=VPR128.8H imm=Re_VPR128.H.vIndexHL i1=Re_VPR128.H i2=vIndexHL + +:fcmla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128.H.vIndexHL, fcmla_rotate +is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_10=0 & fcmla_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128.H.vIndexHL & Re_VPR128.H & vIndexHL & Zd +{ + local tmp1:2 = SIMD_PIECE(Re_VPR128.H, vIndexHL:1); + Rd_VPR128.8H = NEON_fcmla(Rn_VPR128.8H, tmp1, fcmla_rotate, 2:1); +} + +# C7.2.62 FCMLA (by element) page C7-1530 line 84901 MATCH x2f001000/mask=xbf009400 +# CONSTRUCT x6f801000/mask=xffe09400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 +# AUNIT --inst x6f801000/mask=xffe09400 --rand sfp --status noqemu --comment "noflags" +# 4S variant when size = 10 , Q = 1 T=VPR128.4S imm=Re_VPR128.S.vIndex i1=Re_VPR128.S i2=vIndex +# NOTE: if size == '10' and (L == '1' || Q == '0') then ReservedValue(); + +:fcmla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex, fcmla_rotate +is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_21=0 & b_15=0 & b_12=1 & b_10=0 & fcmla_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + Rd_VPR128.4S = NEON_fcmla(Rn_VPR128.4S, tmp1, fcmla_rotate, 4:1); +} + +# C7.2.63 FCMLA page C7-1533 
line 85073 MATCH x2e00c400/mask=xbf20e400
+# CONSTRUCT x2e40c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@2
+# AUNIT --inst x2e40c400/mask=xffe0e400 --rand hfp --status noqemu --comment "noflags"
+# FCMLA SIMD 4H when size = 01 , Q = 0
+
+:fcmla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, fcmla_rotate
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+    Rd_VPR64.4H = NEON_fcmla(Rn_VPR64.4H, Rm_VPR64.4H, fcmla_rotate, 2:1);
+}
+
+# C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400
+# CONSTRUCT x6e40c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@2
+# AUNIT --inst x6e40c400/mask=xffe0e400 --rand hfp --status noqemu --comment "noflags"
+# FCMLA SIMD 8H when size = 01 , Q = 1
+
+:fcmla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, fcmla_rotate
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
+{
+    Rd_VPR128.8H = NEON_fcmla(Rn_VPR128.8H, Rm_VPR128.8H, fcmla_rotate, 2:1);
+}
+
+# C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400
+# CONSTRUCT x2e80c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4
+# AUNIT --inst x2e80c400/mask=xffe0e400 --rand sfp --status noqemu --comment "noflags"
+# FCMLA SIMD 2S when size = 10 , Q = 0
+
+:fcmla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, fcmla_rotate
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd
+{
+    Rd_VPR64.2S = NEON_fcmla(Rn_VPR64.2S, Rm_VPR64.2S, fcmla_rotate, 4:1);
+}
+
+# C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400
+# CONSTRUCT x6e80c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4
+# AUNIT --inst x6e80c400/mask=xffe0e400 --rand sfp --status noqemu --comment "noflags"
+# FCMLA SIMD 4S when size = 10 , Q = 1
+
+:fcmla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, fcmla_rotate
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd
+{
+    Rd_VPR128.4S = NEON_fcmla(Rn_VPR128.4S, Rm_VPR128.4S, fcmla_rotate, 4:1);
+}
+
+# C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400
+# CONSTRUCT x6ec0c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@8
+# AUNIT --inst x6ec0c400/mask=xffe0e400 --rand dfp --status noqemu --comment "noflags"
+# FCMLA SIMD 2D when size = 11 , Q = 1
+
+:fcmla Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, fcmla_rotate
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd
+{
+    Rd_VPR128.2D = NEON_fcmla(Rn_VPR128.2D, Rm_VPR128.2D, fcmla_rotate, 8:1);
+}
+
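+# In the NEON_* pseudo-op lowerings, the trailing constant is the element
+# size in bytes (2:1 for H, 4:1 for S, 8:1 for D lanes), mirroring the
+# @2/@4/@8 suffixes in the generated SMACRO comments; rotation operands
+# come from the fcadd_rotate/fcmla_rotate subtables above, which export
+# the rotation in degrees.
+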
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ea0d800/mask=xbfbffc00
+# CONSTRUCT x6ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmle/2@8
+# AUNIT --inst x6ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags"
+
+:fcmle Rd_VPR128.2D, Rn_VPR128.2D, "#0"
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+    Rd_VPR128.2D = NEON_fcmle(Rn_VPR128.2D, 0:8, 8:1);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ea0d800/mask=xbfbffc00
+# CONSTRUCT x2ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmle/2@4
+# AUNIT --inst x2ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags"
+
+:fcmle Rd_VPR64.2S, Rn_VPR64.2S, "#0"
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+    Rd_VPR64.2S = NEON_fcmle(Rn_VPR64.2S, 0:4, 4:1);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ea0d800/mask=xbfbffc00
+# CONSTRUCT x6ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmle/2@4
+# AUNIT --inst x6ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags"
+
+:fcmle Rd_VPR128.4S, Rn_VPR128.4S, "#0"
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+    Rd_VPR128.4S = NEON_fcmle(Rn_VPR128.4S, 0:4, 4:1);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x7ef8d800/mask=xfffffc00
+# CONSTRUCT x7ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmle/2
+# AUNIT --inst x7ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags"
+# Scalar half precision variant
+
+:fcmle Rd_FPR16, Rn_FPR16, "#0.0"
+is b_1031=0b0111111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd
+{
+    Rd_FPR16 = NEON_fcmle(Rn_FPR16, 0:2);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x7ea0d800/mask=xffbffc00
+# CONSTRUCT x7ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmle/2
+# AUNIT --inst x7ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags"
+# Scalar single-precision and double-precision sz=0
+
+:fcmle Rd_FPR32, Rn_FPR32, "#0.0"
+is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Zd
+{
+    Rd_FPR32 = NEON_fcmle(Rn_FPR32, 0:4);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x7ea0d800/mask=xffbffc00
+# CONSTRUCT x7ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmle/2
+# AUNIT --inst x7ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags"
+# Scalar single-precision and double-precision sz=1
+
+:fcmle Rd_FPR64, Rn_FPR64, "#0.0"
+is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Zd
+{
+    Rd_FPR64 = NEON_fcmle(Rn_FPR64, 0:8);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ef8d800/mask=xbffffc00
+# CONSTRUCT x2ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmle/2@2
+# AUNIT --inst x2ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags"
+# Vector half precision variant SIMD 4H when Q = 0
+
+:fcmle Rd_VPR64.4H, Rn_VPR64.4H, "#0.0"
+is b_31=0 & b_30=0 & b_1029=0b10111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd
+{
+    Rd_VPR64.4H = NEON_fcmle(Rn_VPR64.4H, 0:2, 2:1);
+}
+
+# C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ef8d800/mask=xbffffc00
+# CONSTRUCT x6ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmle/2@2
+# AUNIT --inst x6ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags"
+# Vector half precision variant SIMD 8H when Q = 1
+
+:fcmle Rd_VPR128.8H, Rn_VPR128.8H, "#0.0"
+is b_31=0 & b_30=1 & b_1029=0b10111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd
+{
+    Rd_VPR128.8H = NEON_fcmle(Rn_VPR128.8H, 0:2, 2:1);
+}
+
+# C7.2.65 FCMLT (zero) page
C7-1538 line 85427 MATCH x0ea0e800/mask=xbfbffc00 +# CONSTRUCT x4ee0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmlt/2@8 +# AUNIT --inst x4ee0e800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" + +:fcmlt Rd_VPR128.2D, Rn_VPR128.2D, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xe & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fcmlt(Rn_VPR128.2D, 0:8, 8:1); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ea0e800/mask=xbfbffc00 +# CONSTRUCT x0ea0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmlt/2@4 +# AUNIT --inst x0ea0e800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmlt Rd_VPR64.2S, Rn_VPR64.2S, "#0" +is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fcmlt(Rn_VPR64.2S, 0:4, 4:1); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ea0e800/mask=xbfbffc00 +# CONSTRUCT x4ea0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmlt/2@4 +# AUNIT --inst x4ea0e800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" + +:fcmlt Rd_VPR128.4S, Rn_VPR128.4S, "#0" +is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fcmlt(Rn_VPR128.4S, 0:4, 4:1); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x5ef8e800/mask=xfffffc00 +# CONSTRUCT x5ef8e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmlt/2 +# AUNIT --inst x5ef8e800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Scalar half precision variant + +:fcmlt Rd_FPR16, Rn_FPR16, "#0.0" +is b_1031=0b0101111011111000111010 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_fcmlt(Rn_FPR16, 0:2); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x5ea0e800/mask=xffbffc00 +# CONSTRUCT x5ea0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmlt/2 +# AUNIT --inst x5ea0e800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=0 + +:fcmlt Rd_FPR32, Rn_FPR32, "#0.0" +is b_2331=0b010111101 & b_22=0 & b_1021=0b100000111010 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = NEON_fcmlt(Rn_FPR32, 0:4); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x5ea0e800/mask=xffbffc00 +# CONSTRUCT x5ee0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmlt/2 +# AUNIT --inst x5ee0e800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" +# Scalar single-precision and double-precision sz=1 + +:fcmlt Rd_FPR64, Rn_FPR64, "#0.0" +is b_2331=0b010111101 & b_22=1 & b_1021=0b100000111010 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_fcmlt(Rn_FPR64, 0:8); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ef8e800/mask=xbffffc00 +# CONSTRUCT x0ef8e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmlt/2@2 +# AUNIT --inst x0ef8e800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 4H when Q = 0 + +:fcmlt Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" +is b_31=0 & b_30=0 & b_1029=0b00111011111000111010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fcmlt(Rn_VPR64.4H, 0:2, 2:1); +} + +# C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH 
x0ef8e800/mask=xbffffc00 +# CONSTRUCT x4ef8e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmlt/2@2 +# AUNIT --inst x4ef8e800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" +# Vector half precision variant SIMD 8H when Q = 1 + +:fcmlt Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" +is b_31=0 & b_30=1 & b_1029=0b00111011111000111010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fcmlt(Rn_VPR128.8H, 0:2, 2:1); +} + +# C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 +# CONSTRUCT x1e602000/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 +# AUNIT --inst x1e602000/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" + +:fcmp Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x0 +{ + fcomp(Rn_FPR64, Rm_FPR64); +} + +# C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 +# CONSTRUCT x1e602008/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 +# AUNIT --inst x1e602008/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" + +:fcmp Rn_FPR64, Rm_fpz64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x8 +{ + fcomp(Rn_FPR64, Rm_fpz64); +} + +# C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 +# CONSTRUCT x1e202008/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 +# AUNIT --inst x1e202008/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" + +:fcmp Rn_FPR32, Rm_fpz32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x8 +{ + fcomp(Rn_FPR32, Rm_fpz32); +} + +# C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 +# CONSTRUCT x1e202000/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 +# AUNIT --inst x1e202000/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" + +:fcmp Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x0 +{ + fcomp(Rn_FPR32, Rm_FPR32); +} + +# C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 +# CONSTRUCT x1ee02008/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 +# AUNIT --inst x1ee02008/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" + +:fcmp Rn_FPR16, Rm_fpz16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x8 +{ + fcomp(Rn_FPR16, Rm_fpz16); +} + +# C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 +# CONSTRUCT x1ee02000/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 +# AUNIT --inst x1ee02000/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" + +:fcmp Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x0 +{ + fcomp(Rn_FPR16, Rm_FPR16); +} + +# C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 +# CONSTRUCT 
x1e602010/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 +# AUNIT --inst x1e602010/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" + +:fcmpe Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x10 +{ + ftestNAN(Rn_FPR64, Rm_FPR64); + fcomp(Rn_FPR64, Rm_FPR64); +} + +# C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 +# CONSTRUCT x1e602018/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 +# AUNIT --inst x1e602018/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" + +:fcmpe Rn_FPR64, Rm_fpz64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x18 +{ + ftestNAN(Rn_FPR64, Rm_fpz64); + fcomp(Rn_FPR64, Rm_fpz64); +} + +# C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 +# CONSTRUCT x1e202018/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 +# AUNIT --inst x1e202018/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" + +:fcmpe Rn_FPR32, Rm_fpz32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x18 +{ + ftestNAN(Rn_FPR32, Rm_fpz32); + fcomp(Rn_FPR32, Rm_fpz32); +} + +# C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 +# CONSTRUCT x1e202010/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 +# AUNIT --inst x1e202010/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" + +:fcmpe Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x10 +{ + ftestNAN(Rn_FPR32, Rm_FPR32); + fcomp(Rn_FPR32, Rm_FPR32); +} + +# C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 +# CONSTRUCT x1ee02018/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 +# AUNIT --inst x1ee02018/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" + +:fcmpe Rn_FPR16, Rm_fpz16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x18 +{ + ftestNAN(Rn_FPR16, Rm_fpz16); + fcomp(Rn_FPR16, Rm_fpz16); +} + +# C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 +# CONSTRUCT x1ee02010/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES +# SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 +# SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 +# AUNIT --inst x1ee02010/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" + +:fcmpe Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x10 +{ + ftestNAN(Rn_FPR16, Rm_FPR16); + fcomp(Rn_FPR16, Rm_FPR16); +} + +# C7.2.68 FCSEL page C7-1545 line 85895 MATCH x1e200c00/mask=xff200c00 +# CONSTRUCT x1e600c00/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 = dup ext swap ARG4:1 inst_next goto = +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4:1 =NEON_fcsel/3 +# AUNIT --inst 
x1e600c00/mask=xffe00c00 --rand dfp --status pass --comment "flags" +# Rm may be the same register as Rd, so it needs to be saved + +:fcsel Rd_FPR64, Rn_FPR64, Rm_FPR64, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=3 & Rn_FPR64 & Rd_FPR64 & Zd +{ + local tmp1:8 = Rm_FPR64; + Rd_FPR64 = Rn_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd + if (CondOp:1) goto inst_next; + Rd_FPR64 = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.68 FCSEL page C7-1545 line 85895 MATCH x1e200c00/mask=xff200c00 +# CONSTRUCT x1e200c00/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 = dup ext swap ARG4:1 inst_next goto = +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4:1 =NEON_fcsel/3 +# AUNIT --inst x1e200c00/mask=xffe00c00 --rand sfp --status pass --comment "flags" + +:fcsel Rd_FPR32, Rn_FPR32, Rm_FPR32, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=3 & Rn_FPR32 & Rd_FPR32 & Zd +{ + local tmp1:4 = Rm_FPR32; + Rd_FPR32 = Rn_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd + if (CondOp:1) goto inst_next; + Rd_FPR32 = tmp1; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.68 FCSEL page C7-1545 line 85895 MATCH x1e200c00/mask=xff200c00 +# CONSTRUCT x1ee00c00/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 = dup ext swap ARG4:1 inst_next goto = +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4:1 =NEON_fcsel/3 +# AUNIT --inst x1ee00c00/mask=xffe00c00 --rand hfp --status noqemu --comment "flags" + +:fcsel Rd_FPR16, Rn_FPR16, Rm_FPR16, CondOp +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=3 & Rn_FPR16 & Rd_FPR16 & Zd +{ + local tmp1:2 = Rm_FPR16; + Rd_FPR16 = Rn_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd + if (CondOp:1) goto inst_next; + Rd_FPR16 = tmp1; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 +# CONSTRUCT x1ee2c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =float2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 +# AUNIT --inst x1ee2c000/mask=xfffffc00 --rand hfp --status pass --comment "nofpround" + +:fcvt Rd_FPR64, Rn_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x5 & b_1014=0x10 & Rn_FPR16 & Rd_FPR64 & Zd +{ + Rd_FPR64 = float2float(Rn_FPR16); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 +# CONSTRUCT x1e22c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =float2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 +# AUNIT --inst x1e22c000/mask=xfffffc00 --rand sfp --status pass --comment "nofpround" + +:fcvt Rd_FPR64, Rn_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x5 & b_1014=0x10 & Rn_FPR32 & Rd_FPR64 & Zd +{ + Rd_FPR64 = float2float(Rn_FPR32); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 +# CONSTRUCT x1e63c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =float2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 +# AUNIT --inst x1e63c000/mask=xfffffc00 --rand hfp --status pass --comment "nofpround" + +:fcvt Rd_FPR16, Rn_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x7 & b_1014=0x10 & Rn_FPR64 & Rd_FPR16 & Zd +{ + Rd_FPR16 = float2float(Rn_FPR64); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.69 FCVT page C7-1547 line 86009 MATCH 
+# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00
+# CONSTRUCT x1e23c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float/1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1
+# AUNIT --inst x1e23c000/mask=xfffffc00 --rand hfp --status fail --comment "nofpround"
+
+:fcvt Rd_FPR16, Rn_FPR32
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x7 & b_1014=0x10 & Rn_FPR32 & Rd_FPR16 & Zd
+{
+    Rd_FPR16 = float2float(Rn_FPR32);
+    zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00
+# CONSTRUCT x1e624000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float/1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1
+# AUNIT --inst x1e624000/mask=xfffffc00 --rand sfp --status fail --comment "nofpround"
+
+:fcvt Rd_FPR32, Rn_FPR64
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x4 & b_1014=0x10 & Rn_FPR64 & Rd_FPR32 & Zd
+{
+    Rd_FPR32 = float2float(Rn_FPR64);
+    zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00
+# CONSTRUCT x1ee24000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float/1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1
+# AUNIT --inst x1ee24000/mask=xfffffc00 --rand hfp --status pass --comment "nofpround"
+
+:fcvt Rd_FPR32, Rn_FPR16
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x4 & b_1014=0x10 & Rn_FPR16 & Rd_FPR32 & Zd
+{
+    Rd_FPR32 = float2float(Rn_FPR16);
+    zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.63 FCVTAS (vector) page C7-1136 line 65961 KEEPWITH
+
+fcvt_vmnemonic: "fcvtas" is b_29=0 & b_23=0 & b_1314=0b10 & b_12=0 { }
+fcvt_vmnemonic: "fcvtau" is b_29=1 & b_23=0 & b_1314=0b10 & b_12=0 { }
+fcvt_vmnemonic: "fcvtms" is b_29=0 & b_23=0 & b_1314=0b01 & b_12=1 { }
+fcvt_vmnemonic: "fcvtmu" is b_29=1 & b_23=0 & b_1314=0b01 & b_12=1 { }
+fcvt_vmnemonic: "fcvtns" is b_29=0 & b_23=0 & b_1314=0b01 & b_12=0 { }
+fcvt_vmnemonic: "fcvtnu" is b_29=1 & b_23=0 & b_1314=0b01 & b_12=0 { }
+fcvt_vmnemonic: "fcvtps" is b_29=0 & b_23=1 & b_1314=0b01 & b_12=0 { }
+fcvt_vmnemonic: "fcvtpu" is b_29=1 & b_23=1 & b_1314=0b01 & b_12=0 { }
+fcvt_vmnemonic: "fcvtzs" is b_29=0 & b_23=1 & b_1314=0b01 & b_12=1 { }
+fcvt_vmnemonic: "fcvtzu" is b_29=1 & b_23=1 & b_1314=0b01 & b_12=1 { }
+
+fcvt_smnemonic: "fcvtas" is b_1920=0b00 & b_1618=0b100 { }
+fcvt_smnemonic: "fcvtau" is b_1920=0b00 & b_1618=0b101 { }
+fcvt_smnemonic: "fcvtms" is b_1920=0b10 & b_1618=0b000 { }
+fcvt_smnemonic: "fcvtmu" is b_1920=0b10 & b_1618=0b001 { }
+fcvt_smnemonic: "fcvtns" is b_1920=0b00 & b_1618=0b000 { }
+fcvt_smnemonic: "fcvtnu" is b_1920=0b00 & b_1618=0b001 { }
+fcvt_smnemonic: "fcvtps" is b_1920=0b01 & b_1618=0b000 { }
+fcvt_smnemonic: "fcvtpu" is b_1920=0b01 & b_1618=0b001 { }
+fcvt_smnemonic: "fcvtzs" is b_1920=0b11 & b_1618=0b000 { }
+fcvt_smnemonic: "fcvtzu" is b_1920=0b11 & b_1618=0b001 { }
+
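+# The fcvt_vmnemonic/fcvt_smnemonic subtables factor out the rounding-mode and
+# signedness bits shared by the FCVTA/FCVTM/FCVTN/FCVTP/FCVTZ S/U family; every
+# variant below lowers to the single p-code trunc operation, so the per-mnemonic
+# rounding behaviour is not modelled (hence the "nofpround" AUNIT comments and the
+# fail/noqemu test statuses).
+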
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x5e79c800/mask=xfffffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x7e79c800/mask=xfffffc00
+# CONSTRUCT x5e79c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x5e79c800/mask=xdffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Scalar half precision
+
+:^fcvt_vmnemonic Rd_FPR16, Rn_FPR16
+is b_3031=0b01 & b_1028=0b1111001111001110010 & fcvt_vmnemonic & Rd_FPR16 & Rn_FPR16 & Zd
+{
+    Rd_FPR16 = trunc(Rn_FPR16);
+    zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x5e21c800/mask=xffbffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x7e21c800/mask=xffbffc00
+# CONSTRUCT x5e21c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x5e21c800/mask=xdffffc00 --rand sfp --status fail --comment "nofpround"
+# Scalar single-precision and double-precision variant sz=0
+
+:^fcvt_vmnemonic Rd_FPR32, Rn_FPR32
+is b_3031=0b01 & b_2328=0b111100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_FPR32 & Rn_FPR32 & Zd
+{
+    Rd_FPR32 = trunc(Rn_FPR32);
+    zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x5e21c800/mask=xffbffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x7e21c800/mask=xffbffc00
+# CONSTRUCT x5e61c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x5e61c800/mask=xdffffc00 --rand dfp --status fail --comment "nofpround"
+# Scalar single-precision and double-precision variant sz=1
+
+:^fcvt_vmnemonic Rd_FPR64, Rn_FPR64
+is b_3031=0b01 & b_2328=0b111100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_FPR64 & Rn_FPR64 & Zd
+{
+    Rd_FPR64 = trunc(Rn_FPR64);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e79c800/mask=xbffffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x2e79c800/mask=xbffffc00
+# CONSTRUCT x0e79c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2
+# AUNIT --inst x0e79c800/mask=xdffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant Q=0
+
+:^fcvt_vmnemonic Rd_VPR64.4H, Rn_VPR64.4H
+is b_31=0 & b_30=0 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & Rd_VPR64.4H & Rn_VPR64.4H & Zd
+{
+    # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2
+    Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]);
+    Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]);
+    Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]);
+    Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e79c800/mask=xbffffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x2e79c800/mask=xbffffc00
+# CONSTRUCT x4e79c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2
+# AUNIT --inst x4e79c800/mask=xdffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant Q=1
+
+:^fcvt_vmnemonic Rd_VPR128.8H, Rn_VPR128.8H
+is b_31=0 & b_30=1 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & Rd_VPR128.8H & Rn_VPR128.8H & Zd
+{
+    # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2
+    Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]);
+    Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]);
+    Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]);
+    Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]);
+    Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]);
+    Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]);
+    Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]);
+    Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]);
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e21c800/mask=xbfbffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x2e21c800/mask=xbfbffc00
+# CONSTRUCT x0e21c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4
+# AUNIT --inst x0e21c800/mask=xdffffc00 --rand sfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant SIMD 2S when sz = 0 , Q = 0
+
+:^fcvt_vmnemonic Rd_VPR64.2S, Rn_VPR64.2S
+is b_31=0 & b_30=0 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR64.2S & Rn_VPR64.2S & Zd
+{
+    # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4
+    Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]);
+    Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e21c800/mask=xbfbffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x2e21c800/mask=xbfbffc00
+# CONSTRUCT x4e21c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4
+# AUNIT --inst x4e21c800/mask=xdffffc00 --rand sfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant SIMD 4S when sz = 0 , Q = 1
+
+:^fcvt_vmnemonic Rd_VPR128.4S, Rn_VPR128.4S
+is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR128.4S & Rn_VPR128.4S & Zd
+{
+    # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4
+    Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]);
+    Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]);
+    Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]);
+    Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]);
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e21c800/mask=xbfbffc00
+# C7.2.72 FCVTAU (vector) page C7-1554 line 86430 MATCH x2e21c800/mask=xbfbffc00
+# CONSTRUCT x4e61c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@8
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@8
+# AUNIT --inst x4e61c800/mask=xdffffc00 --rand dfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant SIMD 2D when sz = 1 , Q = 1
+
+:^fcvt_vmnemonic Rd_VPR128.2D, Rn_VPR128.2D
+is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR128.2D & Rn_VPR128.2D & Zd
+{
+    # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8
+    Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]);
+    Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]);
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00
+# C7.2.73 FCVTAU (scalar) page C7-1557 line 86615 MATCH x1e250000/mask=x7f3ffc00
+# CONSTRUCT x1ee40000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x1ee40000/mask=xfffefc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 32-bit variant when sf == 0 && type == 11
+
+:^fcvt_smnemonic Rd_GPR32, Rn_FPR16
+is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR16 & Rd_GPR64
+{
+    Rd_GPR32 = trunc(Rn_FPR16);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00
+# C7.2.73 FCVTAU (scalar) page C7-1557 line 86615 MATCH x1e250000/mask=x7f3ffc00
+# CONSTRUCT x9ee40000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x9ee40000/mask=xfffefc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 64-bit variant when sf == 1 && type == 11
+
+:^fcvt_smnemonic Rd_GPR64, Rn_FPR16
+is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR16
+{
+    Rd_GPR64 = trunc(Rn_FPR16);
+}
+
+# C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00
+# C7.2.73 FCVTAU (scalar) page C7-1557 line 86615 MATCH x1e250000/mask=x7f3ffc00
+# CONSTRUCT x1e240000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x1e240000/mask=xfffefc00 --rand sfp --status fail --comment "nofpround"
+# Single-precision to 32-bit variant when sf == 0 && type == 00
+
+:^fcvt_smnemonic Rd_GPR32, Rn_FPR32
+is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR32 & Rd_GPR64
+{
+    Rd_GPR32 = trunc(Rn_FPR32);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00
+# C7.2.73 FCVTAU (scalar) page C7-1557 line 86615 MATCH x1e250000/mask=x7f3ffc00
+# CONSTRUCT x9e240000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x9e240000/mask=xfffefc00 --rand sfp --status fail --comment "nofpround"
+# Single-precision to 64-bit variant when sf == 1 && type == 00
+
+:^fcvt_smnemonic Rd_GPR64, Rn_FPR32
+is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR32
+{
+    Rd_GPR64 = trunc(Rn_FPR32);
+}
+
+# C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00
+# C7.2.73 FCVTAU (scalar) page C7-1557 line 86615 MATCH x1e250000/mask=x7f3ffc00
+# CONSTRUCT x1e640000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x1e640000/mask=xfffefc00 --rand dfp --status fail --comment "nofpround"
+# Double-precision to 32-bit variant when sf == 0 && type == 01
+
+:^fcvt_smnemonic Rd_GPR32, Rn_FPR64
+is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR64 & Rd_GPR64
+{
+    Rd_GPR32 = trunc(Rn_FPR64);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00
+# C7.2.73 FCVTAU (scalar) page C7-1557 line 86615 MATCH x1e250000/mask=x7f3ffc00
+# CONSTRUCT x9e640000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x9e640000/mask=xfffefc00 --rand dfp --status fail --comment "nofpround"
+# Double-precision to 64-bit variant sf == 1 && type == 01
+
+:^fcvt_smnemonic Rd_GPR64, Rn_FPR64
+is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR64
+{
+    Rd_GPR64 = trunc(Rn_FPR64);
+}
+
+# C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00
+# CONSTRUCT x0e617800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =$float2float@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl/1@4
+# AUNIT --inst x0e617800/mask=xfffffc00 --rand dfp --status fail --comment "ext nofpround"
nofpround" + +:fcvtl Rd_VPR128.2D, Rn_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR64.2S; + # simd resize Rd_VPR128.2D = float2float(TMPD1) (lane size 4 to 8) + Rd_VPR128.2D[0,64] = float2float(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = float2float(TMPD1[32,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 +# CONSTRUCT x4e617800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 =$float2float@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl2/1@8 +# AUNIT --inst x4e617800/mask=xfffffc00 --rand dfp --status fail --comment "ext nofpround" + +:fcvtl2 Rd_VPR128.2D, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize Rd_VPR128.2D = float2float(TMPD1) (lane size 4 to 8) + Rd_VPR128.2D[0,64] = float2float(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = float2float(TMPD1[32,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 +# CONSTRUCT x0e217800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =$float2float@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl/1@4 +# AUNIT --inst x0e217800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround" + +:fcvtl Rd_VPR128.4S, Rn_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR64.4H; + # simd resize Rd_VPR128.4S = float2float(TMPD1) (lane size 2 to 4) + Rd_VPR128.4S[0,32] = float2float(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = float2float(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = float2float(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = float2float(TMPD1[48,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 +# CONSTRUCT x4e217800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 =$float2float@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl2/1@2 +# AUNIT --inst x4e217800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround" + +:fcvtl2 Rd_VPR128.4S, Rn_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize Rd_VPR128.4S = float2float(TMPD1) (lane size 2 to 4) + Rd_VPR128.4S[0,32] = float2float(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = float2float(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = float2float(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = float2float(TMPD1[48,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e79b800/mask=xfffffc00 +# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x7e79b800/mask=xfffffc00 +# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x5e79a800/mask=xfffffc00 +# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x7e79a800/mask=xfffffc00 +# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x5ef9a800/mask=xfffffc00 +# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x7ef9a800/mask=xfffffc00 +# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x5ef9b800/mask=xfffffc00 +# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x7ef9b800/mask=xfffffc00 +# CONSTRUCT x5e79a800/mask=xdf7fec00 
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e79b800/mask=xfffffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x7e79b800/mask=xfffffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x5e79a800/mask=xfffffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x7e79a800/mask=xfffffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x5ef9a800/mask=xfffffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x7ef9a800/mask=xfffffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x5ef9b800/mask=xfffffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x7ef9b800/mask=xfffffc00
+# CONSTRUCT x5e79a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x5e79a800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround"
+# Scalar half precision
+
+:^fcvt_vmnemonic Rd_FPR16, Rn_FPR16
+is b_3031=0b01 & b_2428=0b11110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR16 & Rn_FPR16 & Zd
+{
+    Rd_FPR16 = trunc(Rn_FPR16);
+    zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e21b800/mask=xffbffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x7e21b800/mask=xffbffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x5e21a800/mask=xffbffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x7e21a800/mask=xffbffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x5ea1a800/mask=xffbffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x7ea1a800/mask=xffbffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x5ea1b800/mask=xffbffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x7ea1b800/mask=xffbffc00
+# CONSTRUCT x5e21a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x5e21a800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround"
+# Scalar single-precision and double-precision variant sz=0
+
+:^fcvt_vmnemonic Rd_FPR32, Rn_FPR32
+is b_3031=0b01 & b_2428=0b11110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR32 & Rn_FPR32 & Zd
+{
+    Rd_FPR32 = trunc(Rn_FPR32);
+    zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e21b800/mask=xffbffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x7e21b800/mask=xffbffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x5e21a800/mask=xffbffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x7e21a800/mask=xffbffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x5ea1a800/mask=xffbffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x7ea1a800/mask=xffbffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x5ea1b800/mask=xffbffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x7ea1b800/mask=xffbffc00
+# CONSTRUCT x5e61a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x5e61a800/mask=xdf7fec00 --rand dfp --status fail --comment "nofpround"
+# Scalar single-precision and double-precision variant sz=1
+
+:^fcvt_vmnemonic Rd_FPR64, Rn_FPR64
+is b_3031=0b01 & b_2428=0b11110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR64 & Rn_FPR64 & Zd
+{
+    Rd_FPR64 = trunc(Rn_FPR64);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e79b800/mask=xbffffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x2e79b800/mask=xbffffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x0e79a800/mask=xbffffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x2e79a800/mask=xbffffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x0ef9a800/mask=xbffffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x2ef9a800/mask=xbffffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x0ef9b800/mask=xbffffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x2ef9b800/mask=xbffffc00
+# CONSTRUCT x0e79a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2
+# AUNIT --inst x0e79a800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant Q=0
+
+:^fcvt_vmnemonic Rd_VPR64.4H, Rn_VPR64.4H
+is b_31=0 & b_30=0 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR64.4H & Rn_VPR64.4H & Zd
+{
+    # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2
+    Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]);
+    Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]);
+    Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]);
+    Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e79b800/mask=xbffffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x2e79b800/mask=xbffffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x0e79a800/mask=xbffffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x2e79a800/mask=xbffffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x0ef9a800/mask=xbffffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x2ef9a800/mask=xbffffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x0ef9b800/mask=xbffffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x2ef9b800/mask=xbffffc00
+# CONSTRUCT x4e79a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2
+# AUNIT --inst x4e79a800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant Q=1
+
+:^fcvt_vmnemonic Rd_VPR128.8H, Rn_VPR128.8H
+is b_31=0 & b_30=1 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.8H & Rn_VPR128.8H & Zd
+{
+    # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2
+    Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]);
+    Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]);
+    Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]);
+    Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]);
+    Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]);
+    Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]);
+    Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]);
+    Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]);
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e21b800/mask=xbfbffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x2e21b800/mask=xbfbffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x0e21a800/mask=xbfbffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x2e21a800/mask=xbfbffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x0ea1a800/mask=xbfbffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x2ea1a800/mask=xbfbffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x0ea1b800/mask=xbfbffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x2ea1b800/mask=xbfbffc00
+# CONSTRUCT x0e21a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4
+# AUNIT --inst x0e21a800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant SIMD 2S when sz = 0 , Q = 0
+
+:^fcvt_vmnemonic Rd_VPR64.2S, Rn_VPR64.2S
+is b_31=0 & b_30=0 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR64.2S & Rn_VPR64.2S & Zd
+{
+    # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4
+    Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]);
+    Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e21b800/mask=xbfbffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x2e21b800/mask=xbfbffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x0e21a800/mask=xbfbffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x2e21a800/mask=xbfbffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x0ea1a800/mask=xbfbffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x2ea1a800/mask=xbfbffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x0ea1b800/mask=xbfbffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x2ea1b800/mask=xbfbffc00
+# CONSTRUCT x4e21a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4
+# AUNIT --inst x4e21a800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant SIMD 4S when sz = 0 , Q = 1
+
+:^fcvt_vmnemonic Rd_VPR128.4S, Rn_VPR128.4S
+is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.4S & Rn_VPR128.4S & Zd
+{
+    # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4
+    Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]);
+    Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]);
+    Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]);
+    Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]);
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e21b800/mask=xbfbffc00
+# C7.2.77 FCVTMU (vector) page C7-1566 line 87133 MATCH x2e21b800/mask=xbfbffc00
+# C7.2.80 FCVTNS (vector) page C7-1573 line 87534 MATCH x0e21a800/mask=xbfbffc00
+# C7.2.82 FCVTNU (vector) page C7-1578 line 87842 MATCH x2e21a800/mask=xbfbffc00
+# C7.2.84 FCVTPS (vector) page C7-1583 line 88150 MATCH x0ea1a800/mask=xbfbffc00
+# C7.2.86 FCVTPU (vector) page C7-1588 line 88458 MATCH x2ea1a800/mask=xbfbffc00
+# C7.2.90 FCVTZS (vector, integer) page C7-1598 line 89055 MATCH x0ea1b800/mask=xbfbffc00
+# C7.2.94 FCVTZU (vector, integer) page C7-1608 line 89640 MATCH x2ea1b800/mask=xbfbffc00
+# CONSTRUCT x4e61a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$trunc@8
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@8
+# AUNIT --inst x4e61a800/mask=xdf7fec00 --rand dfp --status fail --comment "nofpround"
+# Vector single-precision and double-precision variant SIMD 2D when sz = 1 , Q = 1
+
+:^fcvt_vmnemonic Rd_VPR128.2D, Rn_VPR128.2D
+is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.2D & Rn_VPR128.2D & Zd
+{
+    # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8
+    Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]);
+    Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]);
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00
+# C7.2.78 FCVTMU (scalar) page C7-1569 line 87318 MATCH x1e310000/mask=x7f3ffc00
+# C7.2.81 FCVTNS (scalar) page C7-1576 line 87719 MATCH x1e200000/mask=x7f3ffc00
+# C7.2.83 FCVTNU (scalar) page C7-1581 line 88027 MATCH x1e210000/mask=x7f3ffc00
+# C7.2.85 FCVTPS (scalar) page C7-1586 line 88335 MATCH x1e280000/mask=x7f3ffc00
+# C7.2.87 FCVTPU (scalar) page C7-1591 line 88643 MATCH x1e290000/mask=x7f3ffc00
+# C7.2.92 FCVTZS (scalar, integer) page C7-1603 line 89367 MATCH x1e380000/mask=x7f3ffc00
+# C7.2.96 FCVTZU (scalar, integer) page C7-1613 line 89952 MATCH x1e390000/mask=x7f3ffc00
+# CONSTRUCT x1ee00000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x1ee00000/mask=xffe6fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 32-bit variant when sf == 0 && type == 11
+
+:^fcvt_smnemonic Rd_GPR32, Rn_FPR16
+is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR16 & Rd_GPR64
+{
+    Rd_GPR32 = trunc(Rn_FPR16);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00
+# C7.2.78 FCVTMU (scalar) page C7-1569 line 87318 MATCH x1e310000/mask=x7f3ffc00
+# C7.2.81 FCVTNS (scalar) page C7-1576 line 87719 MATCH x1e200000/mask=x7f3ffc00
+# C7.2.83 FCVTNU (scalar) page C7-1581 line 88027 MATCH x1e210000/mask=x7f3ffc00
+# C7.2.85 FCVTPS (scalar) page C7-1586 line 88335 MATCH x1e280000/mask=x7f3ffc00
+# C7.2.87 FCVTPU (scalar) page C7-1591 line 88643 MATCH x1e290000/mask=x7f3ffc00
+# C7.2.92 FCVTZS (scalar, integer) page C7-1603 line 89367 MATCH x1e380000/mask=x7f3ffc00
+# C7.2.96 FCVTZU (scalar, integer) page C7-1613 line 89952 MATCH x1e390000/mask=x7f3ffc00
+# CONSTRUCT x9ee00000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x9ee00000/mask=xffe6fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 64-bit variant when sf == 1 && type == 11
+
+:^fcvt_smnemonic Rd_GPR64, Rn_FPR16
+is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR16
+{
+    Rd_GPR64 = trunc(Rn_FPR16);
+}
+
+# C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00
+# C7.2.78 FCVTMU (scalar) page C7-1569 line 87318 MATCH x1e310000/mask=x7f3ffc00
+# C7.2.81 FCVTNS (scalar) page C7-1576 line 87719 MATCH x1e200000/mask=x7f3ffc00
+# C7.2.83 FCVTNU (scalar) page C7-1581 line 88027 MATCH x1e210000/mask=x7f3ffc00
+# C7.2.85 FCVTPS (scalar) page C7-1586 line 88335 MATCH x1e280000/mask=x7f3ffc00
+# C7.2.87 FCVTPU (scalar) page C7-1591 line 88643 MATCH x1e290000/mask=x7f3ffc00
+# C7.2.92 FCVTZS (scalar, integer) page C7-1603 line 89367 MATCH x1e380000/mask=x7f3ffc00
+# C7.2.96 FCVTZU (scalar, integer) page C7-1613 line 89952 MATCH x1e390000/mask=x7f3ffc00
+# CONSTRUCT x1e200000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x1e200000/mask=xffe6fc00 --rand sfp --status fail --comment "nofpround"
+# Single-precision to 32-bit variant when sf == 0 && type == 00
+
+:^fcvt_smnemonic Rd_GPR32, Rn_FPR32
+is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR32 & Rd_GPR64
+{
+    Rd_GPR32 = trunc(Rn_FPR32);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00
+# C7.2.78 FCVTMU (scalar) page C7-1569 line 87318 MATCH x1e310000/mask=x7f3ffc00
+# C7.2.81 FCVTNS (scalar) page C7-1576 line 87719 MATCH x1e200000/mask=x7f3ffc00
+# C7.2.83 FCVTNU (scalar) page C7-1581 line 88027 MATCH x1e210000/mask=x7f3ffc00
+# C7.2.85 FCVTPS (scalar) page C7-1586 line 88335 MATCH x1e280000/mask=x7f3ffc00
+# C7.2.87 FCVTPU (scalar) page C7-1591 line 88643 MATCH x1e290000/mask=x7f3ffc00
+# C7.2.92 FCVTZS (scalar, integer) page C7-1603 line 89367 MATCH x1e380000/mask=x7f3ffc00
+# C7.2.96 FCVTZU (scalar, integer) page C7-1613 line 89952 MATCH x1e390000/mask=x7f3ffc00
+# CONSTRUCT x9e200000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x9e200000/mask=xffe6fc00 --rand sfp --status fail --comment "nofpround"
+# Single-precision to 64-bit variant when sf == 1 && type == 00
+
+:^fcvt_smnemonic Rd_GPR64, Rn_FPR32
+is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR32
+{
+    Rd_GPR64 = trunc(Rn_FPR32);
+}
+
+# C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00
+# C7.2.78 FCVTMU (scalar) page C7-1569 line 87318 MATCH x1e310000/mask=x7f3ffc00
+# C7.2.81 FCVTNS (scalar) page C7-1576 line 87719 MATCH x1e200000/mask=x7f3ffc00
+# C7.2.83 FCVTNU (scalar) page C7-1581 line 88027 MATCH x1e210000/mask=x7f3ffc00
+# C7.2.85 FCVTPS (scalar) page C7-1586 line 88335 MATCH x1e280000/mask=x7f3ffc00
+# C7.2.87 FCVTPU (scalar) page C7-1591 line 88643 MATCH x1e290000/mask=x7f3ffc00
+# C7.2.92 FCVTZS (scalar, integer) page C7-1603 line 89367 MATCH x1e380000/mask=x7f3ffc00
+# C7.2.96 FCVTZU (scalar, integer) page C7-1613 line 89952 MATCH x1e390000/mask=x7f3ffc00
+# CONSTRUCT x1e600000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x1e600000/mask=xffe6fc00 --rand dfp --status fail --comment "nofpround"
+# Double-precision to 32-bit variant when sf == 0 && type == 01
+
+:^fcvt_smnemonic Rd_GPR32, Rn_FPR64
+is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR64 & Rd_GPR64
+{
+    Rd_GPR32 = trunc(Rn_FPR64);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00
+# C7.2.78 FCVTMU (scalar) page C7-1569 line 87318 MATCH x1e310000/mask=x7f3ffc00
+# C7.2.81 FCVTNS (scalar) page C7-1576 line 87719 MATCH x1e200000/mask=x7f3ffc00
+# C7.2.83 FCVTNU (scalar) page C7-1581 line 88027 MATCH x1e210000/mask=x7f3ffc00
+# C7.2.85 FCVTPS (scalar) page C7-1586 line 88335 MATCH x1e280000/mask=x7f3ffc00
+# C7.2.87 FCVTPU (scalar) page C7-1591 line 88643 MATCH x1e290000/mask=x7f3ffc00
+# C7.2.92 FCVTZS (scalar, integer) page C7-1603 line 89367 MATCH x1e380000/mask=x7f3ffc00
+# C7.2.96 FCVTZU (scalar, integer) page C7-1613 line 89952 MATCH x1e390000/mask=x7f3ffc00
+# CONSTRUCT x9e600000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1
+# AUNIT --inst x9e600000/mask=xffe6fc00 --rand dfp --status fail --comment "nofpround"
+# Double-precision to 64-bit variant sf == 1 && type == 01
+
+:^fcvt_smnemonic Rd_GPR64, Rn_FPR64
+is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR64
+{
+    Rd_GPR64 = trunc(Rn_FPR64);
+}
+
+# C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00
+# CONSTRUCT x0e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =$float2float@8:8
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@8
+# AUNIT --inst x0e616800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround"
--comment "ext nofpround" + +:fcvtn Rd_VPR64.2S, Rn_VPR128.2D +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Rd_VPR128 & Zd +{ + TMPQ1 = Rn_VPR128.2D; + # simd resize Rd_VPR64.2S = float2float(TMPQ1) (lane size 8 to 4) + Rd_VPR64.2S[0,32] = float2float(TMPQ1[0,64]); + Rd_VPR64.2S[32,32] = float2float(TMPQ1[64,64]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 +# CONSTRUCT x4e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $float2float@8:8 1:1 &=$copy +# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn2/2@8 +# AUNIT --inst x4e616800/mask=xfffffc00 --rand sfp --status pass --comment "ext nofpround" + +:fcvtn2 Rd_VPR128.4S, Rn_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + # simd resize TMPD1 = float2float(Rn_VPR128.2D) (lane size 8 to 4) + TMPD1[0,32] = float2float(Rn_VPR128.2D[0,64]); + TMPD1[32,32] = float2float(Rn_VPR128.2D[64,64]); + # simd copy Rd_VPR128.4S element 1:1 = TMPD1 (lane size 8) + Rd_VPR128.4S[64,64] = TMPD1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 +# CONSTRUCT x0e216800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =$float2float@4:8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4 +# AUNIT --inst x0e216800/mask=xfffffc00 --rand hfp --status fail --comment "ext nofpround" + +:fcvtn Rd_VPR64.4H, Rn_VPR128.4S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Rd_VPR128 & Zd +{ + TMPQ1 = Rn_VPR128.4S; + # simd resize Rd_VPR64.4H = float2float(TMPQ1) (lane size 4 to 2) + Rd_VPR64.4H[0,16] = float2float(TMPQ1[0,32]); + Rd_VPR64.4H[16,16] = float2float(TMPQ1[32,32]); + Rd_VPR64.4H[32,16] = float2float(TMPQ1[64,32]); + Rd_VPR64.4H[48,16] = float2float(TMPQ1[96,32]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 +# CONSTRUCT x4e216800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $float2float@4:8 1:1 &=$copy +# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn2/2@4 +# AUNIT --inst x4e216800/mask=xfffffc00 --rand hfp --status fail --comment "ext nofpround" + +:fcvtn2 Rd_VPR128.8H, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + # simd resize TMPD1 = float2float(Rn_VPR128.4S) (lane size 4 to 2) + TMPD1[0,16] = float2float(Rn_VPR128.4S[0,32]); + TMPD1[16,16] = float2float(Rn_VPR128.4S[32,32]); + TMPD1[32,16] = float2float(Rn_VPR128.4S[64,32]); + TMPD1[48,16] = float2float(Rn_VPR128.4S[96,32]); + # simd copy Rd_VPR128.8H element 1:1 = TMPD1 (lane size 8) + Rd_VPR128.8H[64,64] = TMPD1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.88 FCVTXN, FCVTXN2 page C7-1593 line 88766 MATCH x7e216800/mask=xffbffc00 +# CONSTRUCT x7e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =float2float +# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtxn/2 +# AUNIT --inst x7e616800/mask=xfffffc00 --rand sfp --status fail --comment "nofpround" + +:fcvtxn Rd_FPR32, Rn_FPR64 +is b_2331=0b011111100 & b_22=1 & b_1021=0b100001011010 & Rd_FPR32 & Rn_FPR64 & Zd +{ + Rd_FPR32 = float2float(Rn_FPR64); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.88 FCVTXN, 
+# C7.2.88 FCVTXN, FCVTXN2 page C7-1593 line 88766 MATCH x2e216800/mask=xbfbffc00
+# CONSTRUCT x2e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =$float2float@8:8
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtxn/2@8
+# AUNIT --inst x2e616800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround"
+# Vector Variant
+
+:fcvtxn Rd_VPR64.2S, Rn_VPR128.2D
+is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR64.2S & Rd_VPR128 & Rn_VPR128.2D & Zd
+{
+    TMPQ1 = Rn_VPR128.2D;
+    # simd resize Rd_VPR64.2S = float2float(TMPQ1) (lane size 8 to 4)
+    Rd_VPR64.2S[0,32] = float2float(TMPQ1[0,64]);
+    Rd_VPR64.2S[32,32] = float2float(TMPQ1[64,64]);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.88 FCVTXN, FCVTXN2 page C7-1593 line 88766 MATCH x2e216800/mask=xbfbffc00
+# CONSTRUCT x6e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $float2float@8:8 1:1 &=$copy
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtxn2/2@8
+# AUNIT --inst x6e616800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround"
+# Vector Variant
+
+:fcvtxn2 Rd_VPR128.4S, Rn_VPR128.2D
+is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR128.4S & Rn_VPR128.2D & Rd_VPR128 & Zd
+{
+    # simd resize TMPD1 = float2float(Rn_VPR128.2D) (lane size 8 to 4)
+    TMPD1[0,32] = float2float(Rn_VPR128.2D[0,64]);
+    TMPD1[32,32] = float2float(Rn_VPR128.2D[64,64]);
+    # simd copy Rd_VPR128.4S element 1:1 = TMPD1 (lane size 8)
+    Rd_VPR128.4S[64,64] = TMPD1;
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x5f00fc00/mask=xff80fc00
+# CONSTRUCT x5f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 zext:8 =NEON_fcvtzs/2
+# AUNIT --inst x5f40fc00/mask=xffc0fc00 --rand dfp --status nopcodeop --comment "nofpround"
+# Scalar variant when immh=1xxx
+
+:fcvtzs Rd_FPR64, Rn_FPR64, Imm_shr_imm64
+is b_2331=0b010111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm64 & Rn_FPR64 & Rd_FPR64 & Zd
+{
+    local tmp1:8 = zext(Imm_shr_imm64);
+    Rd_FPR64 = NEON_fcvtzs(Rn_FPR64, tmp1);
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x5f00fc00/mask=xff80fc00
+# CONSTRUCT x5f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzs/2
+# AUNIT --inst x5f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+# Scalar variant when immh=01xx
+
+:fcvtzs Rd_FPR32, Rn_FPR32, Imm_shr_imm32
+is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 & Rd_FPR32 & Zd
+{
+    Rd_FPR32 = NEON_fcvtzs(Rn_FPR32, Imm_shr_imm32:4);
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x5f00fc00/mask=xff80fc00
+# CONSTRUCT x5f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2
+# AUNIT --inst x5f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Scalar variant when immh=001x
+
+:fcvtzs Rd_FPR16, Rn_FPR16, Imm_shr_imm16
+is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm16 & Rn_FPR16 & Rd_FPR16 & Zd
+{
+    Rd_FPR16 = NEON_fcvtzs(Rn_FPR16, Imm_shr_imm16);
+}
+
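+# NEON_fcvtzs here (like the other NEON_* calls) is presumably a user-defined p-code
+# operation declared elsewhere in this spec, so these fixed-point conversions remain
+# opaque pseudo-ops rather than expanded semantics; the "nopcodeop" AUNIT status
+# appears to mark exactly such constructors.
+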
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00
+# CONSTRUCT x4f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 zext:8 =NEON_fcvtzs/2@8
+# AUNIT --inst x4f40fc00/mask=xffc0fc00 --rand dfp --status nopcodeop --comment "nofpround"
+# Vector 2D variant when immh=1xxx Q=1 bb=b_22 cc=1 V=VPR128.2D imm=Imm_shr_imm64
+
+:fcvtzs Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64
+is b_31=0 & b_30=1 & b_2329=0b0011110 & b_22=1 & b_1015=0b111111 & Rd_VPR128.2D & Rn_VPR128.2D & Imm_shr_imm64 & Zd
+{
+    local tmp1:8 = zext(Imm_shr_imm64);
+    Rd_VPR128.2D = NEON_fcvtzs(Rn_VPR128.2D, tmp1, 8:1);
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00
+# CONSTRUCT x0f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzs/2@4
+# AUNIT --inst x0f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+# Vector 2S variant when immh=01xx Q=0 bb=b_2122 cc=0b01 V=VPR64.2S imm=Imm_shr_imm32
+
+:fcvtzs Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32
+is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR64.2S & Rn_VPR64.2S & Imm_shr_imm32 & Zd
+{
+    Rd_VPR64.2S = NEON_fcvtzs(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1);
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00
+# CONSTRUCT x4f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzs/2@4
+# AUNIT --inst x4f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+# Vector 4S variant when immh=01xx Q=1 bb=b_2122 cc=0b01 V=VPR128.4S imm=Imm_shr_imm32
+
+:fcvtzs Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32
+is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR128.4S & Rn_VPR128.4S & Imm_shr_imm32 & Zd
+{
+    Rd_VPR128.4S = NEON_fcvtzs(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1);
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00
+# CONSTRUCT x0f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2@2
+# AUNIT --inst x0f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector 4H variant when immh=001x Q=0 bb=b_2022 cc=0b001 V=VPR64.4H imm=Imm_shr_imm16
+
+:fcvtzs Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16
+is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR64.4H & Rn_VPR64.4H & Imm_shr_imm16 & Zd
+{
+    Rd_VPR64.4H = NEON_fcvtzs(Rn_VPR64.4H, Imm_shr_imm16, 2:1);
+}
+
+# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00
+# CONSTRUCT x4f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2@2
+# AUNIT --inst x4f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector 8H variant when immh=001x Q=1 bb=b_2022 cc=0b001 V=VPR128.8H imm=Imm_shr_imm16
+
+:fcvtzs Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16
+is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR128.8H & Rn_VPR128.8H & Imm_shr_imm16 & Zd
+{
+    Rd_VPR128.8H = NEON_fcvtzs(Rn_VPR128.8H, Imm_shr_imm16, 2:1);
+}
+
+# C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000
+# CONSTRUCT x1ed88000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits16 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_fcvtzs/2
+# AUNIT --inst x1ed88000/mask=xffff8000 --rand hfp --status noqemu --comment "nofpround"
+# if sf == '0' && scale<5> == '0' then UnallocatedEncoding();
+# Half-precision to 32-bit variant when sf == 0 && type == 11 G=GPR32 V=FPR16 size=2 fbits=FBits16
+
+:fcvtzs Rd_GPR32, Rn_FPR16, FBitsOp
+is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR16 & FBitsOp & FBits16 & Rd_GPR64
+{
+    local tmp1:2 = Rn_FPR16 f* FBits16;
+    Rd_GPR32 = trunc(tmp1);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000
+# CONSTRUCT x9ed80000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2
+# AUNIT --inst x9ed80000/mask=xffff0000 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 64-bit variant when sf == 1 && type == 11 G=GPR64 V=FPR16 size=2 fbits=FBits16
+
+:fcvtzs Rd_GPR64, Rn_FPR16, FBitsOp
+is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR16 & FBitsOp & FBits16
+{
+    local tmp1:2 = Rn_FPR16 f* FBitsOp;
+    Rd_GPR64 = trunc(tmp1);
+}
+
+# C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000
+# CONSTRUCT x1e188000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits32 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzs/2
+# AUNIT --inst x1e188000/mask=xffff8000 --rand sfp --status fail --comment "nofpround"
+# Single-precision to 32-bit variant when sf == 0 && type == 00 G=GPR32 V=FPR32 size=4 fbits=FBits32
+
+:fcvtzs Rd_GPR32, Rn_FPR32, FBitsOp
+is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR32 & FBitsOp & FBits32 & Rd_GPR64
+{
+    local tmp1:4 = Rn_FPR32 f* FBits32;
+    Rd_GPR32 = trunc(tmp1);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000
+# CONSTRUCT x9e180000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits32 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzs/2
+# AUNIT --inst x9e180000/mask=xffff0000 --rand sfp --status pass --comment "nofpround"
+# Single-precision to 64-bit variant when sf == 1 && type == 00 G=GPR64 V=FPR32 size=4 fbits=FBits32
+
+:fcvtzs Rd_GPR64, Rn_FPR32, FBitsOp
+is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR32 & FBitsOp & FBits32
+{
+    local tmp1:4 = Rn_FPR32 f* FBits32;
+    Rd_GPR64 = trunc(tmp1);
+}
+
+# C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000
+# CONSTRUCT x1e588000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits64 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_fcvtzs/2
+# AUNIT --inst x1e588000/mask=xffff8000 --rand dfp --status fail --comment "nofpround"
+# Double-precision to 32-bit variant when sf == 0 && type == 01 G=GPR32 V=FPR64 size=8 fbits=FBits64
+
+:fcvtzs Rd_GPR32, Rn_FPR64, FBitsOp
+is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR64 & FBitsOp & FBits64 & Rd_GPR64
+{
+    local tmp1:8 = Rn_FPR64 f* FBits64;
+    Rd_GPR32 = trunc(tmp1);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000
+# CONSTRUCT x9e580000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits64 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_fcvtzs/2
+# AUNIT --inst x9e580000/mask=xffff0000 --rand dfp --status pass --comment "nofpround"
+# Double-precision to 64-bit variant when sf == 1 && type == 01 G=GPR64 V=FPR64 size=8 fbits=FBits64
+
+:fcvtzs Rd_GPR64, Rn_FPR64, FBitsOp
+is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR64 & FBitsOp & FBits64
+{
+    local tmp1:8 = Rn_FPR64 f* FBits64;
+    Rd_GPR64 = trunc(tmp1);
+}
+
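+# The scalar fixed-point pattern above computes trunc(value f* FBitsN): FBits16/32/64
+# presumably supply the scale factor 2^fbits as a float constant of the operand size,
+# so e.g. 8 fractional bits multiply the value by 256.0 before the integer truncation.
+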
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00
+# CONSTRUCT x6f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 zext:8 =NEON_fcvtzu/2@8
+# AUNIT --inst x6f40fc00/mask=xffc0fc00 --rand dfp --status nopcodeop --comment "nofpround"
+
+:fcvtzu Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+    local tmp1:8 = zext(Imm_shr_imm64);
+    Rd_VPR128.2D = NEON_fcvtzu(Rn_VPR128.2D, tmp1, 8:1);
+}
+
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00
+# CONSTRUCT x2f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzu/2@4
+# AUNIT --inst x2f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+
+:fcvtzu Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+    Rd_VPR64.2S = NEON_fcvtzu(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1);
+}
+
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00
+# CONSTRUCT x6f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzu/2@4
+# AUNIT --inst x6f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+
+:fcvtzu Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+    Rd_VPR128.4S = NEON_fcvtzu(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1);
+}
+
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00
+# CONSTRUCT x2f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2@2
+# AUNIT --inst x2f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround"
+
+:fcvtzu Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+    Rd_VPR64.4H = NEON_fcvtzu(Rn_VPR64.4H, Imm_shr_imm16, 2:1);
+}
+
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00
+# CONSTRUCT x6f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2@2
+# AUNIT --inst x6f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround"
+
+:fcvtzu Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+    Rd_VPR128.8H = NEON_fcvtzu(Rn_VPR128.8H, Imm_shr_imm16, 2:1);
+}
+
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x7f00fc00/mask=xff80fc00
+# CONSTRUCT x7f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 1:2 ARG3 << int2float:2 f* fabs =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2
+# AUNIT --inst x7f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround"
+# FCVTZU (vector, fixed-point) Scalar immh=001x
+
+:fcvtzu Rd_FPR16, Rn_FPR16, Imm_shr_imm32
+is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR16 & Rd_FPR16 & Zd
+{
+    local tmp1:2 = 1:2 << Imm_shr_imm32;
+    local tmp2:2 = int2float(tmp1);
+    local tmp3:2 = Rn_FPR16 f* tmp2;
+    local tmp4:2 = abs(tmp3);
+    Rd_FPR16 = trunc(tmp4);
+    zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
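+# The FPR16 variant above instead builds the scale at run time: 1 << shift is
+# converted with int2float, multiplied in, and abs() is applied before trunc so the
+# result is non-negative; this only approximates FCVTZU, which architecturally
+# saturates out-of-range values rather than wrapping.
+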
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x7f00fc00/mask=xff80fc00
+# CONSTRUCT x7f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 1:4 ARG3:4 << int2float:4 f* fabs =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2
+# AUNIT --inst x7f20fc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+# FCVTZU (vector, fixed-point) Scalar immh=01xx
+
+:fcvtzu Rd_FPR32, Rn_FPR32, Imm_shr_imm32
+is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 & Rd_FPR32 & Zd
+{
+    local tmp1:4 = 1:4 << Imm_shr_imm32:4;
+    local tmp2:4 = int2float(tmp1);
+    local tmp3:4 = Rn_FPR32 f* tmp2;
+    local tmp4:4 = abs(tmp3);
+    Rd_FPR32 = trunc(tmp4);
+    zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x7f00fc00/mask=xff80fc00
+# CONSTRUCT x7f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 1:8 ARG3 zext:8 << int2float:8 f* fabs =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2
+# AUNIT --inst x7f40fc00/mask=xffc0fc00 --rand dfp --status fail --comment "nofpround"
+# FCVTZU (vector, fixed-point) Scalar immh=1xxx
+
+:fcvtzu Rd_FPR64, Rn_FPR64, Imm_shr_imm32
+is b_2331=0b011111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR64 & Rd_FPR64 & Zd
+{
+    local tmp1:8 = zext(Imm_shr_imm32);
+    local tmp2:8 = 1:8 << tmp1;
+    local tmp3:8 = int2float(tmp2);
+    local tmp4:8 = Rn_FPR64 f* tmp3;
+    local tmp5:8 = abs(tmp4);
+    Rd_FPR64 = trunc(tmp5);
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000
+# CONSTRUCT x1ed98000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2
+# AUNIT --inst x1ed98000/mask=xffff8000 --rand hfp --status noqemu --comment "nofpround"
+
+:fcvtzu Rd_GPR32, Rn_FPR16, FBitsOp
+is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits16 & Rn_FPR16 & Rd_GPR32 & Rd_GPR64
+{
+    local tmp1:2 = Rn_FPR16 f* FBitsOp;
+    Rd_GPR32 = trunc(tmp1);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000
+# CONSTRUCT x9ed90000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2
+# AUNIT --inst x9ed90000/mask=xffff0000 --rand hfp --status noqemu --comment "nofpround"
+
+:fcvtzu Rd_GPR64, Rn_FPR16, FBitsOp
+is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits16 & Rn_FPR16 & Rd_GPR64
+{
+    local tmp1:2 = Rn_FPR16 f* FBitsOp;
+    Rd_GPR64 = trunc(tmp1);
+}
+
+# C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000
+# CONSTRUCT x1e598000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits64 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2
+# AUNIT --inst x1e598000/mask=xffff8000 --rand dfp --status fail --comment "nofpround"
+
+:fcvtzu Rd_GPR32, Rn_FPR64, FBitsOp
+is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits64 & Rn_FPR64 & Rd_GPR32 & Rd_GPR64
+{
+    local tmp1:8 = Rn_FPR64 f* FBits64;
+    Rd_GPR32 = trunc(tmp1);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000
+# CONSTRUCT x1e198000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits32 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzu/2
+# AUNIT --inst x1e198000/mask=xffff8000 --rand sfp --status fail --comment "nofpround"
+
+:fcvtzu Rd_GPR32, Rn_FPR32, FBitsOp
+is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits32 & Rn_FPR32 & Rd_GPR32 & Rd_GPR64
+{
+    local tmp1:4 = Rn_FPR32 f* FBits32;
+    Rd_GPR32 = trunc(tmp1);
+    zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000
+# CONSTRUCT x9e590000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits64 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_fcvtzu/2
+# AUNIT --inst x9e590000/mask=xffff0000 --rand dfp --status fail --comment "nofpround"
+
+:fcvtzu Rd_GPR64, Rn_FPR64, FBitsOp
+is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits64 & Rn_FPR64 & Rd_GPR64
+{
+    local tmp1:8 = Rn_FPR64 f* FBits64;
+    Rd_GPR64 = trunc(tmp1);
+}
+
+# C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000
+# CONSTRUCT x9e190000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 FBits32 f* =trunc
+# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzu/2
+# AUNIT --inst x9e190000/mask=xffff0000 --rand sfp --status fail --comment "nofpround"
+
+:fcvtzu Rd_GPR64, Rn_FPR32, FBitsOp
+is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits32 & Rn_FPR32 & Rd_GPR64
+{
+    local tmp1:4 = Rn_FPR32 f* FBits32;
+    Rd_GPR64 = trunc(tmp1);
+}
+
+# C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e20fc00/mask=xbfa0fc00
+# CONSTRUCT x6e60fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$f/@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@8
+# AUNIT --inst x6e60fc00/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"
+
+:fdiv Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+    # simd infix Rd_VPR128.2D = Rn_VPR128.2D f/ Rm_VPR128.2D on lane size 8
+    Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f/ Rm_VPR128.2D[0,64];
+    Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f/ Rm_VPR128.2D[64,64];
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e20fc00/mask=xbfa0fc00
+# CONSTRUCT x2e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$f/@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@4
+# AUNIT --inst x2e20fc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+
+:fdiv Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+    # simd infix Rd_VPR64.2S = Rn_VPR64.2S f/ Rm_VPR64.2S on lane size 4
+    Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f/ Rm_VPR64.2S[0,32];
+    Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f/ Rm_VPR64.2S[32,32];
+    zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e20fc00/mask=xbfa0fc00
+# CONSTRUCT x6e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$f/@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@4
+# AUNIT --inst x6e20fc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+
+:fdiv Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S f/ Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f/ Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f/ Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f/ Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f/ Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e403c00/mask=xbfe0fc00 +# CONSTRUCT x2e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f/@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@2 +# AUNIT --inst x2e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fdiv Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f/ Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f/ Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f/ Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f/ Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f/ Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e403c00/mask=xbfe0fc00 +# CONSTRUCT x6e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f/@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@2 +# AUNIT --inst x6e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fdiv Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f/ Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f/ Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f/ Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f/ Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f/ Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f/ Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f/ Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f/ Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f/ Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.98 FDIV (scalar) page C7-1617 line 90190 MATCH x1e201800/mask=xff20fc00 +# CONSTRUCT x1e601800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f/ +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2 +# AUNIT --inst x1e601800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fdiv Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x1 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64 f/ Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.98 FDIV (scalar) page C7-1617 line 90190 MATCH x1e201800/mask=xff20fc00 +# CONSTRUCT x1e201800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f/ +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2 +# AUNIT --inst x1e201800/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" + +:fdiv Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & 
b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x1 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
+{
+ Rd_FPR32 = Rn_FPR32 f/ Rm_FPR32;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.98 FDIV (scalar) page C7-1617 line 90190 MATCH x1e201800/mask=xff20fc00
+# CONSTRUCT x1ee01800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =f/
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2
+# AUNIT --inst x1ee01800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+
+:fdiv Rd_FPR16, Rn_FPR16, Rm_FPR16
+is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x1 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
+{
+ Rd_FPR16 = Rn_FPR16 f/ Rm_FPR16;
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.99 FJCVTZS page C7-1619 line 90296 MATCH x1e7e0000/mask=xfffffc00
+# CONSTRUCT x1e7e0000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =trunc
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fjcvtzs/1
+# AUNIT --inst x1e7e0000/mask=xfffffc00 --rand dfp --status noqemu --comment "nofpround"
+
+:fjcvtzs Rd_GPR32, Rn_FPR64
+is b_1031=0b0001111001111110000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64
+{
+ Rd_GPR32 = trunc(Rn_FPR64);
+ zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.100 FMADD page C7-1620 line 90360 MATCH x1f000000/mask=xff208000
+# CONSTRUCT x1f400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmadd/3
+# AUNIT --inst x1f400000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround"
+
+:fmadd Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64
+is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=0 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd
+{
+ Rd_FPR64 = NEON_fmadd(Rn_FPR64, Rm_FPR64, Ra_FPR64);
+}
+
+# C7.2.100 FMADD page C7-1620 line 90360 MATCH x1f000000/mask=xff208000
+# CONSTRUCT x1f000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmadd/3
+# AUNIT --inst x1f000000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround"
+
+:fmadd Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32
+is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=0 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd
+{
+ Rd_FPR32 = NEON_fmadd(Rn_FPR32, Rm_FPR32, Ra_FPR32);
+}
+
+# C7.2.100 FMADD page C7-1620 line 90360 MATCH x1f000000/mask=xff208000
+# CONSTRUCT x1fc00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmadd/3
+# AUNIT --inst x1fc00000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround"
+
+:fmadd Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16
+is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=0 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd
+{
+ Rd_FPR16 = NEON_fmadd(Rn_FPR16, Rm_FPR16, Ra_FPR16);
+}
+
+# C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e20f400/mask=xbfa0fc00
+# CONSTRUCT x4e60f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@8
+# AUNIT --inst x4e60f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
+
+:fmax Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+ Rd_VPR128.2D = NEON_fmax(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
+}
+
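The FMADD constructors above bottom out in the `NEON_fmadd` pseudo-op (status `nopcodeop`) because p-code has no fused multiply-add primitive; emitting `f*` followed by `f+` would round twice. A sketch of what a callother handler for the pseudo-op is expected to compute, using C++'s `std::fma`; the handler name is illustrative, only the arithmetic is the point:

```cpp
#include <cmath>

// What NEON_fmadd(Rn, Rm, Ra) should evaluate to: Ra + Rn*Rm with a single
// rounding step, i.e. a true fused multiply-add rather than f* then f+.
double neon_fmadd64(double rn, double rm, double ra)
{
    return std::fma(rn, rm, ra);
}
```

+# C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e20f400/mask=xbfa0fc00
+# CONSTRUCT x0e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@4
+# AUNIT --inst x0e20f400/mask=xffe0fc00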
--rand sfp --status nopcodeop --comment "nofpround" + +:fmax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fmax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e20f400/mask=xbfa0fc00 +# CONSTRUCT x4e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@4 +# AUNIT --inst x4e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fmax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e403400/mask=xbfe0fc00 +# CONSTRUCT x0e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@2 +# AUNIT --inst x0e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fmax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e403400/mask=xbfe0fc00 +# CONSTRUCT x4e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@2 +# AUNIT --inst x4e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fmax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fmax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.102 FMAX (scalar) page C7-1624 line 90609 MATCH x1e204800/mask=xff20fc00 +# CONSTRUCT x1e604800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2 +# AUNIT --inst x1e604800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmax Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x4 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd + local tmp1:1 = Rn_FPR64 f> Rm_FPR64; + if (tmp1) goto inst_next; + Rd_FPR64 = Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.102 FMAX (scalar) page C7-1624 line 90609 MATCH x1e204800/mask=xff20fc00 +# CONSTRUCT x1e204800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2 +# AUNIT --inst x1e204800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmax Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x4 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = Rn_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd + local tmp1:1 = Rn_FPR32 f> Rm_FPR32; + if (tmp1) goto inst_next; + Rd_FPR32 = Rm_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.102 FMAX (scalar) page C7-1624 line 90609 MATCH x1e204800/mask=xff20fc00 +# CONSTRUCT x1ee04800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2 +# AUNIT --inst x1ee04800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fmax Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x4 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_fmax(Rn_FPR16, Rm_FPR16); +} + +# C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e20c400/mask=xbfa0fc00 +# CONSTRUCT x4e60c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@8 +# AUNIT --inst x4e60c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmaxnm Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fmaxnm(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e20c400/mask=xbfa0fc00 +# CONSTRUCT x0e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@4 +# AUNIT --inst x0e20c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxnm Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fmaxnm(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e20c400/mask=xbfa0fc00 +# CONSTRUCT x4e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@4 +# AUNIT --inst x4e20c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxnm Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fmaxnm(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e400400/mask=xbfe0fc00 +# CONSTRUCT x0e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@2 +# AUNIT --inst x0e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision SIMD 4H when Q = 0 + +:fmaxnm Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fmaxnm(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e400400/mask=xbfe0fc00 +# CONSTRUCT x4e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@2 +# AUNIT --inst x4e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision SIMD 8H when Q = 1 + +:fmaxnm Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fmaxnm(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.104 FMAXNM (scalar) page C7-1628 line 90842 MATCH x1e206800/mask=xff20fc00 +# CONSTRUCT x1e606800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2 +# AUNIT --inst x1e606800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmaxnm Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & 
ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x6 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd + local tmp1:1 = Rn_FPR64 f> Rm_FPR64; + if (tmp1) goto inst_next; + Rd_FPR64 = Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.104 FMAXNM (scalar) page C7-1628 line 90842 MATCH x1e206800/mask=xff20fc00 +# CONSTRUCT x1e206800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2 +# AUNIT --inst x1e206800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxnm Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x6 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = Rn_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd + local tmp1:1 = Rn_FPR32 f> Rm_FPR32; + if (tmp1) goto inst_next; + Rd_FPR32 = Rm_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.104 FMAXNM (scalar) page C7-1628 line 90842 MATCH x1e206800/mask=xff20fc00 +# CONSTRUCT x1ee06800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2 +# AUNIT --inst x1ee06800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fmaxnm Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x6 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = Rn_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd + local tmp1:1 = Rn_FPR16 f> Rm_FPR16; + if (tmp1) goto inst_next; + Rd_FPR16 = Rm_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.105 FMAXNMP (scalar) page C7-1630 line 90948 MATCH x7e30c800/mask=xffbffc00 +# CONSTRUCT x7e70c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmp/1@8 +# AUNIT --inst x7e70c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmaxnmp Rd_FPR64, Rn_VPR128.2D +is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fmaxnmp(Rn_VPR128.2D, 8:1); +} + +# C7.2.105 FMAXNMP (scalar) page C7-1630 line 90948 MATCH x7e30c800/mask=xffbffc00 +# CONSTRUCT x7e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmp/1@4 +# AUNIT --inst x7e30c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxnmp Rd_FPR32, Rn_VPR64.2S +is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fmaxnmp(Rn_VPR64.2S, 4:1); +} + +# C7.2.105 FMAXNMP (scalar) page C7-1630 line 90948 MATCH x5e30c800/mask=xfffffc00 +# CONSTRUCT x5e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fmaxnmp/1@2 +# AUNIT --inst x5e30c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant + +:fmaxnmp Rd_FPR16, vRn_VPR128^".2H" +is b_1031=0b0101111000110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd +{ + Rd_FPR16 = NEON_fmaxnmp(Rn_FPR32, 2:1); +} + +# C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e20c400/mask=xbfa0fc00 +# CONSTRUCT x6e60c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@8 +# AUNIT --inst x6e60c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmaxnmp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is 
b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fmaxnmp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e20c400/mask=xbfa0fc00 +# CONSTRUCT x2e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@4 +# AUNIT --inst x2e20c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmaxnmp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fmaxnmp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e20c400/mask=xbfa0fc00 +# CONSTRUCT x6e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@4 +# AUNIT --inst x6e20c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxnmp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fmaxnmp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e400400/mask=xbfe0fc00 +# CONSTRUCT x2e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@2 +# AUNIT --inst x2e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmaxnmp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fmaxnmp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e400400/mask=xbfe0fc00 +# CONSTRUCT x6e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@2 +# AUNIT --inst x6e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fmaxnmp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fmaxnmp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.107 FMAXNMV page C7-1634 line 91185 MATCH x2e30c800/mask=xbfbffc00 +# CONSTRUCT x6e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@4 +# AUNIT --inst x6e30c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxnmv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fmaxnmv(Rn_VPR128.4S, 4:1); +} + +# C7.2.107 FMAXNMV page C7-1634 line 91185 MATCH x0e30c800/mask=xbffffc00 +# CONSTRUCT x0e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@2 +# AUNIT --inst x0e30c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmaxnmv Rd_FPR16, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd +{ + Rd_FPR16 = NEON_fmaxnmv(Rn_VPR64.4H, 2:1); +} + +# C7.2.107 FMAXNMV page C7-1634 line 91185 MATCH x0e30c800/mask=xbffffc00 +# CONSTRUCT x4e30c800/mask=xfffffc00 
MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@2
+# AUNIT --inst x4e30c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant SIMD 8H when Q = 1
+
+:fmaxnmv Rd_FPR16, Rn_VPR128.8H
+is b_31=0 & b_30=1 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR128.8H & Zd
+{
+ Rd_FPR16 = NEON_fmaxnmv(Rn_VPR128.8H, 2:1);
+}
+
+# C7.2.108 FMAXP (scalar) page C7-1636 line 91293 MATCH x7e30f800/mask=xffbffc00
+# CONSTRUCT x7e70f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxp/1@8
+# AUNIT --inst x7e70f800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround"
+
+:fmaxp Rd_FPR64, Rn_VPR128.2D
+is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd
+{
+ Rd_FPR64 = NEON_fmaxp(Rn_VPR128.2D, 8:1);
+}
+
+# C7.2.108 FMAXP (scalar) page C7-1636 line 91293 MATCH x7e30f800/mask=xffbffc00
+# CONSTRUCT x7e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxp/1@4
+# AUNIT --inst x7e30f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
+
+:fmaxp Rd_FPR32, Rn_VPR64.2S
+is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd
+{
+ Rd_FPR32 = NEON_fmaxp(Rn_VPR64.2S, 4:1);
+}
+
+# C7.2.108 FMAXP (scalar) page C7-1636 line 91293 MATCH x5e30f800/mask=xfffffc00
+# CONSTRUCT x5e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fmaxp/1@2
+# AUNIT --inst x5e30f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant
+
+:fmaxp Rd_FPR16, vRn_VPR128^".2H"
+is b_1031=0b0101111000110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd
+{
+ Rd_FPR16 = NEON_fmaxp(Rn_FPR32, 2:1);
+}
+
+# C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e20f400/mask=xbfa0fc00
+# CONSTRUCT x6e60f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@8
+# AUNIT --inst x6e60f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
+
+:fmaxp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+ Rd_VPR128.2D = NEON_fmaxp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
+}
+
+# C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e20f400/mask=xbfa0fc00
+# CONSTRUCT x2e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@4
+# AUNIT --inst x2e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+
+:fmaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ Rd_VPR64.2S = NEON_fmaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
+}
+
+# C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e20f400/mask=xbfa0fc00
+# CONSTRUCT x6e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@4
+# AUNIT --inst x6e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+
+:fmaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+ Rd_VPR128.4S = NEON_fmaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
+}
+
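FMAXP's pairwise shape is easy to misread: the scalar forms reduce the two (or more) lanes of a single source register to one value, while the vector forms reduce adjacent lane pairs of the concatenation Rn:Rm. A sketch of the 4S vector case, with `std::max` standing in for the pseudo-op's floating-point max (it does not reproduce IEEE NaN handling, and the helper name is illustrative):

```cpp
#include <algorithm>
#include <array>

// Pairwise max over the concatenation Rn:Rm, as the 4S form of fmaxp does.
// std::max is a stand-in; it does not model IEEE NaN propagation.
std::array<float, 4> fmaxp_4s(const std::array<float, 4>& rn,
                              const std::array<float, 4>& rm)
{
    return { std::max(rn[0], rn[1]), std::max(rn[2], rn[3]),
             std::max(rm[0], rm[1]), std::max(rm[2], rm[3]) };
}
```

+# C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e403400/mask=xbfe0fc00
+#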
CONSTRUCT x2e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@2 +# AUNIT --inst x2e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fmaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e403400/mask=xbfe0fc00 +# CONSTRUCT x6e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@2 +# AUNIT --inst x6e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fmaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fmaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.110 FMAXV page C7-1640 line 91528 MATCH x2e30f800/mask=xbfbffc00 +# CONSTRUCT x6e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxv/1@4 +# AUNIT --inst x6e30f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmaxv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fmaxv(Rn_VPR128.4S, 4:1); +} + +# C7.2.110 FMAXV page C7-1640 line 91528 MATCH x0e30f800/mask=xbffffc00 +# CONSTRUCT x0e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxv/1@2 +# AUNIT --inst x0e30f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmaxv Rd_FPR16, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd +{ + Rd_FPR16 = NEON_fmaxv(Rn_VPR64.4H, 2:1); +} + +# C7.2.110 FMAXV page C7-1640 line 91528 MATCH x0e30f800/mask=xbffffc00 +# CONSTRUCT x4e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxv/1@2 +# AUNIT --inst x4e30f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fmaxv Rd_FPR16, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR128.8H & Zd +{ + Rd_FPR16 = NEON_fmaxv(Rn_VPR128.8H, 2:1); +} + +# C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ea0f400/mask=xbfa0fc00 +# CONSTRUCT x4ee0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@8 +# AUNIT --inst x4ee0f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmin Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fmin(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ea0f400/mask=xbfa0fc00 +# CONSTRUCT x0ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@4 +# AUNIT --inst x0ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fmin(Rn_VPR64.2S, 
Rm_VPR64.2S, 4:1); +} + +# C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ea0f400/mask=xbfa0fc00 +# CONSTRUCT x4ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@4 +# AUNIT --inst x4ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fmin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ec03400/mask=xbfe0fc00 +# CONSTRUCT x0ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@2 +# AUNIT --inst x0ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fmin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ec03400/mask=xbfe0fc00 +# CONSTRUCT x4ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@2 +# AUNIT --inst x4ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fmin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fmin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.112 FMIN (scalar) page C7-1644 line 91761 MATCH x1e205800/mask=xff20fc00 +# CONSTRUCT x1e605800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2 +# AUNIT --inst x1e605800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmin Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x5 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fmin(Rn_FPR64, Rm_FPR64); +} + +# C7.2.112 FMIN (scalar) page C7-1644 line 91761 MATCH x1e205800/mask=xff20fc00 +# CONSTRUCT x1e205800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2 +# AUNIT --inst x1e205800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmin Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x5 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fmin(Rn_FPR32, Rm_FPR32); +} + +# C7.2.112 FMIN (scalar) page C7-1644 line 91761 MATCH x1e205800/mask=xff20fc00 +# CONSTRUCT x1ee05800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2 +# AUNIT --inst x1ee05800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fmin Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x5 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_fmin(Rn_FPR16, Rm_FPR16); +} + +# C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ea0c400/mask=xbfa0fc00 +# CONSTRUCT x4ee0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@8 +# AUNIT --inst x4ee0c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fminnm Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & 
b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fminnm(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ea0c400/mask=xbfa0fc00 +# CONSTRUCT x0ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@4 +# AUNIT --inst x0ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnm Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fminnm(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ea0c400/mask=xbfa0fc00 +# CONSTRUCT x4ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@4 +# AUNIT --inst x4ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnm Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fminnm(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ec00400/mask=xbfe0fc00 +# CONSTRUCT x0ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@2 +# AUNIT --inst x0ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fminnm Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fminnm(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ec00400/mask=xbfe0fc00 +# CONSTRUCT x4ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@2 +# AUNIT --inst x4ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fminnm Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fminnm(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.114 FMINNM (scalar) page C7-1648 line 91994 MATCH x1e207800/mask=xff20fc00 +# CONSTRUCT x1e607800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2 +# AUNIT --inst x1e607800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fminnm Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x7 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fminnm(Rn_FPR64, Rm_FPR64); +} + +# C7.2.114 FMINNM (scalar) page C7-1648 line 91994 MATCH x1e207800/mask=xff20fc00 +# CONSTRUCT x1e207800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2 +# AUNIT --inst x1e207800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnm Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x7 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fminnm(Rn_FPR32, Rm_FPR32); +} + +# C7.2.114 FMINNM (scalar) page C7-1648 line 91994 MATCH x1e207800/mask=xff20fc00 +# CONSTRUCT x1ee07800/mask=xffe0fc00 MATCHED 
1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2 +# AUNIT --inst x1ee07800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fminnm Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x7 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_fminnm(Rn_FPR16, Rm_FPR16); +} + +# C7.2.115 FMINNMP (scalar) page C7-1650 line 92101 MATCH x7eb0c800/mask=xffbffc00 +# CONSTRUCT x7ef0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmp/1@8 +# AUNIT --inst x7ef0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fminnmp Rd_FPR64, Rn_VPR128.2D +is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fminnmp(Rn_VPR128.2D, 8:1); +} + +# C7.2.115 FMINNMP (scalar) page C7-1650 line 92101 MATCH x7eb0c800/mask=xffbffc00 +# CONSTRUCT x7eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmp/1@4 +# AUNIT --inst x7eb0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnmp Rd_FPR32, Rn_VPR64.2S +is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fminnmp(Rn_VPR64.2S, 4:1); +} + +# C7.2.115 FMINNMP (scalar) page C7-1650 line 92101 MATCH x5eb0c800/mask=xfffffc00 +# CONSTRUCT x5eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fminnmp/1@2 +# AUNIT --inst x5eb0c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant + +:fminnmp Rd_FPR16, vRn_VPR128^".2H" +is b_1031=0b0101111010110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd +{ + Rd_FPR16 = NEON_fminnmp(Rn_FPR32, 2:1); +} + +# C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ea0c400/mask=xbfa0fc00 +# CONSTRUCT x6ee0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@8 +# AUNIT --inst x6ee0c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fminnmp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fminnmp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ea0c400/mask=xbfa0fc00 +# CONSTRUCT x2ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@4 +# AUNIT --inst x2ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnmp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fminnmp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ea0c400/mask=xbfa0fc00 +# CONSTRUCT x6ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@4 +# AUNIT --inst x6ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnmp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fminnmp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.116 FMINNMP (vector) page C7-1652 line 
92205 MATCH x2ec00400/mask=xbfe0fc00 +# CONSTRUCT x2ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@2 +# AUNIT --inst x2ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fminnmp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fminnmp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ec00400/mask=xbfe0fc00 +# CONSTRUCT x6ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@2 +# AUNIT --inst x6ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fminnmp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fminnmp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.117 FMINNMV page C7-1654 line 92338 MATCH x2eb0c800/mask=xbfbffc00 +# CONSTRUCT x6eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmv/1@4 +# AUNIT --inst x6eb0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminnmv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fminnmv(Rn_VPR128.4S, 4:1); +} + +# C7.2.117 FMINNMV page C7-1654 line 92338 MATCH x0eb0c800/mask=xbffffc00 +# CONSTRUCT x0eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmv/1@2 +# AUNIT --inst x0eb0c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fminnmv Rd_FPR16, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd +{ + Rd_FPR16 = NEON_fminnmv(Rn_VPR64.4H, 2:1); +} + +# C7.2.117 FMINNMV page C7-1654 line 92338 MATCH x0eb0c800/mask=xbffffc00 +# CONSTRUCT x4eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmv/1@2 +# AUNIT --inst x4eb0c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fminnmv Rd_FPR16, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR128.8H & Zd +{ + Rd_FPR16 = NEON_fminnmv(Rn_VPR128.8H, 2:1); +} + +# C7.2.118 FMINP (scalar) page C7-1656 line 92446 MATCH x7eb0f800/mask=xffbffc00 +# CONSTRUCT x7ef0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminp/1@8 +# AUNIT --inst x7ef0f800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fminp Rd_FPR64, Rn_VPR128.2D +is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fminp(Rn_VPR128.2D, 8:1); +} + +# C7.2.118 FMINP (scalar) page C7-1656 line 92446 MATCH x7eb0f800/mask=xffbffc00 +# CONSTRUCT x7eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminp/1@4 +# AUNIT --inst x7eb0f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminp Rd_FPR32, Rn_VPR64.2S +is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fminp(Rn_VPR64.2S, 4:1); +} + +# C7.2.118 FMINP 
(scalar) page C7-1656 line 92446 MATCH x5eb0f800/mask=xfffffc00 +# CONSTRUCT x5eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fminp/1@2 +# AUNIT --inst x5eb0f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant + +:fminp Rd_FPR16, vRn_VPR128^".2H" +is b_1031=0b0101111010110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd +{ + Rd_FPR16 = NEON_fminp(Rn_FPR32, 2:1); +} + +# C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ea0f400/mask=xbfa0fc00 +# CONSTRUCT x6ee0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@8 +# AUNIT --inst x6ee0f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fminp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fminp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ea0f400/mask=xbfa0fc00 +# CONSTRUCT x2ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@4 +# AUNIT --inst x2ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ea0f400/mask=xbfa0fc00 +# CONSTRUCT x6ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@4 +# AUNIT --inst x6ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ec03400/mask=xbfe0fc00 +# CONSTRUCT x2ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@2 +# AUNIT --inst x2ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ec03400/mask=xbfe0fc00 +# CONSTRUCT x6ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@2 +# AUNIT --inst x6ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.120 FMINV page C7-1660 line 92681 MATCH x2eb0f800/mask=xbfbffc00 +# CONSTRUCT x6eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminv/1@4 +# AUNIT --inst x6eb0f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fminv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 
& b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fminv(Rn_VPR128.4S, 4:1); +} + +# C7.2.120 FMINV page C7-1660 line 92681 MATCH x0eb0f800/mask=xbffffc00 +# CONSTRUCT x0eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminv/1@2 +# AUNIT --inst x0eb0f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fminv Rd_FPR16, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd +{ + Rd_FPR16 = NEON_fminv(Rn_VPR64.4H, 2:1); +} + +# C7.2.120 FMINV page C7-1660 line 92681 MATCH x0eb0f800/mask=xbffffc00 +# CONSTRUCT x4eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fminv/1@2 +# AUNIT --inst x4eb0f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 8H when Q = 1 + +:fminv Rd_FPR16, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR128.8H & Zd +{ + Rd_FPR16 = NEON_fminv(Rn_VPR128.8H, 2:1); +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f801000/mask=xbf80f400 +# CONSTRUCT x4fc01000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f* &=$f+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@8 +# AUNIT --inst x4fc01000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" + +:fmla Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x1 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd element Re_VPR128.D[vIndex] lane size 8 + local tmp1:8 = Re_VPR128.D.vIndex; + # simd infix TMPQ1 = Rn_VPR128.2D f* tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* tmp1; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D f+ TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f+ TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f+ TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f801000/mask=xbf80f400 +# CONSTRUCT x0f801000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f* &=$f+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4 +# AUNIT --inst x0f801000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" + +:fmla Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x1 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S f* tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] f* tmp1; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S f+ TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f+ TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f+ TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f801000/mask=xbf80f400 +# CONSTRUCT x4f801000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f* &=$f+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4 +# AUNIT --inst x4f801000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" + +:fmla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 
& b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x1 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = Rn_VPR128.4S f* tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* tmp1; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S f+ TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f+ TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f+ TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f+ TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f+ TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x5f001000/mask=xffc0f400 +# CONSTRUCT x5f001000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* &=f+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 +# AUNIT --inst x5f001000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Scalar half-precision variant + +:fmla Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2231=0b0101111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp2:2 = Rn_FPR16 f* tmp1; + Rd_FPR16 = Rd_FPR16 f+ tmp2; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x5f801000/mask=xff80f400 +# CONSTRUCT x5f801000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* &=f+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4 +# AUNIT --inst x5f801000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" +# Scalar, single-precision and double-precision variant, sz=0 + +:fmla Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex +is b_2331=0b010111111 & b_22=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + local tmp2:4 = Rn_FPR32 f* tmp1; + Rd_FPR32 = Rd_FPR32 f+ tmp2; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x5f801000/mask=xff80f400 +# CONSTRUCT x5fc01000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* &=f+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@8 +# AUNIT --inst x5fc01000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" +# Scalar, single-precision and double-precision variant, sz=1 + +:fmla Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex +is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd +{ + # simd element Re_VPR128.D[vIndex] lane size 8 + local tmp1:8 = Re_VPR128.D.vIndex; + local tmp2:8 = Rn_FPR64 f* tmp1; + Rd_FPR64 = Rd_FPR64 f+ tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f001000/mask=xbfc0f400 +# CONSTRUCT x0f001000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f* &=$f+$@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 +# AUNIT --inst x0f001000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Vector, half-precision variant SIMD 4H when Q = 0 + +:fmla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is 
b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H f* tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] f* tmp1; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H f+ TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f+ TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f+ TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f+ TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f+ TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f001000/mask=xbfc0f400 +# CONSTRUCT x4f001000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f* &=$f+$@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 +# AUNIT --inst x4f001000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Vector, half-precision variant SIMD 8H when Q = 1 + +:fmla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H f* tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* tmp1; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f+ TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f+ TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f+ TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f+ TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f+ TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f+ TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f+ TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f+ TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e20cc00/mask=xbfa0fc00 +# CONSTRUCT x4e60cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f*@8 &=$f+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@8 +# AUNIT --inst x4e60cc00/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fmla Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.2D & b_1115=0x19 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D f+ TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f+ TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f+ TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH 
x0e20cc00/mask=xbfa0fc00
+# CONSTRUCT x0e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4
+# AUNIT --inst x0e20cc00/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"
+
+:fmla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.2S & b_1115=0x19 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ # simd infix TMPD1 = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4
+ TMPD1[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32];
+ TMPD1[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32];
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S f+ TMPD1 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f+ TMPD1[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f+ TMPD1[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e20cc00/mask=xbfa0fc00
+# CONSTRUCT x4e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4
+# AUNIT --inst x4e20cc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+
+:fmla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.4S & b_1115=0x19 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4
+ TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32];
+ TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32];
+ TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32];
+ TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32];
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S f+ TMPQ1 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f+ TMPQ1[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f+ TMPQ1[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f+ TMPQ1[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f+ TMPQ1[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e400c00/mask=xbfe0fc00
+# CONSTRUCT x0e400c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f+@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2
+# AUNIT --inst x0e400c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant SIMD 4H when Q = 0
+
+:fmla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+ # simd infix TMPD1 = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2
+ TMPD1[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16];
+ TMPD1[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16];
+ TMPD1[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16];
+ TMPD1[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16];
+ # simd infix Rd_VPR64.4H = Rd_VPR64.4H f+ TMPD1 on lane size 2
+ Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f+ TMPD1[0,16];
+ Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f+ TMPD1[16,16];
+ Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f+ TMPD1[32,16];
+ Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f+ TMPD1[48,16];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e400c00/mask=xbfe0fc00
+# CONSTRUCT x4e400c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f+@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2
+# AUNIT --inst x4e400c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant SIMD 8H when Q = 1
+
+:fmla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2
+ TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16];
+ TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16];
+ TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16];
+ TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16];
+ TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16];
+ TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16];
+ TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16];
+ TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16];
+ # simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 2
+ Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f+ TMPQ1[0,16];
+ Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f+ TMPQ1[16,16];
+ Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f+ TMPQ1[32,16];
+ Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f+ TMPQ1[48,16];
+ Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f+ TMPQ1[64,16];
+ Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f+ TMPQ1[80,16];
+ Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f+ TMPQ1[96,16];
+ Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f+ TMPQ1[112,16];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
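The 4H and 8H FMLA forms above split a D or Q register into 16-bit lanes, matching their half-precision comments and the fdiv/fmla-by-element 4H constructors earlier in the file; each `[off,16]` slice in the pcode is ordinary shift-and-mask lane access. A small C++ sketch of that lane plumbing on a 64-bit D register (helper names are illustrative; the half-precision arithmetic itself is elided, since QEMU cannot check these forms anyway per the `noqemu` status):

```cpp
#include <cstdint>

// Lane access equivalent to the pcode's v[off,16] slices on a 64-bit
// register holding four half-precision lanes at bit offsets 0/16/32/48.
uint16_t get_lane_4h(uint64_t v, unsigned lane)        // lane in [0,3]
{
    return static_cast<uint16_t>(v >> (16 * lane));
}

uint64_t set_lane_4h(uint64_t v, unsigned lane, uint16_t x)
{
    uint64_t mask = 0xffffULL << (16 * lane);
    return (v & ~mask) | (static_cast<uint64_t>(x) << (16 * lane));
}
```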
Rn_VPR128.8H f* Rm_VPR128.8H on lane size 4 + TMPQ1[0,32] = Rn_VPR128.8H[0,32] f* Rm_VPR128.8H[0,32]; + TMPQ1[32,32] = Rn_VPR128.8H[32,32] f* Rm_VPR128.8H[32,32]; + TMPQ1[64,32] = Rn_VPR128.8H[64,32] f* Rm_VPR128.8H[64,32]; + TMPQ1[96,32] = Rn_VPR128.8H[96,32] f* Rm_VPR128.8H[96,32]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 4 + Rd_VPR128.8H[0,32] = Rd_VPR128.8H[0,32] f+ TMPQ1[0,32]; + Rd_VPR128.8H[32,32] = Rd_VPR128.8H[32,32] f+ TMPQ1[32,32]; + Rd_VPR128.8H[64,32] = Rd_VPR128.8H[64,32] f+ TMPQ1[64,32]; + Rd_VPR128.8H[96,32] = Rd_VPR128.8H[96,32] f+ TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x0f800000/mask=xbfc0f400 +# CONSTRUCT x0f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:4 ARG3 $f* $float2float@2:8 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@4 +# AUNIT --inst x0f800000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 2S when Q = 0 + +:fmlal Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + TMPS1 = Rn_VPR64[0,32]; + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; + # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD3 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD3[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x0f800000/mask=xbfc0f400 +# CONSTRUCT x4f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:8 ARG3 $f* $float2float@2:16 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@2 +# AUNIT --inst x4f800000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 4S when Q = 1 + +:fmlal Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + TMPD1 = Rn_VPR128[0,64]; + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; + # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x2f808000/mask=xbfc0f400 +# CONSTRUCT 
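+# The fmlal/fmlal2 constructors model the half-precision widening
+# multiply-accumulate forms: each 16-bit product is widened to 32 bits with
+# float2float before being folded into the 32-bit destination lanes. fmlal
+# reads the low half of the source vector (Rn_VPR64[0,32] or Rn_VPR128[0,64])
+# while fmlal2 reads the high half (Rn_VPR64[32,32] or Rn_VPR128[64,64]).
+# Note that the generated accumulate step uses the integer "+" operator, per
+# the "$+@4" in the SMACRO comments, rather than a floating-point "f+".
+
+# C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x2f808000/mask=xbfc0f400
+# CONSTRUCT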
x2f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:4 ARG3 $f* $float2float@2:8 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 +# AUNIT --inst x2f808000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 2S when Q = 0 + +:fmlal2 Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + TMPS1 = Rn_VPR64[32,32]; + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; + # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD3 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD3[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x2f808000/mask=xbfc0f400 +# CONSTRUCT x6f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 ARG3 $f* $float2float@2:16 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 +# AUNIT --inst x6f808000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 4S when Q = 1 + +:fmlal2 Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + TMPD1 = Rn_VPR128[64,64]; + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; + # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x0e20ec00/mask=xbfe0fc00 +# CONSTRUCT x0e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:4 ARG3[0]:4 $f*@2 $float2float@2:8 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@2 +# AUNIT --inst x0e20ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 2S when Q = 0 + +:fmlal Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR64^".2H" +is b_31=0 & b_30=0 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR64 & Rm_VPR64 & Zd +{ + TMPS1 = Rn_VPR64[0,32]; + TMPS2 = Rm_VPR64[0,32]; + # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 + TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; + TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; + # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) + TMPD4[0,32] = 
float2float(TMPS3[0,16]);
+ TMPD4[32,32] = float2float(TMPS3[16,16]);
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD4 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD4[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD4[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x0e20ec00/mask=xbfe0fc00
+# CONSTRUCT x4e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[0]:8 ARG3[0]:8 $f*@2 $float2float@2:16 &=$+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@2
+# AUNIT --inst x4e20ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 4S when Q = 1
+
+:fmlal Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H
+is b_31=0 & b_30=1 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd
+{
+ TMPD1 = Rn_VPR128[0,64];
+ TMPD2 = Rm_VPR64.4H[0,64];
+ # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2
+ TMPD3[0,16] = TMPD1[0,16] f* TMPD2[0,16];
+ TMPD3[16,16] = TMPD1[16,16] f* TMPD2[16,16];
+ TMPD3[32,16] = TMPD1[32,16] f* TMPD2[32,16];
+ TMPD3[48,16] = TMPD1[48,16] f* TMPD2[48,16];
+ # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4)
+ TMPQ4[0,32] = float2float(TMPD3[0,16]);
+ TMPQ4[32,32] = float2float(TMPD3[16,16]);
+ TMPQ4[64,32] = float2float(TMPD3[32,16]);
+ TMPQ4[96,32] = float2float(TMPD3[48,16]);
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x2e20cc00/mask=xbfe0fc00
+# CONSTRUCT x2e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:4 ARG3[1]:4 $f*@2 $float2float@2:8 &=$+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2
+# AUNIT --inst x2e20cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 2S when Q = 0
+
+:fmlal2 Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR128^".2H"
+is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR128 & Rm_VPR128 & Zd
+{
+ TMPS1 = Rn_VPR64[32,32];
+ TMPS2 = Rm_VPR128[32,32];
+ # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2
+ TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16];
+ TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16];
+ # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4)
+ TMPD4[0,32] = float2float(TMPS3[0,16]);
+ TMPD4[32,32] = float2float(TMPS3[16,16]);
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD4 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD4[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD4[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x2e20cc00/mask=xbfe0fc00
+# CONSTRUCT x6e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 ARG3[1]:8 $f*@2 $float2float@2:16 &=$+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2
+# AUNIT --inst x6e20cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 4S when Q = 1
+
+:fmlal2 Rd_VPR128.4S, vRn_VPR128^".4H", vRm_VPR128^".4H"
+is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & vRm_VPR128 & Rm_VPR128 & Zd
+{
+ TMPD1 = Rn_VPR128[64,64];
+ # simd infix TMPD2 = TMPD1 f* Rm_VPR128[64,64] on lane size 2
+ TMPD2[0,16] = TMPD1[0,16] f* Rm_VPR128[64,16];
+ TMPD2[16,16] = TMPD1[16,16] f* Rm_VPR128[80,16];
+ TMPD2[32,16] = TMPD1[32,16] f* Rm_VPR128[96,16];
+ TMPD2[48,16] = TMPD1[48,16] f* Rm_VPR128[112,16];
+ # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4)
+ TMPQ3[0,32] = float2float(TMPD2[0,16]);
+ TMPQ3[32,32] = float2float(TMPD2[16,16]);
+ TMPQ3[64,32] = float2float(TMPD2[32,16]);
+ TMPQ3[96,32] = float2float(TMPD2[48,16]);
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f805000/mask=xbf80f400
+# CONSTRUCT x4fc05000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f* &=$f-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@8
+# AUNIT --inst x4fc05000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround"
+
+:fmls Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex
+is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x5 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+ # simd element Re_VPR128.D[vIndex] lane size 8
+ local tmp1:8 = Re_VPR128.D.vIndex;
+ # simd infix TMPQ1 = Rn_VPR128.2D f* tmp1 on lane size 8
+ TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* tmp1;
+ TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* tmp1;
+ # simd infix Rd_VPR128.2D = Rd_VPR128.2D f- TMPQ1 on lane size 8
+ Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f- TMPQ1[0,64];
+ Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f- TMPQ1[64,64];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f805000/mask=xbf80f400
+# CONSTRUCT x0f805000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f* &=$f-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4
+# AUNIT --inst x0f805000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround"
+
+:fmls Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex
+is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x5 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ # simd element Re_VPR128.S[vIndex] lane size 4
+ local tmp1:4 = Re_VPR128.S.vIndex;
+ # simd infix TMPD1 = Rn_VPR64.2S f* tmp1 on lane size 4
+ TMPD1[0,32] = Rn_VPR64.2S[0,32] f* tmp1;
+ TMPD1[32,32] = Rn_VPR64.2S[32,32] f* tmp1;
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S f- TMPD1 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f- TMPD1[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f- TMPD1[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f805000/mask=xbf80f400
+# CONSTRUCT x4f805000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f* &=$f-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4
+# AUNIT --inst x4f805000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround"
+
+:fmls Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex
+is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x5 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+ # simd element Re_VPR128.S[vIndex] lane size 4
+ local tmp1:4 = Re_VPR128.S.vIndex;
+ # simd infix TMPQ1 = Rn_VPR128.4S f* tmp1 on lane size 4
+ TMPQ1[0,32] =
Rn_VPR128.4S[0,32] f* tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* tmp1; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S f- TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f- TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f- TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f- TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f- TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x5f005000/mask=xffc0f400 +# CONSTRUCT x5f005000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* &=f- +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 +# AUNIT --inst x5f005000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Scalar half-precision variant + +:fmls Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2231=0b0101111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp2:2 = Rn_FPR16 f* tmp1; + Rd_FPR16 = Rd_FPR16 f- tmp2; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x5f805000/mask=xff80f400 +# CONSTRUCT x5f805000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* &=f- +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4 +# AUNIT --inst x5f805000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" +# Scalar, single-precision and double-precision variant, sz=0 + +:fmls Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex +is b_2331=0b010111111 & b_22=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + local tmp2:4 = Rn_FPR32 f* tmp1; + Rd_FPR32 = Rd_FPR32 f- tmp2; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x5f805000/mask=xff80f400 +# CONSTRUCT x5fc05000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* &=f- +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@8 +# AUNIT --inst x5fc05000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" +# Scalar, single-precision and double-precision variant, sz=1 + +:fmls Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex +is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd +{ + # simd element Re_VPR128.D[vIndex] lane size 8 + local tmp1:8 = Re_VPR128.D.vIndex; + local tmp2:8 = Rn_FPR64 f* tmp1; + Rd_FPR64 = Rd_FPR64 f- tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f005000/mask=xbfc0f400 +# CONSTRUCT x0f005000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f-$@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 +# AUNIT --inst x0f005000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Vector, half-precision variant SIMD 4H when Q = 0 + +:fmls Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H f* tmp1 on 
lane size 2
+ TMPD1[0,16] = Rn_VPR64.4H[0,16] f* tmp1;
+ TMPD1[16,16] = Rn_VPR64.4H[16,16] f* tmp1;
+ TMPD1[32,16] = Rn_VPR64.4H[32,16] f* tmp1;
+ TMPD1[48,16] = Rn_VPR64.4H[48,16] f* tmp1;
+ # simd infix Rd_VPR64.4H = Rd_VPR64.4H f- TMPD1 on lane size 2
+ Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f- TMPD1[0,16];
+ Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f- TMPD1[16,16];
+ Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f- TMPD1[32,16];
+ Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f- TMPD1[48,16];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f005000/mask=xbfc0f400
+# CONSTRUCT x4f005000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f-$@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2
+# AUNIT --inst x4f005000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround"
+# Vector, half-precision variant SIMD 8H when Q = 1
+
+:fmls Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM
+is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd
+{
+ # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
+ local tmp1:2 = Re_VPR128Lo.H.vIndexHLM;
+ # simd infix TMPQ1 = Rn_VPR128.8H f* tmp1 on lane size 2
+ TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* tmp1;
+ TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* tmp1;
+ TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* tmp1;
+ TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* tmp1;
+ TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* tmp1;
+ TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* tmp1;
+ TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* tmp1;
+ TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* tmp1;
+ # simd infix Rd_VPR128.8H = Rd_VPR128.8H f- TMPQ1 on lane size 2
+ Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f- TMPQ1[0,16];
+ Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f- TMPQ1[16,16];
+ Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f- TMPQ1[32,16];
+ Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f- TMPQ1[48,16];
+ Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f- TMPQ1[64,16];
+ Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f- TMPQ1[80,16];
+ Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f- TMPQ1[96,16];
+ Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f- TMPQ1[112,16];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ea0cc00/mask=xbfa0fc00
+# CONSTRUCT x4ee0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@8 &=$f-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@8
+# AUNIT --inst x4ee0cc00/mask=xffe0fc00 --rand dfp --status fail --comment "nofpround"
+
+:fmls Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x19 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8
+ TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64];
+ TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64];
+ # simd infix Rd_VPR128.2D = Rd_VPR128.2D f- TMPQ1 on lane size 8
+ Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f- TMPQ1[0,64];
+ Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f- TMPQ1[64,64];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ea0cc00/mask=xbfa0fc00
+# CONSTRUCT x0ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4
+# AUNIT --inst x0ea0cc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+
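+# Convention note: each "# simd infix <dst> = <a> <op> <b> on lane size N"
+# comment describes the unrolled per-lane semantics that follow it; the
+# vector operation is expanded into one assignment per N-byte lane, using
+# reg[bit-offset,bit-width] slices of the packed source and destination
+# registers.
+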
+:fmls Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x19 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ # simd infix TMPD1 = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4
+ TMPD1[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32];
+ TMPD1[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32];
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S f- TMPD1 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f- TMPD1[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f- TMPD1[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ea0cc00/mask=xbfa0fc00
+# CONSTRUCT x4ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4
+# AUNIT --inst x4ea0cc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"
+
+:fmls Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x19 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4
+ TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32];
+ TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32];
+ TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32];
+ TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32];
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S f- TMPQ1 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f- TMPQ1[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f- TMPQ1[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f- TMPQ1[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f- TMPQ1[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ec00c00/mask=xbfe0fc00
+# CONSTRUCT x0ec00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f-@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2
+# AUNIT --inst x0ec00c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant SIMD 4H when Q = 0
+
+:fmls Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+ # simd infix TMPD1 = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2
+ TMPD1[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16];
+ TMPD1[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16];
+ TMPD1[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16];
+ TMPD1[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16];
+ # simd infix Rd_VPR64.4H = Rd_VPR64.4H f- TMPD1 on lane size 2
+ Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f- TMPD1[0,16];
+ Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f- TMPD1[16,16];
+ Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f- TMPD1[32,16];
+ Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f- TMPD1[48,16];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ec00c00/mask=xbfe0fc00
+# CONSTRUCT x4ec00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f-@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2
+# AUNIT --inst x4ec00c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant SIMD 8H when Q = 1
+
+:fmls Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
+{
+ # simd infix TMPQ1 = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2
+ TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16];
+ TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16];
+ TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16];
+ TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16];
+ TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16];
+ TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16];
+ TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16];
+ TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16];
+ # simd infix Rd_VPR128.8H = Rd_VPR128.8H f- TMPQ1 on lane size 2
+ Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f- TMPQ1[0,16];
+ Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f- TMPQ1[16,16];
+ Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f- TMPQ1[32,16];
+ Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f- TMPQ1[48,16];
+ Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f- TMPQ1[64,16];
+ Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f- TMPQ1[80,16];
+ Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f- TMPQ1[96,16];
+ Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f- TMPQ1[112,16];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x0f804000/mask=xbfc0f400
+# CONSTRUCT x0f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[0]:4 ARG3 $f* $float2float@2:8 &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2
+# AUNIT --inst x0f804000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 2S when Q = 0
+
+:fmlsl Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM
+is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd
+{
+ TMPS1 = Rn_VPR64[0,32];
+ # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
+ local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
+ # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2
+ TMPS2[0,16] = TMPS1[0,16] f* tmp2;
+ TMPS2[16,16] = TMPS1[16,16] f* tmp2;
+ # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4)
+ TMPD3[0,32] = float2float(TMPS2[0,16]);
+ TMPD3[32,32] = float2float(TMPS2[16,16]);
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD3 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD3[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD3[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x0f804000/mask=xbfc0f400
+# CONSTRUCT x4f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[0]:8 ARG3 $f* $float2float@2:16 &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2
+# AUNIT --inst x4f804000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 4S when Q = 1
+
+:fmlsl Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM
+is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd
+{
+ TMPD1 = Rn_VPR128[0,64];
+ # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
+ local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
+ # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2
+ TMPD2[0,16] = TMPD1[0,16] f* tmp2;
+ TMPD2[16,16] = TMPD1[16,16] f* tmp2;
+ TMPD2[32,16] = TMPD1[32,16] f* tmp2;
+ TMPD2[48,16] = TMPD1[48,16] f* tmp2;
+ # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4)
+ TMPQ3[0,32] = float2float(TMPD2[0,16]);
+ TMPQ3[32,32] = float2float(TMPD2[16,16]);
+ TMPQ3[64,32] = float2float(TMPD2[32,16]);
+ TMPQ3[96,32] = float2float(TMPD2[48,16]);
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x2f80c000/mask=xbfc0f400
+# CONSTRUCT x2f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:4 ARG3 $f* $float2float@2:8 &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2
+# AUNIT --inst x2f80c000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 2S when Q = 0
+
+:fmlsl2 Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM
+is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM &
Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + TMPS1 = Rn_VPR64[32,32]; + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; + # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); + # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD3 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD3[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x2f80c000/mask=xbfc0f400 +# CONSTRUCT x6f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 ARG3 $f* $float2float@2:16 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2 +# AUNIT --inst x6f80c000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 4S when Q = 1 + +:fmlsl2 Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd +{ + TMPD1 = Rn_VPR128[64,64]; + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; + # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x0ea0ec00/mask=xbfe0fc00 +# CONSTRUCT x0ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[0]:4 ARG3[0]:4 $f*@2 $float2float@2:8 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2 +# AUNIT --inst x0ea0ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" +# SIMD 2S when Q = 0 + +:fmlsl Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR64^".2H" +is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR64 & Rm_VPR64 & Zd +{ + TMPS1 = Rn_VPR64[0,32]; + TMPS2 = Rm_VPR64[0,32]; + # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 + TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; + TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; + # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) + TMPD4[0,32] = float2float(TMPS3[0,16]); + TMPD4[32,32] = float2float(TMPS3[16,16]); + # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD4 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD4[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD4[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x0ea0ec00/mask=xbfe0fc00 +# CONSTRUCT x4ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO ARG1 ARG2[0]:8 ARG3[0]:8 $f*@2 $float2float@2:16 &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2
+# AUNIT --inst x4ea0ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 4S when Q = 1
+
+:fmlsl Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H
+is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd
+{
+ TMPD1 = Rn_VPR128[0,64];
+ TMPD2 = Rm_VPR64.4H[0,64];
+ # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2
+ TMPD3[0,16] = TMPD1[0,16] f* TMPD2[0,16];
+ TMPD3[16,16] = TMPD1[16,16] f* TMPD2[16,16];
+ TMPD3[32,16] = TMPD1[32,16] f* TMPD2[32,16];
+ TMPD3[48,16] = TMPD1[48,16] f* TMPD2[48,16];
+ # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4)
+ TMPQ4[0,32] = float2float(TMPD3[0,16]);
+ TMPQ4[32,32] = float2float(TMPD3[16,16]);
+ TMPQ4[64,32] = float2float(TMPD3[32,16]);
+ TMPQ4[96,32] = float2float(TMPD3[48,16]);
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x2ea0cc00/mask=xbfe0fc00
+# CONSTRUCT x2ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:4 ARG3[1]:4 $f*@2 $float2float@2:8 &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2
+# AUNIT --inst x2ea0cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 2S when Q = 0
+
+:fmlsl2 Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR128^".2H"
+is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR128 & Rm_VPR128 & Zd
+{
+ TMPS1 = Rn_VPR64[32,32];
+ TMPS2 = Rm_VPR128[32,32];
+ # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2
+ TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16];
+ TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16];
+ # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4)
+ TMPD4[0,32] = float2float(TMPS3[0,16]);
+ TMPD4[32,32] = float2float(TMPS3[16,16]);
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD4 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD4[0,32];
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD4[32,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x2ea0cc00/mask=xbfe0fc00
+# CONSTRUCT x6ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 ARG3[1]:8 $f*@2 $float2float@2:16 &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2
+# AUNIT --inst x6ea0cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround"
+# SIMD 4S when Q = 1
+
+:fmlsl2 Rd_VPR128.4S, vRn_VPR128^".4H", vRm_VPR128^".4H"
+is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & vRm_VPR128 & Rm_VPR128 & Zd
+{
+ TMPD1 = Rn_VPR128[64,64];
+ # simd infix TMPD2 = TMPD1 f* Rm_VPR128[64,64] on lane size 2
+ TMPD2[0,16] = TMPD1[0,16] f* Rm_VPR128[64,16];
+ TMPD2[16,16] = TMPD1[16,16] f* Rm_VPR128[80,16];
+ TMPD2[32,16] = TMPD1[32,16] f* Rm_VPR128[96,16];
+ TMPD2[48,16] = TMPD1[48,16] f* Rm_VPR128[112,16];
+ # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4)
+ TMPQ3[0,32] = float2float(TMPD2[0,16]);
+ TMPQ3[32,32] = float2float(TMPD2[16,16]);
+ TMPQ3[64,32] = float2float(TMPD2[32,16]);
+ TMPQ3[96,32] =
float2float(TMPD2[48,16]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00f400/mask=x9ff8fc00 +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# CONSTRUCT x6f00f400/mask=xfff8fc00 MATCHED 4 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@8 +# AUNIT --inst x6f00f400/mask=xfff8fc00 --rand dfp --status nopcodeop + +:fmov Rd_VPR128.2D, Imm_neon_uimm8Shift +is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fmov(Imm_neon_uimm8Shift, 8:1); +} + +# C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00f400/mask=x9ff8fc00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 +# CONSTRUCT x0f00f400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_fmov/1@4 +# AUNIT --inst x0f00f400/mask=xfff8fc00 --rand dfp --status nopcodeop + +:fmov Rd_VPR64.2S, Imm_neon_uimm8Shift +is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fmov(Imm_neon_uimm8Shift:4, 4:1); +} + +# C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00f400/mask=x9ff8fc00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 +# CONSTRUCT x4f00f400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_fmov/1@4 +# AUNIT --inst x4f00f400/mask=xfff8fc00 --rand sfp --status nopcodeop + +:fmov Rd_VPR128.4S, Imm_neon_uimm8Shift +is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fmov(Imm_neon_uimm8Shift:4, 4:1); +} + +# C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00fc00/mask=xbff8fc00 +# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 +# CONSTRUCT x0f00fc00/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float:2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@2 +# AUNIT --inst x0f00fc00/mask=xfff8fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant SIMD 4H when Q = 0 + +:fmov Rd_VPR64.4H, Imm_neon_uimm8Shift +is b_31=0 & b_30=0 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR64.4H & Imm_neon_uimm8Shift & Zd +{ + local tmp1:2 = int2float(Imm_neon_uimm8Shift); + # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00fc00/mask=xbff8fc00 +# C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 +# CONSTRUCT 
x4f00fc00/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 int2float:2 &=$dup
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@2
+# AUNIT --inst x4f00fc00/mask=xfff8fc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision variant SIMD 8H when Q = 1
+
+:fmov Rd_VPR128.8H, Imm_neon_uimm8Shift
+is b_31=0 & b_30=1 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR128.8H & Imm_neon_uimm8Shift & Zd
+{
+ local tmp1:2 = int2float(Imm_neon_uimm8Shift);
+ # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2)
+ Rd_VPR128.8H[0,16] = tmp1;
+ Rd_VPR128.8H[16,16] = tmp1;
+ Rd_VPR128.8H[32,16] = tmp1;
+ Rd_VPR128.8H[48,16] = tmp1;
+ Rd_VPR128.8H[64,16] = tmp1;
+ Rd_VPR128.8H[80,16] = tmp1;
+ Rd_VPR128.8H[96,16] = tmp1;
+ Rd_VPR128.8H[112,16] = tmp1;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.130 FMOV (register) page C7-1684 line 94119 MATCH x1e204000/mask=xff3ffc00
+# CONSTRUCT x1ee04000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1ee04000/mask=xfffffc00 --rand hfp --status noqemu
+# Half-precision variant when type == 11 arg1=Rd_FPR16 arg2=Rn_FPR16
+
+:fmov Rd_FPR16, Rn_FPR16
+is b_2431=0b00011110 & b_2223=0b11 & b_1021=0b100000010000 & Rd_FPR16 & Rn_FPR16 & Rd_FPR64 & Zd
+{
+ Rd_FPR16 = Rn_FPR16;
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.130 FMOV (register) page C7-1684 line 94119 MATCH x1e204000/mask=xff3ffc00
+# CONSTRUCT x1e204000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1e204000/mask=xfffffc00 --rand sfp --status pass
+# Single-precision variant when type == 00 arg1=Rd_FPR32 arg2=Rn_FPR32
+
+:fmov Rd_FPR32, Rn_FPR32
+is b_2431=0b00011110 & b_2223=0b00 & b_1021=0b100000010000 & Rd_FPR32 & Rn_FPR32 & Zd
+{
+ Rd_FPR32 = Rn_FPR32;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.130 FMOV (register) page C7-1684 line 94119 MATCH x1e204000/mask=xff3ffc00
+# CONSTRUCT x1e604000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1e604000/mask=xfffffc00 --rand dfp --status pass
+# Double-precision variant when type == 01 arg1=Rd_FPR64 arg2=Rn_FPR64
+
+:fmov Rd_FPR64, Rn_FPR64
+is b_2431=0b00011110 & b_2223=0b01 & b_1021=0b100000010000 & Rd_FPR64 & Rn_FPR64 & Zd
+{
+ Rd_FPR64 = Rn_FPR64;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x1e660000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1e660000/mask=xfffffc00 --rand dfp --status noqemu --comment "nofpround"
+# UNDOCUMENTED Double-precision to 32-bit variant when sf == 0 && type == 01 && rmode == 00 && opcode = 110 arg1=Rd_GPR32 arg2=Rn_FPR64
+
+:fmov Rd_GPR32, Rn_FPR64
+is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64
+{
+ Rd_GPR32 = float2float(Rn_FPR64);
+ zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
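+# Note: the architected FMOV (general) encodings transfer the raw bit pattern
+# between a general-purpose and a SIMD&FP register without numeric conversion
+# (see the plain-move variants further below). The cross-width forms marked
+# UNDOCUMENTED, such as the double-precision to 32-bit variant above, do not
+# match a documented encoding and are modeled with float2float, i.e. as an
+# actual conversion, presumably a best-effort interpretation by the spec
+# generator.
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x9e260000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x9e260000/mask=xfffffc00 --rand sfp --status noqemu --comment "nofpround"
+# UNDOCUMENTED Single-precision to 64-bit variant when sf == 1 && type == 00 && rmode == 00 && opcode = 110 arg1=Rd_GPR64 arg2=Rn_FPR32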
+
+:fmov Rd_GPR64, Rn_FPR32
+is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR32
+{
+ Rd_GPR64 = float2float(Rn_FPR32);
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x1e670000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1e670000/mask=xfffffc00 --rand dfp --status noqemu --comment "nofpround"
+# UNDOCUMENTED 32-bit to Double-precision variant when sf == 0 && type == 01 && rmode == 00 && opcode = 111 arg1=Rd_FPR64 arg2=Rn_GPR32
+
+:fmov Rd_FPR64, Rn_GPR32
+is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR64 & Rn_GPR32 & Zd
+{
+ Rd_FPR64 = float2float(Rn_GPR32);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x9e270000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x9e270000/mask=xfffffc00 --rand sfp --status noqemu --comment "nofpround"
+# UNDOCUMENTED 64-bit to single-precision variant when sf == 1 && type == 00 && rmode == 00 && opcode = 111 arg1=Rd_FPR32 arg2=Rn_GPR64
+
+:fmov Rd_FPR32, Rn_GPR64
+is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR32 & Rn_GPR64 & Zd
+{
+ Rd_FPR32 = float2float(Rn_GPR64);
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x1ee60000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1ee60000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 32-bit variant when sf == 0 && type == 11 && rmode == 00 && opcode == 110 arg1=Rd_GPR32 arg2=Rn_FPR16
+
+:fmov Rd_GPR32, Rn_FPR16
+is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR16 & Rd_GPR64
+{
+ Rd_GPR32 = float2float(Rn_FPR16);
+ zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x9ee60000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x9ee60000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# Half-precision to 64-bit variant when sf == 1 && type == 11 && rmode == 00 && opcode == 110 arg1=Rd_GPR64 arg2=Rn_FPR16
+
+:fmov Rd_GPR64, Rn_FPR16
+is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR16
+{
+ Rd_GPR64 = float2float(Rn_FPR16);
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x1ee70000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1ee70000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# 32-bit to half-precision variant when sf == 0 && type == 11 && rmode == 00 && opcode == 111 arg1=Rd_FPR16 arg2=Rn_GPR32
+
+:fmov Rd_FPR16, Rn_GPR32
+is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR16 & Rn_GPR32 & Zd
+{
+ Rd_FPR16 = float2float(Rn_GPR32);
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x1e270000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1e270000/mask=xfffffc00 --rand sfp --status pass --comment "nofpround"
+# 32-bit to single-precision variant when sf == 0 && type == 00 && rmode == 00 && opcode == 111 arg1=Rd_FPR32 arg2=Rn_GPR32
+
+:fmov Rd_FPR32, Rn_GPR32
+is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR32 & Rn_GPR32 & Zd
+{
+ Rd_FPR32 = Rn_GPR32;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x1e260000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x1e260000/mask=xfffffc00 --rand sfp --status pass --comment "nofpround"
+# Single-precision to 32-bit variant when sf == 0 && type == 00 && rmode == 00 && opcode == 110 arg1=Rd_GPR32 arg2=Rn_FPR32
+
+:fmov Rd_GPR32, Rn_FPR32
+is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR32 & Rd_GPR64
+{
+ Rd_GPR32 = Rn_FPR32;
+ zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x9ee70000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =float2float
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x9ee70000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
+# 64-bit to half-precision variant when sf == 1 && type == 11 && rmode == 00 && opcode == 111 arg1=Rd_FPR16 arg2=Rn_GPR64
+
+:fmov Rd_FPR16, Rn_GPR64
+is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR16 & Rn_GPR64 & Zd
+{
+ Rd_FPR16 = float2float(Rn_GPR64);
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x9e670000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =
+# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1
+# AUNIT --inst x9e670000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround"
+# 64-bit to double-precision variant when sf == 1 && type == 01 && rmode == 00 && opcode == 111 arg1=Rd_FPR64 arg2=Rn_GPR64
+
+:fmov Rd_FPR64, Rn_GPR64
+is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR64 & Rn_GPR64 & Zd
+{
+ Rd_FPR64 = Rn_GPR64;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00
+# CONSTRUCT x9eaf0000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 1:1 &=$copy@8
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_fmov/2
+# AUNIT --inst x9eaf0000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround"
+# 64-bit to top half of 128-bit variant when sf == 1 && type == 10 && rmode == 01 && opcode == 111 arg1=vRd_VPR128^".D[1]" arg2=Rn_GPR64
+
+:fmov vRd_VPR128^".D[1]", Rn_GPR64
+is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b111 & b_1015=0b000000 & vRd_VPR128 & Rd_VPR128 & Rn_GPR64 & Zd
+{
+ # simd copy Rd_VPR128 element 1:1 = Rn_GPR64 (lane size 8)
+ Rd_VPR128[64,64] = Rn_GPR64;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
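+# Convention note: the zext_zh/zext_zs/zext_zd/zext_zq macros used throughout
+# this file appear to clear the unwritten upper bytes of the 32-byte Z
+# register that overlays the destination V register (hence "zero upper
+# 30/28/24/16 bytes of Zd"), while zext_rs clears the upper 4 bytes of the
+# destination X register after a 32-bit result is written to its W view,
+# matching AArch64's rule that 32-bit register writes are zero-extended to
+# 64 bits.
+
+# C7.2.131 FMOV (general) page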
C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 +# CONSTRUCT x9e660000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 +# AUNIT --inst x9e660000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" +# Double-precision to 64-bit variant when sf == 1 && type == 01 && rmode == 00 && opcode == 110 arg1=Rd_GPR64 arg2=Rn_FPR64 + +:fmov Rd_GPR64, Rn_FPR64 +is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR64 +{ + Rd_GPR64 = Rn_FPR64; +} + +# C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 +# CONSTRUCT x9eae0000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 =ARG2[1]:8 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@8 +# AUNIT --inst x9eae0000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" +# Top half of 128-bit to 64-bit variant when sf == 1 && type == 10 && rmode == 01 && opcode == 110 arg1=Rd_GPR64 arg2=vRd_VPR128^".D[1]" + +:fmov Rd_GPR64, vRn_VPR128^".D[1]" +is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & vRn_VPR128 & Rn_VPR128 +{ + Rd_GPR64 = Rn_VPR128[64,64]; +} + +# C7.2.132 FMOV (scalar, immediate) page C7-1689 line 94427 MATCH x1e201000/mask=xff201fe0 +# CONSTRUCT x1e601001/mask=xffe01fe1 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 +# AUNIT --inst x1e601001/mask=xffe01fe1 --rand dfp --status pass + +:fmov Rd_FPR64, Imm8_fmov64_operand +is ImmS_ImmR_TestSet=1 & m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Imm8_fmov64_operand & b_1012=4 & imm5=0x0 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Imm8_fmov64_operand:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.132 FMOV (scalar, immediate) page C7-1689 line 94427 MATCH x1e201000/mask=xff201fe0 +# CONSTRUCT x1e201000/mask=xffe01fe0 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 +# AUNIT --inst x1e201000/mask=xffe01fe0 --rand sfp --status pass + +:fmov Rd_FPR32, Imm8_fmov32_operand +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Imm8_fmov32_operand & b_1012=4 & imm5=0x0 & Rd_FPR32 & Zd +{ + Rd_FPR32 = Imm8_fmov32_operand:4; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.132 FMOV (scalar, immediate) page C7-1689 line 94427 MATCH x1e201000/mask=xff201fe0 +# CONSTRUCT x1ee01000/mask=xffe01fe0 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 +# AUNIT --inst x1ee01000/mask=xffe01fe0 --rand hfp --status noqemu + +:fmov Rd_FPR16, Imm8_fmov16_operand +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Imm8_fmov16_operand & b_1012=4 & imm5=0x0 & Rd_FPR16 & Zd +{ + Rd_FPR16 = Imm8_fmov16_operand:2; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.133 FMSUB page C7-1691 line 94515 MATCH x1f008000/mask=xff208000 +# CONSTRUCT x1f408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmsub/3 +# AUNIT --inst x1f408000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" + +:fmsub Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=1 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fmsub(Rn_FPR64, Rm_FPR64, Ra_FPR64); +} + +# C7.2.133 FMSUB page C7-1691 line 94515 MATCH x1f008000/mask=xff208000 +# CONSTRUCT x1f008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 
=NEON_fmsub/3 +# AUNIT --inst x1f008000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" + +:fmsub Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=1 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fmsub(Rn_FPR32, Rm_FPR32, Ra_FPR32); +} + +# C7.2.133 FMSUB page C7-1691 line 94515 MATCH x1f008000/mask=xff208000 +# CONSTRUCT x1fc08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmsub/3 +# AUNIT --inst x1fc08000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" + +:fmsub Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=1 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_fmsub(Rn_FPR16, Rm_FPR16, Ra_FPR16); +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x5f009000/mask=xffc0f400 +# CONSTRUCT x5f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 +# AUNIT --inst x5f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# FMUL (by element) Scalar, half-precision + +:fmul Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2231=0b0101111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + Rd_FPR16 = Rn_FPR16 f* tmp1; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x5f809000/mask=xff80f400 +# CONSTRUCT x5f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 +# AUNIT --inst x5f809000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" +# FMUL (by element) Scalar, single-precision and double-precision sz=0 + +:fmul Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex +is b_2331=0b010111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + Rd_FPR32 = Rn_FPR32 f* tmp1; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x5f809000/mask=xff80f400 +# CONSTRUCT x5fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@8 +# AUNIT --inst x5fc09000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" +# FMUL (by element) Scalar, single-precision and double-precision sz=1 + +:fmul Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex +is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd +{ + # simd element Re_VPR128.D[vIndex] lane size 8 + local tmp1:8 = Re_VPR128.D.vIndex; + Rd_FPR64 = Rn_FPR64 f* tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f009000/mask=xbfc0f400 +# CONSTRUCT x0f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 +# AUNIT --inst x0f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# FMUL (by element) Vector, half-precision, Q=0 + +:fmul Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 &b_30=0 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & 
vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f009000/mask=xbfc0f400 +# CONSTRUCT x4f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 +# AUNIT --inst x4f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# FMUL (by element) Vector, half-precision, Q=1 + +:fmul Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 &b_30=1 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f809000/mask=xbf80f400 +# CONSTRUCT x4fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@8 +# AUNIT --inst x4fc09000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" +# Vector, single-precision and double-precision Q=1 and sz:L=10 + +:fmul Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex +is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd element Re_VPR128.D[vIndex] lane size 8 + local tmp1:8 = Re_VPR128.D.vIndex; + # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* tmp1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f809000/mask=xbf80f400 +# CONSTRUCT x0f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 +# AUNIT --inst x0f809000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" +# Vector, single-precision and double-precision Q=0 and sz:L=0x + +:fmul Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.134 FMUL (by element) page C7-1693 line 94639 
MATCH x0f809000/mask=xbf80f400 +# CONSTRUCT x4f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 +# AUNIT --inst x4f809000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" +# Vector, single-precision and double-precision Q=1 and sz:L=0x + +:fmul Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e20dc00/mask=xbfa0fc00 +# CONSTRUCT x6e60dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@8 +# AUNIT --inst x6e60dc00/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fmul Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1b & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e20dc00/mask=xbfa0fc00 +# CONSTRUCT x2e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 +# AUNIT --inst x2e20dc00/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" + +:fmul Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1b & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e20dc00/mask=xbfa0fc00 +# CONSTRUCT x6e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 +# AUNIT --inst x6e20dc00/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" + +:fmul Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1b & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e401c00/mask=xbfe0fc00 +# CONSTRUCT x2e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@2 +# 
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 +# AUNIT --inst x2e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=0 suf=VPR64.4H + +:fmul Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e401c00/mask=xbfe0fc00 +# CONSTRUCT x6e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 +# AUNIT --inst x6e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=1 suf=VPR128.8H + +:fmul Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.136 FMUL (scalar) page C7-1699 line 94990 MATCH x1e200800/mask=xff20fc00 +# CONSTRUCT x1e600800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2 +# AUNIT --inst x1e600800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fmul Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x0 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64 f* Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.136 FMUL (scalar) page C7-1699 line 94990 MATCH x1e200800/mask=xff20fc00 +# CONSTRUCT x1e200800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2 +# AUNIT --inst x1e200800/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" + +:fmul Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x0 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = Rn_FPR32 f* Rm_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.136 FMUL (scalar) page C7-1699 line 94990 MATCH x1e200800/mask=xff20fc00 +# CONSTRUCT x1ee00800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2 +# AUNIT --inst x1ee00800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fmul Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x0 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; + zext_zh(Zd); # 
zero upper 30 bytes of Zd +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f809000/mask=xbf80f400 +# CONSTRUCT x6fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@8 +# AUNIT --inst x6fc09000/mask=xffe0f400 --rand dfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x9 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); + Rd_VPR128.2D = NEON_fmulx(Rn_VPR128.2D, tmp1, 8:1); +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f809000/mask=xbf80f400 +# CONSTRUCT x2f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 +# AUNIT --inst x2f809000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" + +:fmulx Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x9 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f809000/mask=xbf80f400 +# CONSTRUCT x6f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 +# AUNIT --inst x6f809000/mask=xffc0f400 --rand sfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x9 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + Rd_VPR128.4S = NEON_fmulx(Rn_VPR128.4S, tmp1, 4:1); +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x7f009000/mask=xffc0f400 +# CONSTRUCT x7f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 +# AUNIT --inst x7f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Scalar, half-precision variant + +:fmulx Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2231=0b0111111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + Rd_FPR16 = Rn_FPR16 f* tmp1; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x7f809000/mask=xff80f400 +# CONSTRUCT x7f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 +# AUNIT --inst x7f809000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" +# Scalar, single-precision and double-precision variant when sz=0 Ts=S V=32 + +:fmulx Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex +is b_2331=0b011111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + Rd_FPR32 = Rn_FPR32 f* tmp1; + zext_zs(Zd); # 
zero upper 28 bytes of Zd +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x7f809000/mask=xff80f400 +# CONSTRUCT x7fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@8 +# AUNIT --inst x7fc09000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" +# Scalar, single-precision and double-precision variant when sz=1 Ts=D V=64 + +:fmulx Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex +is b_2331=0b011111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Re_VPR128.D & vIndex & Zd +{ + # simd element Re_VPR128.D[vIndex] lane size 8 + local tmp1:8 = Re_VPR128.D.vIndex; + Rd_FPR64 = Rn_FPR64 f* tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f009000/mask=xbfc0f400 +# CONSTRUCT x2f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 +# AUNIT --inst x2f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Vector, half-precision variant when Q = 0 suf=64.4H + +:fmulx Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=0 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f009000/mask=xbfc0f400 +# CONSTRUCT x6f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 +# AUNIT --inst x6f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" +# Vector, half-precision variant when Q = 1 suf=128.8H + +:fmulx Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=1 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.138 FMULX page C7-1705 line 95331 MATCH x5e20dc00/mask=xffa0fc00 +# CONSTRUCT x5e60dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2 +# AUNIT --inst x5e60dc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1b & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fmulx(Rn_FPR64, Rm_FPR64); +} + +# C7.2.138 FMULX page C7-1705 line 95331 
MATCH x5e20dc00/mask=xffa0fc00 +# CONSTRUCT x5e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2 +# AUNIT --inst x5e20dc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1b & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fmulx(Rn_FPR32, Rm_FPR32); +} + +# C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e20dc00/mask=xbfa0fc00 +# CONSTRUCT x4e60dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@8 +# AUNIT --inst x4e60dc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1b & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fmulx(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e20dc00/mask=xbfa0fc00 +# CONSTRUCT x0e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 +# AUNIT --inst x0e20dc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1b & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fmulx(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e20dc00/mask=xbfa0fc00 +# CONSTRUCT x4e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 +# AUNIT --inst x4e20dc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:fmulx Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1b & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fmulx(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.138 FMULX page C7-1705 line 95331 MATCH x5e401c00/mask=xffe0fc00 +# CONSTRUCT x5e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2 +# AUNIT --inst x5e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Scalar half precision variant + +:fmulx Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01011110010 & b_1015=0b000111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e401c00/mask=xbfe0fc00 +# CONSTRUCT x0e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 +# AUNIT --inst x0e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=0 suf=64.4H + +:fmulx Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.138 FMULX page C7-1705 line 95331 
MATCH x0e401c00/mask=xbfe0fc00 +# CONSTRUCT x4e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 +# AUNIT --inst x4e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 suf=128.8H + +:fmulx Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ea0f800/mask=xbfbffc00 +# CONSTRUCT x6ee0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fneg@8 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@8 +# AUNIT --inst x6ee0f800/mask=xfffffc00 --rand dfp --status pass + +:fneg Rd_VPR128.2D, Rn_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd unary Rd_VPR128.2D = f-(Rn_VPR128.2D) on lane size 8 + Rd_VPR128.2D[0,64] = f-(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = f-(Rn_VPR128.2D[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ea0f800/mask=xbfbffc00 +# CONSTRUCT x2ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fneg@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@4 +# AUNIT --inst x2ea0f800/mask=xfffffc00 --rand sfp --status pass + +:fneg Rd_VPR64.2S, Rn_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd unary Rd_VPR64.2S = f-(Rn_VPR64.2S) on lane size 4 + Rd_VPR64.2S[0,32] = f-(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = f-(Rn_VPR64.2S[32,32]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ea0f800/mask=xbfbffc00 +# CONSTRUCT x6ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fneg@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@4 +# AUNIT --inst x6ea0f800/mask=xfffffc00 --rand sfp --status pass + +:fneg Rd_VPR128.4S, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd unary Rd_VPR128.4S = f-(Rn_VPR128.4S) on lane size 4 + Rd_VPR128.4S[0,32] = f-(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = f-(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = f-(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = f-(Rn_VPR128.4S[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ef8f800/mask=xbffffc00 +# CONSTRUCT x2ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fneg@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@2 +# AUNIT --inst x2ef8f800/mask=xfffffc00 --rand hfp --status noqemu +# Half-precision variant when Q=0 suf=64.4H + 
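+# Note: the half-precision FNEG variants below negate each 16-bit lane
+# individually with the f- p-code operator, just like the single- and
+# double-precision forms above; the packed b_1029 literal folds together
+# the U, size, and opcode fields that those forms spell out constraint
+# by constraint.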
+:fneg Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b10111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + # simd unary Rd_VPR64.4H = f-(Rn_VPR64.4H) on lane size 2 + Rd_VPR64.4H[0,16] = f-(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = f-(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = f-(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = f-(Rn_VPR64.4H[48,16]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ef8f800/mask=xbffffc00 +# CONSTRUCT x6ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$fneg@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@2 +# AUNIT --inst x6ef8f800/mask=xfffffc00 --rand hfp --status noqemu +# Half-precision variant when Q=1 suf=128.8H + +:fneg Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b10111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + # simd unary Rd_VPR128.8H = f-(Rn_VPR128.8H) on lane size 2 + Rd_VPR128.8H[0,16] = f-(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = f-(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = f-(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = f-(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = f-(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = f-(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = f-(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = f-(Rn_VPR128.8H[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.140 FNEG (scalar) page C7-1710 line 95628 MATCH x1e214000/mask=xff3ffc00 +# CONSTRUCT x1e614000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =fneg +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1 +# AUNIT --inst x1e614000/mask=xfffffc00 --rand dfp --status pass + +:fneg Rd_FPR64, Rn_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = f- Rn_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.140 FNEG (scalar) page C7-1710 line 95628 MATCH x1e214000/mask=xff3ffc00 +# CONSTRUCT x1e214000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =fneg +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1 +# AUNIT --inst x1e214000/mask=xfffffc00 --rand sfp --status pass + +:fneg Rd_FPR32, Rn_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = f- Rn_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.140 FNEG (scalar) page C7-1710 line 95628 MATCH x1e214000/mask=xff3ffc00 +# CONSTRUCT x1ee14000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =fneg +# SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1 +# AUNIT --inst x1ee14000/mask=xfffffc00 --rand hfp --status noqemu + +:fneg Rd_FPR16, Rn_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = f- Rn_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.141 FNMADD page C7-1712 line 95720 MATCH x1f200000/mask=xff208000 +# CONSTRUCT x1f600000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 fneg =NEON_fnmadd/3 +# AUNIT --inst x1f600000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" + +:fnmadd Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=0 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd +{ + local tmp1:8 = f- Ra_FPR64; + Rd_FPR64 = NEON_fnmadd(Rn_FPR64, Rm_FPR64, tmp1); +} + +# C7.2.141 FNMADD page C7-1712 line 95720 MATCH x1f200000/mask=xff208000 +# CONSTRUCT x1f200000/mask=xffe08000 
MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 fneg =NEON_fnmadd/3 +# AUNIT --inst x1f200000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" + +:fnmadd Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=0 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd +{ + local tmp1:4 = f- Ra_FPR32; + Rd_FPR32 = NEON_fnmadd(Rn_FPR32, Rm_FPR32, tmp1); +} + +# C7.2.141 FNMADD page C7-1712 line 95720 MATCH x1f200000/mask=xff208000 +# CONSTRUCT x1fe00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 fneg =NEON_fnmadd/3 +# AUNIT --inst x1fe00000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" + +:fnmadd Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=0 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd +{ + local tmp1:2 = f- Ra_FPR16; + Rd_FPR16 = NEON_fnmadd(Rn_FPR16, Rm_FPR16, tmp1); +} + +# C7.2.142 FNMSUB page C7-1714 line 95845 MATCH x1f208000/mask=xff208000 +# CONSTRUCT x1f608000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fnmsub/3 +# AUNIT --inst x1f608000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" + +:fnmsub Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=1 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_fnmsub(Rn_FPR64, Rm_FPR64, Ra_FPR64); +} + +# C7.2.142 FNMSUB page C7-1714 line 95845 MATCH x1f208000/mask=xff208000 +# CONSTRUCT x1f208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fnmsub/3 +# AUNIT --inst x1f208000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" + +:fnmsub Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=1 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_fnmsub(Rn_FPR32, Rm_FPR32, Ra_FPR32); +} + +# C7.2.142 FNMSUB page C7-1714 line 95845 MATCH x1f208000/mask=xff208000 +# CONSTRUCT x1fe08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fnmsub/3 +# AUNIT --inst x1fe08000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" + +:fnmsub Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=1 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_fnmsub(Rn_FPR16, Rm_FPR16, Ra_FPR16); +} + +# C7.2.143 FNMUL (scalar) page C7-1716 line 95969 MATCH x1e208800/mask=xff20fc00 +# CONSTRUCT x1e608800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* =fneg +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fnmul/2 +# AUNIT --inst x1e608800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fnmul Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x8 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + local tmp1:8 = Rn_FPR64 f* Rm_FPR64; + Rd_FPR64 = f- tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.143 FNMUL (scalar) page C7-1716 line 95969 MATCH x1e208800/mask=xff20fc00 +# CONSTRUCT x1e208800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* =fneg +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fnmul/2 +# AUNIT --inst x1e208800/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" + +:fnmul Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & 
b_1215=0x8 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + local tmp1:4 = Rn_FPR32 f* Rm_FPR32; + Rd_FPR32 = f- tmp1; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.143 FNMUL (scalar) page C7-1716 line 95969 MATCH x1e208800/mask=xff20fc00 +# CONSTRUCT x1ee08800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 f* =fneg +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fnmul/2 +# AUNIT --inst x1ee08800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fnmul Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x8 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + local tmp1:2 = Rn_FPR16 f* Rm_FPR16; + Rd_FPR16 = f- tmp1; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ea1d800/mask=xbfbffc00 +# CONSTRUCT x4ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@8 +# AUNIT --inst x4ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" + +:frecpe Rd_VPR128.2D, Rn_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_frecpe(Rn_VPR128.2D, 8:1); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ea1d800/mask=xbfbffc00 +# CONSTRUCT x0ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@4 +# AUNIT --inst x0ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:frecpe Rd_VPR64.2S, Rn_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_frecpe(Rn_VPR64.2S, 4:1); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ea1d800/mask=xbfbffc00 +# CONSTRUCT x4ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@4 +# AUNIT --inst x4ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:frecpe Rd_VPR128.4S, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_frecpe(Rn_VPR128.4S, 4:1); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x5ea1d800/mask=xffbffc00 +# CONSTRUCT x5ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1 +# AUNIT --inst x5ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" + +:frecpe Rd_FPR64, Rn_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_frecpe(Rn_FPR64); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x5ea1d800/mask=xffbffc00 +# CONSTRUCT x5ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1 +# AUNIT --inst x5ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" + +:frecpe Rd_FPR32, Rn_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_frecpe(Rn_FPR32); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x5ef9d800/mask=xfffffc00 +# CONSTRUCT x5ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1 +# AUNIT --inst x5ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Scalar half precision variant + +:frecpe Rd_FPR16, Rn_FPR16 +is b_1031=0b0101111011111001110110 & 
Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_frecpe(Rn_FPR16); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ef9d800/mask=xbffffc00 +# CONSTRUCT x0ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@2 +# AUNIT --inst x0ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=0 suf=64.4H + +:frecpe Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111011111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_frecpe(Rn_VPR64.4H, 2:1); +} + +# C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ef9d800/mask=xbffffc00 +# CONSTRUCT x4ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@2 +# AUNIT --inst x4ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 suf=128.8H + +:frecpe Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b00111011111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_frecpe(Rn_VPR128.8H, 2:1); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x5e20fc00/mask=xffa0fc00 +# CONSTRUCT x5e60fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2 +# AUNIT --inst x5e60fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:frecps Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1f & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_frecps(Rn_FPR64, Rm_FPR64); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x5e20fc00/mask=xffa0fc00 +# CONSTRUCT x5e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2 +# AUNIT --inst x5e20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:frecps Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1f & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_frecps(Rn_FPR32, Rm_FPR32); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e20fc00/mask=xbfa0fc00 +# CONSTRUCT x4e60fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@8 +# AUNIT --inst x4e60fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" + +:frecps Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_frecps(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e20fc00/mask=xbfa0fc00 +# CONSTRUCT x0e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@4 +# AUNIT --inst x0e20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:frecps Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_frecps(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e20fc00/mask=xbfa0fc00 +# CONSTRUCT x4e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@4 +# AUNIT --inst x4e20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" + +:frecps Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 
& b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_frecps(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x5e403c00/mask=xffe0fc00 +# CONSTRUCT x5e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2 +# AUNIT --inst x5e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Scalar half precision variant + +:frecps Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_2131=0b01011110010 & b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd +{ + Rd_FPR16 = NEON_frecps(Rn_FPR16, Rm_FPR16); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e403c00/mask=xbfe0fc00 +# CONSTRUCT x0e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@2 +# AUNIT --inst x0e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=0 suf=64.4H + +:frecps Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_frecps(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e403c00/mask=xbfe0fc00 +# CONSTRUCT x4e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@2 +# AUNIT --inst x4e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 suf=128.8H + +:frecps Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_frecps(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.146 FRECPX page C7-1724 line 96442 MATCH x5ef9f800/mask=xfffffc00 +# CONSTRUCT x5ef9f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpx/1 +# AUNIT --inst x5ef9f800/mask=xfffffc00 --rand hfp --status noqemu +# Half-precision variant + +:frecpx Rd_FPR16, Rn_FPR16 +is b_1031=0b0101111011111001111110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_frecpx(Rn_FPR16); +} + +# C7.2.146 FRECPX page C7-1724 line 96442 MATCH x5ea1f800/mask=xffbffc00 +# CONSTRUCT x5ea1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpx/1 +# AUNIT --inst x5ea1f800/mask=xfffffc00 --rand sfp --status nopcodeop +# Single-precision and double-precision variant when sz=0 suf=32 + +:frecpx Rd_FPR32, Rn_FPR32 +is b_2331=0b010111101 & b_22=0 & b_1021=0b100001111110 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = NEON_frecpx(Rn_FPR32); +} + +# C7.2.146 FRECPX page C7-1724 line 96442 MATCH x5ea1f800/mask=xffbffc00 +# CONSTRUCT x5ee1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frecpx/1 +# AUNIT --inst x5ee1f800/mask=xfffffc00 --rand dfp --status nopcodeop +# Single-precision and double-precision variant when sz=1 suf=64 + +:frecpx Rd_FPR64, Rn_FPR64 +is b_2331=0b010111101 & b_22=1 & b_1021=0b100001111110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_frecpx(Rn_FPR64); +} + +# C7.2.140 FRINTA (vector) page C7-1313 line 76386 KEEPWITH + +frint_vmode: "a" is b_29=1 & b_23=0 & b_12=0 { } +frint_vmode: "i" is b_29=1 & b_23=1 & b_12=1 { } +frint_vmode: "m" is b_29=0 & b_23=0 & b_12=1 { } +frint_vmode: "n" is b_29=0 & b_23=0 & b_12=0 { } +frint_vmode: "p" is b_29=0 & b_23=1 & b_12=0 { } +frint_vmode: "x" is b_29=1 & b_23=0 & b_12=1 { } +frint_vmode: "z" is b_29=0 & b_23=1 & b_12=1 { } + +# 
C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e798800/mask=xbffffc00 +# C7.2.157 FRINTI (vector) page C7-1746 line 97503 MATCH x2ef99800/mask=xbffffc00 +# C7.2.159 FRINTM (vector) page C7-1750 line 97735 MATCH x0e799800/mask=xbffffc00 +# C7.2.161 FRINTN (vector) page C7-1754 line 97967 MATCH x0e798800/mask=xbffffc00 +# C7.2.163 FRINTP (vector) page C7-1758 line 98199 MATCH x0ef98800/mask=xbffffc00 +# C7.2.165 FRINTX (vector) page C7-1762 line 98431 MATCH x2e799800/mask=xbffffc00 +# C7.2.167 FRINTZ (vector) page C7-1766 line 98666 MATCH x0ef99800/mask=xbffffc00 +# CONSTRUCT x0e798800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$trunc@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@2 +# AUNIT --inst x0e798800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=0 suf=64.4H + +:frint^frint_vmode Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2 + Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e798800/mask=xbffffc00 +# C7.2.157 FRINTI (vector) page C7-1746 line 97503 MATCH x2ef99800/mask=xbffffc00 +# C7.2.159 FRINTM (vector) page C7-1750 line 97735 MATCH x0e799800/mask=xbffffc00 +# C7.2.161 FRINTN (vector) page C7-1754 line 97967 MATCH x0e798800/mask=xbffffc00 +# C7.2.163 FRINTP (vector) page C7-1758 line 98199 MATCH x0ef98800/mask=xbffffc00 +# C7.2.165 FRINTX (vector) page C7-1762 line 98431 MATCH x2e799800/mask=xbffffc00 +# C7.2.167 FRINTZ (vector) page C7-1766 line 98666 MATCH x0ef99800/mask=xbffffc00 +# CONSTRUCT x4e798800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$trunc@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@2 +# AUNIT --inst x4e798800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=1 suf=128.8H + +:frint^frint_vmode Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 + Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e218800/mask=xbfbffc00 +# C7.2.157 FRINTI (vector) page C7-1746 line 97503 MATCH x2ea19800/mask=xbfbffc00 +# C7.2.159 FRINTM (vector) page C7-1750 line 97735 MATCH x0e219800/mask=xbfbffc00 +# C7.2.161 FRINTN (vector) page C7-1754 line 97967 MATCH x0e218800/mask=xbfbffc00 +# C7.2.163 FRINTP (vector) page C7-1758 line 98199 MATCH x0ea18800/mask=xbfbffc00 +# C7.2.165 FRINTX (vector) page C7-1762 line 98431 MATCH x2e219800/mask=xbfbffc00 +# C7.2.167 FRINTZ (vector) page C7-1766 line 98666 MATCH x0ea19800/mask=xbfbffc00 +# CONSTRUCT 
x0e218800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$trunc@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@4 +# AUNIT --inst x0e218800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" +# Single-precision and double-precision variant when sz=0 Q=0 suf=64.2S + +:frint^frint_vmode Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 + Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e218800/mask=xbfbffc00 +# C7.2.157 FRINTI (vector) page C7-1746 line 97503 MATCH x2ea19800/mask=xbfbffc00 +# C7.2.159 FRINTM (vector) page C7-1750 line 97735 MATCH x0e219800/mask=xbfbffc00 +# C7.2.161 FRINTN (vector) page C7-1754 line 97967 MATCH x0e218800/mask=xbfbffc00 +# C7.2.163 FRINTP (vector) page C7-1758 line 98199 MATCH x0ea18800/mask=xbfbffc00 +# C7.2.165 FRINTX (vector) page C7-1762 line 98431 MATCH x2e219800/mask=xbfbffc00 +# C7.2.167 FRINTZ (vector) page C7-1766 line 98666 MATCH x0ea19800/mask=xbfbffc00 +# CONSTRUCT x4e218800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$trunc@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@4 +# AUNIT --inst x4e218800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" +# Single-precision and double-precision variant when sz=0 Q=1 suf=128.4S + +:frint^frint_vmode Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 + Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e218800/mask=xbfbffc00 +# C7.2.157 FRINTI (vector) page C7-1746 line 97503 MATCH x2ea19800/mask=xbfbffc00 +# C7.2.159 FRINTM (vector) page C7-1750 line 97735 MATCH x0e219800/mask=xbfbffc00 +# C7.2.161 FRINTN (vector) page C7-1754 line 97967 MATCH x0e218800/mask=xbfbffc00 +# C7.2.163 FRINTP (vector) page C7-1758 line 98199 MATCH x0ea18800/mask=xbfbffc00 +# C7.2.165 FRINTX (vector) page C7-1762 line 98431 MATCH x2e219800/mask=xbfbffc00 +# C7.2.167 FRINTZ (vector) page C7-1766 line 98666 MATCH x0ea19800/mask=xbfbffc00 +# CONSTRUCT x4e618800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$trunc@8 +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@8 +# AUNIT --inst x4e618800/mask=xdf7fec00 --rand dfp --status fail --comment "nofpround" +# Single-precision and double-precision variant when sz=1 Q=1 suf=128.2D + +:frint^frint_vmode Rd_VPR128.2D, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b1 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 + Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.141 FRINTA (scalar) page C7-1315 line 76515 KEEPWITH +# FP rounding instruction (not implemented) + 
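+# Note: frint_smode only selects the mnemonic suffix from instruction
+# bits 15-17; as with the vector forms above, every rounding mode is
+# lowered to the same trunc() p-code op, so the mode-specific rounding
+# behaviour is not modelled (hence the "nofpround" AUNIT comments).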
+frint_smode: "a" is b_1517=0b100 { } +frint_smode: "i" is b_1517=0b111 { } +frint_smode: "m" is b_1517=0b010 { } +frint_smode: "n" is b_1517=0b000 { } +frint_smode: "p" is b_1517=0b001 { } +frint_smode: "x" is b_1517=0b110 { } +frint_smode: "z" is b_1517=0b011 { } + +# C7.2.156 FRINTA (scalar) page C7-1744 line 97402 MATCH x1e264000/mask=xff3ffc00 +# C7.2.158 FRINTI (scalar) page C7-1748 line 97632 MATCH x1e27c000/mask=xff3ffc00 +# C7.2.160 FRINTM (scalar) page C7-1752 line 97864 MATCH x1e254000/mask=xff3ffc00 +# C7.2.162 FRINTN (scalar) page C7-1756 line 98096 MATCH x1e244000/mask=xff3ffc00 +# C7.2.164 FRINTP (scalar) page C7-1760 line 98328 MATCH x1e24c000/mask=xff3ffc00 +# C7.2.166 FRINTX (scalar) page C7-1764 line 98561 MATCH x1e274000/mask=xff3ffc00 +# C7.2.168 FRINTZ (scalar) page C7-1768 line 98795 MATCH x1e25c000/mask=xff3ffc00 +# CONSTRUCT x1ee44000/mask=xfffc7c00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =trunc +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1 +# AUNIT --inst x1ee44000/mask=xfffc7c00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when type = 11 suf=16 + +:frint^frint_smode Rd_FPR16, Rn_FPR16 +is b_2431=0b00011110 & b_2223=0b11 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = trunc(Rn_FPR16); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.156 FRINTA (scalar) page C7-1744 line 97402 MATCH x1e264000/mask=xff3ffc00 +# C7.2.158 FRINTI (scalar) page C7-1748 line 97632 MATCH x1e27c000/mask=xff3ffc00 +# C7.2.160 FRINTM (scalar) page C7-1752 line 97864 MATCH x1e254000/mask=xff3ffc00 +# C7.2.162 FRINTN (scalar) page C7-1756 line 98096 MATCH x1e244000/mask=xff3ffc00 +# C7.2.164 FRINTP (scalar) page C7-1760 line 98328 MATCH x1e24c000/mask=xff3ffc00 +# C7.2.166 FRINTX (scalar) page C7-1764 line 98561 MATCH x1e274000/mask=xff3ffc00 +# C7.2.168 FRINTZ (scalar) page C7-1768 line 98795 MATCH x1e25c000/mask=xff3ffc00 +# CONSTRUCT x1e244000/mask=xfffc7c00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =trunc +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1 +# AUNIT --inst x1e244000/mask=xfffc7c00 --rand sfp --status fail --comment "nofpround" +# Single-precision variant when type = 00 suf=32 + +:frint^frint_smode Rd_FPR32, Rn_FPR32 +is b_2431=0b00011110 & b_2223=0b00 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = trunc(Rn_FPR32); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.156 FRINTA (scalar) page C7-1744 line 97402 MATCH x1e264000/mask=xff3ffc00 +# C7.2.158 FRINTI (scalar) page C7-1748 line 97632 MATCH x1e27c000/mask=xff3ffc00 +# C7.2.160 FRINTM (scalar) page C7-1752 line 97864 MATCH x1e254000/mask=xff3ffc00 +# C7.2.162 FRINTN (scalar) page C7-1756 line 98096 MATCH x1e244000/mask=xff3ffc00 +# C7.2.164 FRINTP (scalar) page C7-1760 line 98328 MATCH x1e24c000/mask=xff3ffc00 +# C7.2.166 FRINTX (scalar) page C7-1764 line 98561 MATCH x1e274000/mask=xff3ffc00 +# C7.2.168 FRINTZ (scalar) page C7-1768 line 98795 MATCH x1e25c000/mask=xff3ffc00 +# CONSTRUCT x1e644000/mask=xfffc7c00 MATCHED 7 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =trunc +# SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1 +# AUNIT --inst x1e644000/mask=xfffc7c00 --rand dfp --status fail --comment "nofpround" +# Double-precision variant when type = 01 suf=64 + +:frint^frint_smode Rd_FPR64, Rn_FPR64 +is b_2431=0b00011110 & b_2223=0b01 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = trunc(Rn_FPR64); + zext_zd(Zd); # zero 
upper 24 bytes of Zd +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x7ef9d800/mask=xfffffc00 +# CONSTRUCT x7ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1 +# AUNIT --inst x7ef9d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Scalar half precision variant when Q=1 sz=1 ba=11 bb=111 V=FPR16 esize= + +:frsqrte Rd_FPR16, Rn_FPR16 +is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b111001110110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_frsqrte(Rn_FPR16); +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x7ea1d800/mask=xffbffc00 +# CONSTRUCT x7ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1 +# AUNIT --inst x7ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" +# Scalar single-precision and double-precision variant when Q=1 sz=0 ba=11 bb=100 V=FPR32 esize= + +:frsqrte Rd_FPR32, Rn_FPR32 +is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=0 & b_1021=0b100001110110 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = NEON_frsqrte(Rn_FPR32); +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x7ea1d800/mask=xffbffc00 +# CONSTRUCT x7ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1 +# AUNIT --inst x7ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" +# Scalar single-precision and double-precision variant when Q=1 sz=1 ba=11 bb=100 V=FPR64 esize= + +:frsqrte Rd_FPR64, Rn_FPR64 +is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b100001110110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = NEON_frsqrte(Rn_FPR64); +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ef9d800/mask=xbffffc00 +# CONSTRUCT x2ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@2 +# AUNIT --inst x2ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=0 sz=1 ba=10 bb=111 V=VPR64.4H esize=@2 + +:frsqrte Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_frsqrte(Rn_VPR64.4H, 2:1); +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ef9d800/mask=xbffffc00 +# CONSTRUCT x6ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@2 +# AUNIT --inst x6ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 sz=1 ba=10 bb=111 V=VPR128.8H esize=@2 + +:frsqrte Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_frsqrte(Rn_VPR128.8H, 2:1); +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ea1d800/mask=xbfbffc00 +# CONSTRUCT x2ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@4 +# AUNIT --inst x2ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" +# Vector single-precision and double-precision variant when Q=0 sz=0 ba=10 bb=100 V=VPR64.2S esize=@4 + +:frsqrte Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_frsqrte(Rn_VPR64.2S, 4:1); +} + +# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ea1d800/mask=xbfbffc00 +# CONSTRUCT x6ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@4 +# AUNIT --inst 
x6ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
+# Vector single-precision and double-precision variant when Q=1 sz=0 ba=10 bb=100 V=VPR128.4S esize=@4
+
+:frsqrte Rd_VPR128.4S, Rn_VPR128.4S
+is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd
+{
+ Rd_VPR128.4S = NEON_frsqrte(Rn_VPR128.4S, 4:1);
+}
+
+# C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ea1d800/mask=xbfbffc00
+# CONSTRUCT x6ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@8
+# AUNIT --inst x6ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround"
+# Vector single-precision and double-precision variant when Q=1 sz=1 ba=10 bb=100 V=VPR128.2D esize=@8
+
+:frsqrte Rd_VPR128.2D, Rn_VPR128.2D
+is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001110110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd
+{
+ Rd_VPR128.2D = NEON_frsqrte(Rn_VPR128.2D, 8:1);
+}
+
+# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x5ec03c00/mask=xffe0fc00
+# CONSTRUCT x5ec03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2
+# AUNIT --inst x5ec03c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Scalar half precision variant when Q=1 sz=1 ba=01 bb=0 bc=00 V=FPR16 esize=
+
+:frsqrts Rd_FPR16, Rn_FPR16, Rm_FPR16
+is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd
+{
+ Rd_FPR16 = NEON_frsqrts(Rn_FPR16, Rm_FPR16);
+}
+
+# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x5ea0fc00/mask=xffa0fc00
+# CONSTRUCT x5ea0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2
+# AUNIT --inst x5ea0fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
+# Scalar single-precision and double-precision variant when Q=1 sz=0 ba=01 bb=1 bc=11 V=FPR32 esize=
+
+:frsqrts Rd_FPR32, Rn_FPR32, Rm_FPR32
+is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd
+{
+ Rd_FPR32 = NEON_frsqrts(Rn_FPR32, Rm_FPR32);
+}
+
+# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x5ea0fc00/mask=xffa0fc00
+# CONSTRUCT x5ee0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2
+# AUNIT --inst x5ee0fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
+# Scalar single-precision and double-precision variant when Q=1 sz=1 ba=01 bb=1 bc=11 V=FPR64 esize=
+
+:frsqrts Rd_FPR64, Rn_FPR64, Rm_FPR64
+is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd
+{
+ Rd_FPR64 = NEON_frsqrts(Rn_FPR64, Rm_FPR64);
+}
+
+# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ec03c00/mask=xbfe0fc00
+# CONSTRUCT x0ec03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@2
+# AUNIT --inst x0ec03c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
+# Vector half precision variant when Q=0 sz=1 ba=00 bb=0 bc=00 V=VPR64.4H esize=@2
+
+:frsqrts Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
+{
+ Rd_VPR64.4H = NEON_frsqrts(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
+}
+
+# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ec03c00/mask=xbfe0fc00
+# CONSTRUCT x4ec03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@2
+# AUNIT --inst
x4ec03c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 sz=1 ba=00 bb=0 bc=00 V=VPR128.8H esize=@2 + +:frsqrts Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_frsqrts(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ea0fc00/mask=xbfa0fc00 +# CONSTRUCT x0ea0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@4 +# AUNIT --inst x0ea0fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" +# Vector single-precision and double-precision variant when Q=0 sz=0 ba=00 bb=1 bc=11 V=VPR64.2S esize=@4 + +:frsqrts Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_frsqrts(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ea0fc00/mask=xbfa0fc00 +# CONSTRUCT x4ea0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@4 +# AUNIT --inst x4ea0fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" +# Vector single-precision and double-precision variant when Q=1 sz=0 ba=00 bb=1 bc=11 V=VPR128.4S esize=@4 + +:frsqrts Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_frsqrts(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ea0fc00/mask=xbfa0fc00 +# CONSTRUCT x4ee0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@8 +# AUNIT --inst x4ee0fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" +# Vector single-precision and double-precision variant when Q=1 sz=1 ba=00 bb=1 bc=11 V=VPR128.2D esize=@8 + +:frsqrts Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_frsqrts(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ef9f800/mask=xbffffc00 +# CONSTRUCT x2ef9f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@2 +# AUNIT --inst x2ef9f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=0 sz=1 ba=111 esize=2 suf=VPR64.4H + +:fsqrt Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_fsqrt(Rn_VPR64.4H, 2:1); +} + +# C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ef9f800/mask=xbffffc00 +# CONSTRUCT x6ef9f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@2 +# AUNIT --inst x6ef9f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=1 sz=1 ba=111 esize=2 suf=VPR128.8H + +:fsqrt Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_fsqrt(Rn_VPR128.8H, 2:1); +} + +# C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ea1f800/mask=xbfbffc00 +# CONSTRUCT x2ea1f800/mask=xfffffc00 
MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@4 +# AUNIT --inst x2ea1f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" +# Single-precision and double-precision variant when Q=0 sz=0 ba=100 esize=4 suf=VPR64.2S + +:fsqrt Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_fsqrt(Rn_VPR64.2S, 4:1); +} + +# C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ea1f800/mask=xbfbffc00 +# CONSTRUCT x6ea1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@4 +# AUNIT --inst x6ea1f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" +# Single-precision and double-precision variant when Q=1 sz=0 ba=100 esize=4 suf=VPR128.4S + +:fsqrt Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_fsqrt(Rn_VPR128.4S, 4:1); +} + +# C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ea1f800/mask=xbfbffc00 +# CONSTRUCT x6ee1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@8 +# AUNIT --inst x6ee1f800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" +# Single-precision and double-precision variant when Q=1 sz=1 ba=100 esize=8 suf=VPR128.2D + +:fsqrt Rd_VPR128.2D, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001111110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_fsqrt(Rn_VPR128.2D, 8:1); +} + +# C7.2.172 FSQRT (scalar) page C7-1778 line 99375 MATCH x1e21c000/mask=xff3ffc00 +# CONSTRUCT x1ee1c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sqrt/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1 +# AUNIT --inst x1ee1c000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant + +:fsqrt Rd_FPR16, Rn_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = sqrt(Rn_FPR16); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.172 FSQRT (scalar) page C7-1778 line 99375 MATCH x1e21c000/mask=xff3ffc00 +# CONSTRUCT x1e21c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sqrt/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1 +# AUNIT --inst x1e21c000/mask=xfffffc00 --rand sfp --status fail --comment "nofpround" +# Single-precision variant + +:fsqrt Rd_FPR32, Rn_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = sqrt(Rn_FPR32); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.172 FSQRT (scalar) page C7-1778 line 99375 MATCH x1e21c000/mask=xff3ffc00 +# CONSTRUCT x1e61c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sqrt/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1 +# AUNIT --inst x1e61c000/mask=xfffffc00 --rand dfp --status fail --comment "nofpround" +# Double-precision variant + +:fsqrt Rd_FPR64, Rn_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = sqrt(Rn_FPR64); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ea0d400/mask=xbfa0fc00 +# CONSTRUCT x4ee0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@8 +# AUNIT --inst 
x4ee0d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd infix Rd_VPR128.2D = Rn_VPR128.2D f- Rm_VPR128.2D on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f- Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f- Rm_VPR128.2D[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ea0d400/mask=xbfa0fc00 +# CONSTRUCT x0ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@4 +# AUNIT --inst x0ea0d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" + +:fsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f- Rm_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f- Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f- Rm_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ea0d400/mask=xbfa0fc00 +# CONSTRUCT x4ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@4 +# AUNIT --inst x4ea0d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" + +:fsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S f- Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f- Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f- Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f- Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f- Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ec01400/mask=xbfe0fc00 +# CONSTRUCT x0ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@2 +# AUNIT --inst x0ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=0 suf=VPR64.4H + +:fsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f- Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f- Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f- Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f- Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f- Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ec01400/mask=xbfe0fc00 +# CONSTRUCT x4ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$f-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@2 +# AUNIT --inst x4ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" +# Half-precision variant when Q=1 suf=VPR128.8H + +:fsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000101 & 
Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f- Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f- Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f- Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f- Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f- Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f- Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f- Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f- Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f- Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.174 FSUB (scalar) page C7-1782 line 99588 MATCH x1e203800/mask=xff20fc00 +# CONSTRUCT x1e603800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f- +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2 +# AUNIT --inst x1e603800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" + +:fsub Rd_FPR64, Rn_FPR64, Rm_FPR64 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x3 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64 f- Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.174 FSUB (scalar) page C7-1782 line 99588 MATCH x1e203800/mask=xff20fc00 +# CONSTRUCT x1e203800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f- +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2 +# AUNIT --inst x1e203800/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" + +:fsub Rd_FPR32, Rn_FPR32, Rm_FPR32 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x3 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = Rn_FPR32 f- Rm_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.174 FSUB (scalar) page C7-1782 line 99588 MATCH x1e203800/mask=xff20fc00 +# CONSTRUCT x1ee03800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =f- +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2 +# AUNIT --inst x1ee03800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" + +:fsub Rd_FPR16, Rn_FPR16, Rm_FPR16 +is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x3 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = Rn_FPR16 f- Rm_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 +# CONSTRUCT x2c400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 4 +:8 =load ext +# SMACRO(pseudo) ARG1 ARG3 =NEON_ldnp1/1 ARG2 ARG3 =NEON_ldnp2/1 +# AUNIT --inst x2c400000/mask=xffc00000 --status nomem + +:ldnp Rt_FPR32, Rt2_FPR32, addrPairIndexed +is b_3031=0b00 & b_2229=0b10110001 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt & Zt2 +{ + Rt_FPR32 = * addrPairIndexed; + zext_zs(Zt); # zero upper 28 bytes of Zt + local tmp1:8 = addrPairIndexed + 4; + Rt2_FPR32 = * tmp1; + zext_zs(Zt2); # zero upper 28 bytes of Zt2 +} + +# C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 +# CONSTRUCT x6c400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 8 +:8 =load ext +# SMACRO(pseudo) ARG1 ARG3 =NEON_ldnp1/1 ARG2 ARG3 =NEON_ldnp2/1 +# AUNIT --inst x6c400000/mask=xffc00000 --status nomem + +:ldnp Rt_FPR64, Rt2_FPR64, addrPairIndexed +is b_3031=0b01 & b_2229=0b10110001 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt & Zt2 +{ + Rt_FPR64 = * addrPairIndexed; + zext_zd(Zt); # 
zero upper 24 bytes of Zt + local tmp1:8 = addrPairIndexed + 8; + Rt2_FPR64 = * tmp1; + zext_zd(Zt2); # zero upper 24 bytes of Zt2 +} + +# C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 +# CONSTRUCT xac400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 16 +:8 =load ext +# SMACRO(pseudo) ARG1 ARG3 =NEON_ldnp1/1 ARG2 ARG3 =NEON_ldnp2/1 +# AUNIT --inst xac400000/mask=xffc00000 --status nomem + +:ldnp Rt_FPR128, Rt2_FPR128, addrPairIndexed +is b_3031=0b10 & b_2229=0b10110001 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & Zt & Zt2 +{ + Rt_FPR128 = * addrPairIndexed; + zext_zq(Zt); # zero upper 16 bytes of Zt + local tmp1:8 = addrPairIndexed + 16; + Rt2_FPR128 = * tmp1; + zext_zq(Zt2); # zero upper 16 bytes of Zt2 +} + +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2cc00000/mask=x3fc00000 +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2dc00000/mask=x3fc00000 +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2d400000/mask=x3fc00000 +# C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 +# CONSTRUCT xac400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 16 +:8 =load ext +# SMACRO(pseudo) ARG1 ARG3 =NEON_ldp1/1 ARG2 ARG3 =NEON_ldp2/1 +# AUNIT --inst xac400000/mask=xfe400000 --status nomem + +:ldp Rt_FPR128, Rt2_FPR128, addrPairIndexed +is b_3031=0b10 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & Zt & Zt2 +{ + Rt_FPR128 = * addrPairIndexed; + zext_zq(Zt); # zero upper 16 bytes of Zt + local tmp1:8 = addrPairIndexed + 16; + Rt2_FPR128 = * tmp1; + zext_zq(Zt2); # zero upper 16 bytes of Zt2 +} + +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2cc00000/mask=x3fc00000 +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2dc00000/mask=x3fc00000 +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2d400000/mask=x3fc00000 +# C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 +# CONSTRUCT x2c400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 4 +:8 =load ext +# SMACRO(pseudo) ARG1 ARG3 =NEON_ldp1/1 ARG2 ARG3 =NEON_ldp2/1 +# AUNIT --inst x2c400000/mask=xfe400000 --status nomem + +:ldp Rt_FPR32, Rt2_FPR32, addrPairIndexed +is b_3031=0b00 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt & Zt2 +{ + Rt_FPR32 = * addrPairIndexed; + zext_zs(Zt); # zero upper 28 bytes of Zt + local tmp1:8 = addrPairIndexed + 4; + Rt2_FPR32 = * tmp1; + zext_zs(Zt2); # zero upper 28 bytes of Zt2 +} + +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2cc00000/mask=x3fc00000 +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2dc00000/mask=x3fc00000 +# C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2d400000/mask=x3fc00000 +# C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 +# CONSTRUCT x6c400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 8 +:8 =load ext +# SMACRO(pseudo) ARG1 ARG3 =NEON_ldp1/1 ARG2 ARG3 =NEON_ldp2/1 +# AUNIT --inst x6c400000/mask=xfe400000 --status nomem + +:ldp Rt_FPR64, Rt2_FPR64, addrPairIndexed +is b_3031=0b01 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt & Zt2 +{ + Rt_FPR64 = * addrPairIndexed; + zext_zd(Zt); # zero upper 24 bytes of Zt + local tmp1:8 = addrPairIndexed + 8; + Rt2_FPR64 = * tmp1; + zext_zd(Zt2); # zero upper 24 bytes of Zt2 +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 
line 102884 MATCH x3c400400/mask=x3f600c00 +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400c00/mask=x3f600c00 +# CONSTRUCT x3c400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x3c400400/mask=xffe00400 --status nomem +# Post- and Pre-index 8-bit variant when size==00 && opc==01 F=FPR8 + +:ldr Rt_FPR8, addrIndexed +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR8 & addrIndexed & Zt +{ + Rt_FPR8 = * addrIndexed; + zext_zb(Zt); # zero upper 31 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400c00/mask=x3f600c00 +# CONSTRUCT x7c400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x7c400400/mask=xffe00400 --status nomem +# Post- and Pre-index 16-bit variant when size==01 && opc==01 F=FPR16 + +:ldr Rt_FPR16, addrIndexed +is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR16 & addrIndexed & Zt +{ + Rt_FPR16 = * addrIndexed; + zext_zh(Zt); # zero upper 30 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400c00/mask=x3f600c00 +# CONSTRUCT xbc400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst xbc400400/mask=xffe00400 --status nomem +# Post- and Pre-index 32-bit variant when size==10 && opc==01 F=FPR32 + +:ldr Rt_FPR32, addrIndexed +is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR32 & addrIndexed & Zt +{ + Rt_FPR32 = * addrIndexed; + zext_zs(Zt); # zero upper 28 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400c00/mask=x3f600c00 +# CONSTRUCT xfc400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst xfc400400/mask=xffe00400 --status nomem +# Post- and Pre-index 64-bit variant when size==11 && opc==01 F=FPR64 + +:ldr Rt_FPR64, addrIndexed +is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR64 & addrIndexed & Zt +{ + Rt_FPR64 = * addrIndexed; + zext_zd(Zt); # zero upper 24 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400c00/mask=x3f600c00 +# CONSTRUCT x3cc00400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x3cc00400/mask=xffe00400 --status nomem +# Post- and Pre-index 128-bit variant when size==00 && opc==11 F=FPR128 + +:ldr Rt_FPR128, addrIndexed +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=0 & b_10=1 & Rt_FPR128 & addrIndexed & Zt +{ + Rt_FPR128 = * addrIndexed; + zext_zq(Zt); # zero upper 16 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 +# CONSTRUCT x3d400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x3d400000/mask=xffc00000 --status nomem +# Unsigned offset 8-bit variant when size == 00 && opc == 01 
F=FPR8 + +:ldr Rt_FPR8, addrUIMM +is b_3031=0b00 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR8 & addrUIMM & Zt +{ + Rt_FPR8 = * addrUIMM; + zext_zb(Zt); # zero upper 31 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 +# CONSTRUCT x7d400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x7d400000/mask=xffc00000 --status nomem +# Unsigned offset 16-bit variant when size == 01 && opc == 01 F=FPR16 + +:ldr Rt_FPR16, addrUIMM +is b_3031=0b01 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR16 & addrUIMM & Zt +{ + Rt_FPR16 = * addrUIMM; + zext_zh(Zt); # zero upper 30 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 +# CONSTRUCT xbd400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst xbd400000/mask=xffc00000 --status nomem +# Unsigned offset 32-bit variant when size == 10 && opc == 01 F=FPR32 + +:ldr Rt_FPR32, addrUIMM +is b_3031=0b10 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR32 & addrUIMM & Zt +{ + Rt_FPR32 = * addrUIMM; + zext_zs(Zt); # zero upper 28 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 +# CONSTRUCT xfd400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst xfd400000/mask=xffc00000 --status nomem +# Unsigned offset 64-bit variant when size == 11 && opc == 01 F=FPR64 + +:ldr Rt_FPR64, addrUIMM +is b_3031=0b11 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR64 & addrUIMM & Zt +{ + Rt_FPR64 = * addrUIMM; + zext_zd(Zt); # zero upper 24 bytes of Zt +} + +# C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 +# CONSTRUCT x3dc00000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x3dc00000/mask=xffc00000 --status nomem +# Unsigned offset 128-bit variant when size == 00 && opc == 11 F=FPR128 + +:ldr Rt_FPR128, addrUIMM +is b_3031=0b00 & b_2429=0b111101 & b_2223=0b11 & Rt_FPR128 & addrUIMM & Zt +{ + Rt_FPR128 = * addrUIMM; + zext_zq(Zt); # zero upper 16 bytes of Zt +} + +# C7.2.192 LDR (literal, SIMD&FP) page C7-1839 line 103142 MATCH x1c000000/mask=x3f000000 +# CONSTRUCT x5c000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load:8 +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x5c000000/mask=xff000000 --status nomem + +:ldr Rt_FPR64, AddrLoc19 +is size.ldstr=1 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR64 & Zt +{ + Rt_FPR64 = *:8 AddrLoc19; + zext_zd(Zt); # zero upper 24 bytes of Zt +} + +# C7.2.192 LDR (literal, SIMD&FP) page C7-1839 line 103142 MATCH x1c000000/mask=x3f000000 +# CONSTRUCT x9c000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load:16 +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x9c000000/mask=xff000000 --status nomem + +:ldr Rt_FPR128, AddrLoc19 +is size.ldstr=2 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR128 & Zt +{ + Rt_FPR128 = *:16 AddrLoc19; + zext_zq(Zt); # zero upper 16 bytes of Zt +} + +# C7.2.192 LDR (literal, SIMD&FP) page C7-1839 line 103142 MATCH x1c000000/mask=x3f000000 +# CONSTRUCT x1c000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load:4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1 +# AUNIT --inst x1c000000/mask=xff000000 --status nomem + +:ldr Rt_FPR32, AddrLoc19 +is 
size.ldstr=0 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR32 & Zt +{ + Rt_FPR32 = *:4 AddrLoc19; + zext_zs(Zt); # zero upper 28 bytes of Zt +} + +# C7.2.178 LDR (register, SIMD&FP) page C7-1411 line 82199 KEEPWITH + +extend_amount: "" is b_3031=0b00 & b_23=0 & b_12=0 { export 0:1; } +extend_amount: " #0" is b_3031=0b00 & b_23=0 & b_12=1 { export 0:1; } +extend_amount: "" is b_3031=0b01 & b_23=0 & b_12=0 { export 0:1; } +extend_amount: " #1" is b_3031=0b01 & b_23=0 & b_12=1 { export 1:1; } +extend_amount: "" is b_3031=0b10 & b_23=0 & b_12=0 { export 0:1; } +extend_amount: " #2" is b_3031=0b10 & b_23=0 & b_12=1 { export 2:1; } +extend_amount: "" is b_3031=0b11 & b_23=0 & b_12=0 { export 0:1; } +extend_amount: " #3" is b_3031=0b11 & b_23=0 & b_12=1 { export 3:1; } +extend_amount: "" is b_3031=0b00 & b_23=1 & b_12=0 { export 0:1; } +extend_amount: " #4" is b_3031=0b00 & b_23=1 & b_12=1 { export 4:1; } + +extend_spec: ", uxtw" is b_1315=0b010 & Rm_GPR32 { local tmp:8 = zext(Rm_GPR32); export tmp; } +extend_spec: ", sxtw" is b_1315=0b110 & Rm_GPR32 { local tmp:8 = sext(Rm_GPR32); export tmp; } +extend_spec: ", sxtx" is b_1315=0b111 & Rm_GPR64 { export Rm_GPR64; } +extend_spec: ", lsl" is b_1315=0b011 & b_12=1 & Rm_GPR64 { export Rm_GPR64; } # same as uxtx +extend_spec: "" is b_1315=0b011 & b_12=0 & Rm_GPR64 { export Rm_GPR64; } # same as uxtx + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x3c600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x3c600800/mask=xffe02c00 --status nomem +# 8-fsreg,LDR-8-fsreg variant when size == 00 && opc == 01 && option is not 011 bb=b_13 option=0 F=FPR8 G=GPR32 + +:ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR8 = * tmp2; + zext_zb(Zt); # zero upper 31 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x3c602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x3c602800/mask=xffe02c00 --status nomem +# 8-fsreg,LDR-8-fsreg variant when size == 00 && opc == 01 && option is not 011 bb=b_13 option=1 F=FPR8 G=GPR64 + +:ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR8 = * tmp2; + zext_zb(Zt); # zero upper 31 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x3c606800/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x3c606800/mask=xffe0ec00 --status nomem +# 8-fsreg,LDR-8-fsreg variant when size == 00 && opc == 01 && option is 011 bb=b_1315 option=0b011 F=FPR8 G=GPR64 + +:ldr Rt_FPR8, [Rn_GPR64xsp, 
Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_1315=0b011 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR8 = * tmp2; + zext_zb(Zt); # zero upper 31 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x7c600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x7c600800/mask=xffe02c00 --status nomem +# 16-fsreg,LDR-16-fsreg variant when size == 01 && opc == 01 bb=b_13 option=0 F=FPR16 G=GPR32 + +:ldr Rt_FPR16, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR16 = * tmp2; + zext_zh(Zt); # zero upper 30 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x7c602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x7c602800/mask=xffe02c00 --status nomem +# 16-fsreg,LDR-16-fsreg variant when size == 01 && opc == 01 bb=b_13 option=1 F=FPR16 G=GPR64 + +:ldr Rt_FPR16, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR16 = * tmp2; + zext_zh(Zt); # zero upper 30 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT xbc600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst xbc600800/mask=xffe02c00 --status nomem +# 32-fsreg,LDR-32-fsreg variant when size == 10 && opc == 01 bb=b_13 option=0 F=FPR32 G=GPR32 + +:ldr Rt_FPR32, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR32 = * tmp2; + zext_zs(Zt); # zero upper 28 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT xbc602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst xbc602800/mask=xffe02c00 --status nomem +# 32-fsreg,LDR-32-fsreg variant when size == 10 && opc == 01 bb=b_13 option=1 F=FPR32 G=GPR64 + +:ldr Rt_FPR32, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = 
Rn_GPR64xsp + tmp1; + Rt_FPR32 = * tmp2; + zext_zs(Zt); # zero upper 28 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT xfc600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst xfc600800/mask=xffe02c00 --status nomem +# 64-fsreg,LDR-64-fsreg variant when size == 11 && opc == 01 bb=b_13 option=0 F=FPR64 G=GPR32 + +:ldr Rt_FPR64, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR64 = * tmp2; + zext_zd(Zt); # zero upper 24 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT xfc602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst xfc602800/mask=xffe02c00 --status nomem +# 64-fsreg,LDR-64-fsreg variant when size == 11 && opc == 01 bb=b_13 option=1 F=FPR64 G=GPR64 + +:ldr Rt_FPR64, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR64 = * tmp2; + zext_zd(Zt); # zero upper 24 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x3ce00800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x3ce00800/mask=xffe02c00 --status nomem +# 128-fsreg,LDR-128-fsreg variant when size == 00 && opc == 11 bb=b_13 option=0 F=FPR128 G=GPR32 + +:ldr Rt_FPR128, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR128 = * tmp2; + zext_zq(Zt); # zero upper 16 bytes of Zt +} + +# C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 +# CONSTRUCT x3ce02800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load +# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3 +# AUNIT --inst x3ce02800/mask=xffe02c00 --status nomem +# 128-fsreg,LDR-128-fsreg variant when size == 00 && opc == 11 bb=b_13 option=1 F=FPR128 G=GPR64 + +:ldr Rt_FPR128, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + Rt_FPR128 = * tmp2; + zext_zq(Zt); # zero upper 16 bytes of Zt +} + +# C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 +# CONSTRUCT x3cc00000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1 +# AUNIT --inst x3cc00000/mask=xffe00c00 --status nomem + +:ldur Rt_FPR128, addrIndexed +is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR128 & Zt +{ + Rt_FPR128 = * addrIndexed; + zext_zq(Zt); # zero upper 16 bytes of Zt +} + +# C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 +# CONSTRUCT x7c400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1 +# AUNIT --inst x7c400000/mask=xffe00c00 --status nomem + +:ldur Rt_FPR16, addrIndexed +is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR16 & Zt +{ + Rt_FPR16 = * addrIndexed; + zext_zh(Zt); # zero upper 30 bytes of Zt +} + +# C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 +# CONSTRUCT xbc400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1 +# AUNIT --inst xbc400000/mask=xffe00c00 --status nomem + +:ldur Rt_FPR32, addrIndexed +is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR32 & Zt +{ + Rt_FPR32 = * addrIndexed; + zext_zs(Zt); # zero upper 28 bytes of Zt +} + +# C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 +# CONSTRUCT xfc400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1 +# AUNIT --inst xfc400000/mask=xffe00c00 --status nomem + +:ldur Rt_FPR64, addrIndexed +is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR64 & Zt +{ + Rt_FPR64 = * addrIndexed; + zext_zd(Zt); # zero upper 24 bytes of Zt +} + +# C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 +# CONSTRUCT x3c400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =load +# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1 +# AUNIT --inst x3c400000/mask=xffe00c00 --status nomem + +:ldur Rt_FPR8, addrIndexed +is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR8 & Zt +{ + Rt_FPR8 = * addrIndexed; + zext_zb(Zt); # zero upper 31 bytes of Zt +} + +# C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 +# CONSTRUCT x2f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4 +# AUNIT --inst x2f800000/mask=xffc0f400 --status pass + +:mla Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & vIndex & Re_VPR128.S & b_1215=0x0 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S * tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] * tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * tmp1; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 +# CONSTRUCT x2f400000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2 +# AUNIT --inst 
x2f400000/mask=xffc0f400 --status pass + +:mla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x0 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H * tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] * tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * tmp1; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 +# CONSTRUCT x6f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4 +# AUNIT --inst x6f800000/mask=xffc0f400 --status pass + +:mla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x0 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = Rn_VPR128.4S * tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * tmp1; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 +# CONSTRUCT x6f400000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2 +# AUNIT --inst x6f400000/mask=xffc0f400 --status pass + +:mla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x0 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H * tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * tmp1; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = 
Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 +# CONSTRUCT x4e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@1 +# AUNIT --inst x4e209400/mask=xffe0fc00 --status pass + +:mla Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x12 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 + TMPQ1[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; + # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 +# CONSTRUCT x0ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4 +# AUNIT --inst x0ea09400/mask=xffe0fc00 --status pass + +:mla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x12 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix TMPD1 = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32]; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + 
Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 +# CONSTRUCT x0e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2 +# AUNIT --inst x0e609400/mask=xffe0fc00 --status pass + +:mla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x12 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix TMPD1 = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 +# CONSTRUCT x4ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4 +# AUNIT --inst x4ea09400/mask=xffe0fc00 --status pass + +:mla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 +# CONSTRUCT x0e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@1 +# AUNIT --inst x0e209400/mask=xffe0fc00 --status pass + +:mla Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x12 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix TMPD1 = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 + TMPD1[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; + TMPD1[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; + TMPD1[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; + TMPD1[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; + TMPD1[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; + TMPD1[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; + TMPD1[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; + TMPD1[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; + # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + 
Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 +# CONSTRUCT x4e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2 +# AUNIT --inst x4e609400/mask=xffe0fc00 --status pass + +:mla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 +# CONSTRUCT x2f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 +# AUNIT --inst x2f804000/mask=xffc0f400 --status pass + +:mls Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x4 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S * tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] * tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * tmp1; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 +# CONSTRUCT x2f404000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 +# AUNIT --inst x2f404000/mask=xffc0f400 --status pass + +:mls Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x4 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H * 
tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] * tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * tmp1; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H - TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] - TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] - TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] - TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] - TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 +# CONSTRUCT x6f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 +# AUNIT --inst x6f804000/mask=xffc0f400 --status pass + +:mls Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x4 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = Rn_VPR128.4S * tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * tmp1; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 +# CONSTRUCT x6f404000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $* &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 +# AUNIT --inst x6f404000/mask=xffc0f400 --status pass + +:mls Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x4 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H * tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * tmp1; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 +# CONSTRUCT x6e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@1 &=$-@1 +# SMACRO(pseudo) ARG1 
ARG2 ARG3 &=NEON_mls/3@1 +# AUNIT --inst x6e209400/mask=xffe0fc00 --status pass + +:mls Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x12 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 + TMPQ1[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; + # simd infix Rd_VPR128.16B = Rd_VPR128.16B - TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] - TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] - TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] - TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] - TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] - TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] - TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] - TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] - TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] - TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] - TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] - TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] - TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] - TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] - TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] - TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] - TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 +# CONSTRUCT x2ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 +# AUNIT --inst x2ea09400/mask=xffe0fc00 --status pass + +:mls Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x12 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix TMPD1 = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32]; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 +# CONSTRUCT x2e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@2 &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 +# AUNIT --inst x2e609400/mask=xffe0fc00 --status pass + +:mls Rd_VPR64.4H, 
Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x12 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix TMPD1 = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H - TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] - TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] - TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] - TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] - TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 +# CONSTRUCT x6ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 +# AUNIT --inst x6ea09400/mask=xffe0fc00 --status pass + +:mls Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 +# CONSTRUCT x2e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@1 &=$-@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@1 +# AUNIT --inst x2e209400/mask=xffe0fc00 --status pass + +:mls Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x12 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix TMPD1 = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 + TMPD1[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; + TMPD1[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; + TMPD1[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; + TMPD1[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; + TMPD1[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; + TMPD1[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; + TMPD1[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; + TMPD1[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; + # simd infix Rd_VPR64.8B = Rd_VPR64.8B - TMPD1 on lane size 1 + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] - TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] - TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] - TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] - TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] - TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] - TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] - TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] - TMPD1[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 +# CONSTRUCT 
x6e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $*@2 &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 +# AUNIT --inst x6e609400/mask=xffe0fc00 --status pass + +:mls Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 +# C7.2.175 INS (element) page C7-1784 line 99692 MATCH x6e000400/mask=xffe08400 +# CONSTRUCT x6e010400/mask=xffe18400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2 imm_neon_uimm4:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm4:1 &=NEON_mov/3@1 +# AUNIT --inst x6e010400/mask=xffe18400 --status pass + +:mov Rd_VPR128.B.imm_neon_uimm4, Rn_VPR128.B.immN_neon_uimm4 +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & Rn_VPR128.B.immN_neon_uimm4 & immN_neon_uimm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +{ + # simd element Rn_VPR128[immN_neon_uimm4] lane size 1 + local tmp1:1 = Rn_VPR128.B.immN_neon_uimm4; + # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp1 (lane size 1) + Rd_VPR128.B.imm_neon_uimm4 = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 +# C7.2.175 INS (element) page C7-1784 line 99692 MATCH x6e000400/mask=xffe08400 +# CONSTRUCT x6e080400/mask=xffef8400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2 imm_neon_uimm1:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm1:1 &=NEON_mov/3@8 +# AUNIT --inst x6e080400/mask=xffef8400 --status pass + +:mov Rd_VPR128.D.imm_neon_uimm1, Rn_VPR128.D.immN_neon_uimm1 +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & Rn_VPR128.D.immN_neon_uimm1 & immN_neon_uimm1 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +{ + # simd element Rn_VPR128[immN_neon_uimm1] lane size 8 + local tmp1:8 = Rn_VPR128.D.immN_neon_uimm1; + # simd copy Rd_VPR128 element imm_neon_uimm1:1 = tmp1 (lane size 8) + Rd_VPR128.D.imm_neon_uimm1 = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 +# C7.2.175 INS (element) page C7-1784 line 99692 MATCH x6e000400/mask=xffe08400 +# CONSTRUCT 
x6e020400/mask=xffe38400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2 imm_neon_uimm3:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm3:1 &=NEON_mov/3@2 +# AUNIT --inst x6e020400/mask=xffe38400 --status pass + +:mov Rd_VPR128.H.imm_neon_uimm3, Rn_VPR128.H.immN_neon_uimm3 +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & Rn_VPR128.H.immN_neon_uimm3 & immN_neon_uimm3 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +{ + # simd element Rn_VPR128[immN_neon_uimm3] lane size 2 + local tmp1:2 = Rn_VPR128.H.immN_neon_uimm3; + # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp1 (lane size 2) + Rd_VPR128.H.imm_neon_uimm3 = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 +# C7.2.175 INS (element) page C7-1784 line 99692 MATCH x6e000400/mask=xffe08400 +# CONSTRUCT x6e040400/mask=xffe78400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2 imm_neon_uimm2:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm2:1 &=NEON_mov/3@4 +# AUNIT --inst x6e040400/mask=xffe78400 --status pass + +:mov Rd_VPR128.S.imm_neon_uimm2, Rn_VPR128.S.immN_neon_uimm2 +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & Rn_VPR128.S.immN_neon_uimm2 & immN_neon_uimm2 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +{ + # simd element Rn_VPR128[immN_neon_uimm2] lane size 4 + local tmp1:4 = Rn_VPR128.S.immN_neon_uimm2; + # simd copy Rd_VPR128 element imm_neon_uimm2:1 = tmp1 (lane size 4) + Rd_VPR128.S.imm_neon_uimm2 = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 +# C7.2.176 INS (general) page C7-1786 line 99801 MATCH x4e001c00/mask=xffe0fc00 +# CONSTRUCT x4e011c00/mask=xffe1fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2[0]:1 imm_neon_uimm4:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm4:1 &=NEON_mov/3@1 +# AUNIT --inst x4e011c00/mask=xffe1fc00 --status pass + +:mov Rd_VPR128.B.imm_neon_uimm4, Rn_GPR32 +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd +{ + local tmp1:1 = Rn_GPR32[0,8]; + # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp1 (lane size 1) + Rd_VPR128.B.imm_neon_uimm4 = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 +# C7.2.176 INS (general) page C7-1786 line 99801 MATCH x4e001c00/mask=xffe0fc00 +# CONSTRUCT x4e081c00/mask=xffeffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2 imm_neon_uimm1:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm1:1 &=NEON_mov/3@8 +# AUNIT --inst x4e081c00/mask=xffeffc00 --status pass + +:mov Rd_VPR128.D.imm_neon_uimm1, Rn_GPR64 +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR64 & Rd_VPR128 & Zd +{ + # simd copy Rd_VPR128 element imm_neon_uimm1:1 = Rn_GPR64 (lane size 8) + Rd_VPR128.D.imm_neon_uimm1 = Rn_GPR64; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 +# C7.2.176 INS (general) page C7-1786 line 99801 MATCH x4e001c00/mask=xffe0fc00 +# CONSTRUCT x4e021c00/mask=xffe3fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO Rd_VPR128 ARG2[0]:2 imm_neon_uimm3:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 
ARG2 imm_neon_uimm3:1 &=NEON_mov/3@2 +# AUNIT --inst x4e021c00/mask=xffe3fc00 --status pass + +:mov Rd_VPR128.H.imm_neon_uimm3, Rn_GPR32 +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd +{ + local tmp1:2 = Rn_GPR32[0,16]; + # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp1 (lane size 2) + Rd_VPR128.H.imm_neon_uimm3 = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 +# C7.2.176 INS (general) page C7-1786 line 99801 MATCH x4e001c00/mask=xffe0fc00 +# CONSTRUCT x4e041c00/mask=xffe7fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO(force-primitive) Rd_VPR128 ARG2 imm_neon_uimm2:1 &=$copy +# SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm2:1 &=NEON_mov/3@2 +# AUNIT --inst x4e041c00/mask=xffe7fc00 --status pass + +:mov Rd_VPR128.S.imm_neon_uimm2, Rn_GPR32 +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd +{ + # simd copy Rd_VPR128 element imm_neon_uimm2:1 = Rn_GPR32 (lane size 4) + Rd_VPR128.S.imm_neon_uimm2 = Rn_GPR32; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.202 MOV (vector) page C7-1860 line 104306 MATCH x0ea01c00/mask=xbfe0fc00 +# C7.2.213 ORR (vector, register) page C7-1882 line 105515 MATCH x0ea01c00/mask=xbfe0fc00 +# CONSTRUCT x4ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@1 +# AUNIT --inst x4ea01c00/mask=xffe0fc00 --status pass + +:mov Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Rn=Rm & Zd +{ + Rd_VPR128.16B = Rn_VPR128.16B; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.202 MOV (vector) page C7-1860 line 104306 MATCH x0ea01c00/mask=xbfe0fc00 +# C7.2.213 ORR (vector, register) page C7-1882 line 105515 MATCH x0ea01c00/mask=xbfe0fc00 +# CONSTRUCT x0ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@1 +# AUNIT --inst x0ea01c00/mask=xffe0fc00 --status pass + +:mov Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Rn=Rm & Zd +{ + Rd_VPR64.8B = Rn_VPR64.8B; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.203 MOV (to general) page C7-1861 line 104373 MATCH x0e003c00/mask=xbfe3fc00 +# C7.2.371 UMOV page C7-2236 line 125692 MATCH x0e003c00/mask=xbfe0fc00 +# CONSTRUCT x0e043c00/mask=xffe7fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@4 +# AUNIT --inst x0e043c00/mask=xffe7fc00 --status pass + +:mov Rd_GPR32, Rn_VPR128.S.imm_neon_uimm2 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 +{ + # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + Rd_GPR32 = tmp1; + zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 +} + +# C7.2.203 MOV (to general) page C7-1861 line 104373 MATCH x0e003c00/mask=xbfe3fc00 +# C7.2.371 UMOV page C7-2236 line 125692 MATCH x0e003c00/mask=xbfe0fc00 +# CONSTRUCT x4e083c00/mask=xffeffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 
=NEON_mov/1@8 +# AUNIT --inst x4e083c00/mask=xffeffc00 --status pass + +:mov Rd_GPR64, Rn_VPR128.D.imm_neon_uimm1 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 + local tmp1:8 = Rn_VPR128.D.imm_neon_uimm1; + Rd_GPR64 = tmp1; +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x2f00e400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1 +# AUNIT --inst x2f00e400/mask=xfff8fc00 --status pass +# MOVI 64-bit scalar variant when datasize=64 q == 0 && op == 1 && cmode == 1110 + +:movi Rd_FPR64, Imm_neon_uimm8Shift +is b_31=0 & b_30=0 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_FPR64 & Zd +{ + Rd_FPR64 = Imm_neon_uimm8Shift:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x4f00e400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:1 &=$dup +# SMACRO(pseudo) ARG1 ARG2:1 =NEON_movi/1@1 +# AUNIT --inst x4f00e400/mask=xfff8fc00 --status pass +# MOVI 8-bit variant when datasize=128 q == 1 && op == 0 && cmode == 0b1110 + +:movi Rd_VPR128.16B, Imm_neon_uimm8Shift +is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.16B & Zd +{ + # simd duplicate Rd_VPR128.16B = all elements Imm_neon_uimm8Shift:1 (lane size 1) + Rd_VPR128.16B[0,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[8,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[16,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[24,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[32,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[40,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[48,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[56,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[64,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[72,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[80,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[88,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[96,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[104,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[112,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[120,8] = Imm_neon_uimm8Shift:1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x6f00e400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2 =var:8 &=$dup +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@8 +# AUNIT --inst x6f00e400/mask=xfff8fc00 --status pass +# MOVI 64-bit vector variant when datasize=128 q == 1 && op == 1 && cmode == 1110 + +:movi Rd_VPR128.2D, Imm_neon_uimm8Shift +is b_31=0 & b_30=1 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.2D & Zd +{ + local tmp1:8 = Imm_neon_uimm8Shift; + # simd duplicate Rd_VPR128.2D = all elements tmp1 (lane size 8) + Rd_VPR128.2D[0,64] = tmp1; + Rd_VPR128.2D[64,64] = tmp1; + zext_zq(Zd); # zero upper 16 bytes 
of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x0f00e400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@1 +# AUNIT --inst x0f00e400/mask=xfff8fc00 --status pass +# MOVI 8-bit variant when datasize=64 q == 0 && op == 0 && cmode == 1110 + +:movi Rd_VPR64.8B, Imm_neon_uimm8Shift +is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = Imm_neon_uimm8Shift:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x0f000400/mask=xfff89c00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@4 +# AUNIT --inst x0f000400/mask=xfff89c00 --status pass +# MOVI 32-bit shifted immediate variant when datasize=64 q == 0 && op == 0 && cmode == 0xx0 + +:movi Rd_VPR64.2S, Imm_neon_uimm8Shift +is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = Imm_neon_uimm8Shift:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x0f008400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@2 +# AUNIT --inst x0f008400/mask=xfff8dc00 --status pass +# MOVI 16-bit shifted immediate variant when datasize=64 q == 0 && op == 0 && cmode == 10x0 + +:movi Rd_VPR64.4H, Imm_neon_uimm8Shift +is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = Imm_neon_uimm8Shift:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x4f000400/mask=xfff89c00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2:4 &=$dup +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@4 +# AUNIT --inst x4f000400/mask=xfff89c00 --status pass +# MOVI 32-bit shifted immediate variant when datasize=128 q == 1 && op == 0 && cmode == 0xx0 + +:movi Rd_VPR128.4S, Imm_neon_uimm8Shift +is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.4S & Zd +{ + # simd duplicate Rd_VPR128.4S = all elements Imm_neon_uimm8Shift:4 (lane size 4) + Rd_VPR128.4S[0,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[32,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[64,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[96,32] = Imm_neon_uimm8Shift:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH 
x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x4f008400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:2 &=$dup +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@2 +# AUNIT --inst x4f008400/mask=xfff8dc00 --status pass +# MOVI 16-bit shifted immediate variant when datasize=128 q == 1 && op == 0 && cmode == 10x0 + +:movi Rd_VPR128.8H, Imm_neon_uimm8Shift +is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.8H & Zd +{ + # simd duplicate Rd_VPR128.8H = all elements Imm_neon_uimm8Shift:2 (lane size 2) + Rd_VPR128.8H[0,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[16,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[32,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[48,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[64,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[80,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[96,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[112,16] = Imm_neon_uimm8Shift:2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 +# CONSTRUCT x0f00c400/mask=xfff8ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 = +# SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@4 +# AUNIT --inst x0f00c400/mask=xfff8ec00 --status pass +# MOVI 32-bit shifting ones variant when datasize=64 q == 0 && op == 0 && cmode == 110x + +:movi Rd_VPR64.2S, Imm_neon_uimm8Shift +is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = Imm_neon_uimm8Shift:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 +# CONSTRUCT x4f00c400/mask=xfff8ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:4 &=$dup +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_movi/1@4 +# AUNIT --inst x4f00c400/mask=xfff8ec00 --status pass +# MOVI 32-bit shifting ones variant when datasize=128 q == 1 && op == 0 && cmode == 110x + +:movi Rd_VPR128.4S, Imm_neon_uimm8Shift +is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.4S & Zd +{ + # simd duplicate Rd_VPR128.4S = all elements Imm_neon_uimm8Shift:4 (lane size 4) + Rd_VPR128.4S[0,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[32,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[64,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[96,32] = Imm_neon_uimm8Shift:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 +# CONSTRUCT x0f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 +# AUNIT --inst x0f808000/mask=xffc0f400 --status pass + +:mul Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x8 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S * tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] * tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] * tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.205 MUL (by element) page C7-1866 line 104646 
MATCH x0f008000/mask=xbf00f400 +# CONSTRUCT x0f408000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 +# AUNIT --inst x0f408000/mask=xffc0f400 --status pass + +:mul Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x8 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H * tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] * tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] * tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] * tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] * tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 +# CONSTRUCT x4f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(force-primitive) ARG1 ARG2 ARG3 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 +# AUNIT --inst x4f808000/mask=xffc0f400 --status pass + +:mul Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x8 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S * tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] * tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] * tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] * tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] * tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 +# CONSTRUCT x4f408000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 +# AUNIT --inst x4f408000/mask=xffc0f400 --status pass + +:mul Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x8 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H * tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] * tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] * tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] * tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] * tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] * tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] * tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] * tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] * tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 +# CONSTRUCT x4e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$*@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@1 +# AUNIT --inst x4e209c00/mask=xffe0fc00 --status pass + +:mul Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x13 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; + 
Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 +# CONSTRUCT x0ea09c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 +# AUNIT --inst x0ea09c00/mask=xffe0fc00 --status pass + +:mul Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x13 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 +# CONSTRUCT x0e609c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 +# AUNIT --inst x0e609c00/mask=xffe0fc00 --status pass + +:mul Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x13 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 +# CONSTRUCT x4ea09c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 +# AUNIT --inst x4ea09c00/mask=xffe0fc00 --status pass + +:mul Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 +# 
CONSTRUCT x0e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$*@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@1 +# AUNIT --inst x0e209c00/mask=xffe0fc00 --status pass + +:mul Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x13 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix Rd_VPR64.8B = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 +# CONSTRUCT x4e609c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 +# AUNIT --inst x4e609c00/mask=xffe0fc00 --status pass + +:mul Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.207 MVN page C7-1870 line 104876 MATCH x2e205800/mask=xbffffc00 +# C7.2.210 NOT page C7-1876 line 105222 MATCH x2e205800/mask=xbffffc00 +# CONSTRUCT x6e205800/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$~@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_mvn/1@1 +# AUNIT --inst x6e205800/mask=xfffffc00 --status pass + +:mvn Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd unary Rd_VPR128.16B = ~(Rn_VPR128.16B) on lane size 1 + Rd_VPR128.16B[0,8] = ~(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = ~(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = ~(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = ~(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = ~(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = ~(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = ~(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = ~(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = ~(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = ~(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = ~(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = ~(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = ~(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = ~(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = ~(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = ~(Rn_VPR128.16B[120,8]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.207 MVN page C7-1870 line 104876 MATCH 
x2e205800/mask=xbffffc00 +# C7.2.210 NOT page C7-1876 line 105222 MATCH x2e205800/mask=xbffffc00 +# CONSTRUCT x2e205800/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$~@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_mvn/1@1 +# AUNIT --inst x2e205800/mask=xfffffc00 --status pass + +:mvn Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd unary Rd_VPR64.8B = ~(Rn_VPR64.8B) on lane size 1 + Rd_VPR64.8B[0,8] = ~(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = ~(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = ~(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = ~(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = ~(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = ~(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = ~(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = ~(Rn_VPR64.8B[56,8]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x2f000400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:4 ~ &=$dup +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 +# AUNIT --inst x2f000400/mask=xfff89c00 --status pass + +:mvni Rd_VPR64.2S, Imm_neon_uimm8Shift +is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & Imm_neon_uimm8Shift & b_1012=1 & Rd_VPR64.2S & Zd +{ + local tmp1:4 = ~ Imm_neon_uimm8Shift:4; + # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) + Rd_VPR64.2S[0,32] = tmp1; + Rd_VPR64.2S[32,32] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x2f008400/mask=xfff8dc00 MATCHED 5 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:2 ~ &=$dup +# SMACRO(pseudo) ARG1 ARG2:2 =NEON_mvni/1@2 +# AUNIT --inst x2f008400/mask=xfff8dc00 --status pass + +:mvni Rd_VPR64.4H, Imm_neon_uimm8Shift +is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=1 & Rd_VPR64.4H & Zd +{ + local tmp1:2 = ~ Imm_neon_uimm8Shift:2; + # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x6f000400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:4 ~ &=$dup +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 +# AUNIT --inst 
x6f000400/mask=xfff89c00 --status pass + +:mvni Rd_VPR128.4S, Imm_neon_uimm8Shift +is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=1 & Rd_VPR128.4S & Zd +{ + local tmp1:4 = ~ Imm_neon_uimm8Shift:4; + # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) + Rd_VPR128.4S[0,32] = tmp1; + Rd_VPR128.4S[32,32] = tmp1; + Rd_VPR128.4S[64,32] = tmp1; + Rd_VPR128.4S[96,32] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x6f008400/mask=xfff8dc00 MATCHED 5 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:2 ~ &=$dup +# SMACRO(pseudo) ARG1 ARG2:2 =NEON_mvni/1@2 +# AUNIT --inst x6f008400/mask=xfff8dc00 --status pass + +:mvni Rd_VPR128.8H, Imm_neon_uimm8Shift +is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=1 & Rd_VPR128.8H & Zd +{ + local tmp1:2 = ~ Imm_neon_uimm8Shift:2; + # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# CONSTRUCT x2f00c400/mask=xfff8ec00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:4 ~ &=$dup +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 +# AUNIT --inst x2f00c400/mask=xfff8ec00 --status pass + +:mvni Rd_VPR64.2S, Imm_neon_uimm8Shift +is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1315=6 & b_1011=1 & Rd_VPR64.2S & Zd +{ + local tmp1:4 = ~ Imm_neon_uimm8Shift:4; + # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) + Rd_VPR64.2S[0,32] = tmp1; + Rd_VPR64.2S[32,32] = tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 +# C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 +# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 +# CONSTRUCT x6f00c400/mask=xfff8ec00 MATCHED 3 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2:4 ~ &=$dup +# SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 +# AUNIT --inst x6f00c400/mask=xfff8ec00 --status pass + +:mvni Rd_VPR128.4S, Imm_neon_uimm8Shift +is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1315=6 & b_1011=1 & Rd_VPR128.4S & Zd +{ + local tmp1:4 = ~ Imm_neon_uimm8Shift:4; + # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) + Rd_VPR128.4S[0,32] = tmp1; + Rd_VPR128.4S[32,32] = tmp1; + Rd_VPR128.4S[64,32] = tmp1; + Rd_VPR128.4S[96,32] = tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x7e20b800/mask=xff3ffc00 +# CONSTRUCT x7ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =2comp +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1 +# AUNIT --inst 
x7ee0b800/mask=xfffffc00 --status pass + +:neg Rd_VPR64, Rn_VPR64 +is b_3131=0 & q=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64 & Rd_VPR64 & Zd +{ + Rd_VPR64 = - Rn_VPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x2e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@1 +# AUNIT --inst x2e20b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_neg(Rn_VPR64.8B, 1:1); +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x6e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@1 +# AUNIT --inst x6e20b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_neg(Rn_VPR128.16B, 1:1); +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x2e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@2 +# AUNIT --inst x2e60b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR64.4H, Rn_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_neg(Rn_VPR64.4H, 2:1); +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x6e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@2 +# AUNIT --inst x6e60b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR128.8H, Rn_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_neg(Rn_VPR128.8H, 2:1); +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x2ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@4 +# AUNIT --inst x2ea0b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR64.2S, Rn_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_neg(Rn_VPR64.2S, 4:1); +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x6ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@4 +# AUNIT --inst x6ea0b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR128.4S, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_neg(Rn_VPR128.4S, 4:1); +} + +# C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 +# CONSTRUCT x6ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@8 +# AUNIT --inst x6ee0b800/mask=xfffffc00 --status nopcodeop + +:neg Rd_VPR128.2D, Rn_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_neg(Rn_VPR128.2D, 8:1); +} + +# C7.2.211 ORN (vector) 
page C7-1878 line 105307 MATCH x0ee01c00/mask=xbfe0fc00 +# CONSTRUCT x4ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $~@1 =$|@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_orn/2@1 +# AUNIT --inst x4ee01c00/mask=xffe0fc00 --status pass + +:orn Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd unary TMPQ1 = ~(Rm_VPR128.16B) on lane size 1 + TMPQ1[0,8] = ~(Rm_VPR128.16B[0,8]); + TMPQ1[8,8] = ~(Rm_VPR128.16B[8,8]); + TMPQ1[16,8] = ~(Rm_VPR128.16B[16,8]); + TMPQ1[24,8] = ~(Rm_VPR128.16B[24,8]); + TMPQ1[32,8] = ~(Rm_VPR128.16B[32,8]); + TMPQ1[40,8] = ~(Rm_VPR128.16B[40,8]); + TMPQ1[48,8] = ~(Rm_VPR128.16B[48,8]); + TMPQ1[56,8] = ~(Rm_VPR128.16B[56,8]); + TMPQ1[64,8] = ~(Rm_VPR128.16B[64,8]); + TMPQ1[72,8] = ~(Rm_VPR128.16B[72,8]); + TMPQ1[80,8] = ~(Rm_VPR128.16B[80,8]); + TMPQ1[88,8] = ~(Rm_VPR128.16B[88,8]); + TMPQ1[96,8] = ~(Rm_VPR128.16B[96,8]); + TMPQ1[104,8] = ~(Rm_VPR128.16B[104,8]); + TMPQ1[112,8] = ~(Rm_VPR128.16B[112,8]); + TMPQ1[120,8] = ~(Rm_VPR128.16B[120,8]); + # simd infix Rd_VPR128.16B = Rn_VPR128.16B | TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.211 ORN (vector) page C7-1878 line 105307 MATCH x0ee01c00/mask=xbfe0fc00 +# CONSTRUCT x0ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $~@1 =$|@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_orn/2@1 +# AUNIT --inst x0ee01c00/mask=xffe0fc00 --status pass + +:orn Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd unary TMPD1 = ~(Rm_VPR64.8B) on lane size 1 + TMPD1[0,8] = ~(Rm_VPR64.8B[0,8]); + TMPD1[8,8] = ~(Rm_VPR64.8B[8,8]); + TMPD1[16,8] = ~(Rm_VPR64.8B[16,8]); + TMPD1[24,8] = ~(Rm_VPR64.8B[24,8]); + TMPD1[32,8] = ~(Rm_VPR64.8B[32,8]); + TMPD1[40,8] = ~(Rm_VPR64.8B[40,8]); + TMPD1[48,8] = ~(Rm_VPR64.8B[48,8]); + TMPD1[56,8] = ~(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR64.8B = Rn_VPR64.8B | TMPD1 on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] | TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] | TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] | TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] | TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] | TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] | TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] | TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] | TMPD1[56,8]; + 
zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00
+# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00
+# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00
+# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00
+# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00
+# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00
+# CONSTRUCT x0f001400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2:4 &=$|@4
+# SMACRO(pseudo) ARG1 ARG2:4 &=NEON_orr/2@4
+# AUNIT --inst x0f001400/mask=xfff89c00 --status pass
+
+:orr Rd_VPR64.2S, Imm_neon_uimm8Shift
+is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=5 & Rd_VPR64.2S & Zd
+{
+ # simd infix Rd_VPR64.2S = Rd_VPR64.2S | Imm_neon_uimm8Shift:4 on lane size 4
+ Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] | Imm_neon_uimm8Shift:4;
+ Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] | Imm_neon_uimm8Shift:4;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00
+# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00
+# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00
+# CONSTRUCT x0f009400/mask=xfff8dc00 MATCHED 3 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2:2 &=$|@2
+# SMACRO(pseudo) ARG1 ARG2:2 &=NEON_orr/2@2
+# AUNIT --inst x0f009400/mask=xfff8dc00 --status pass
+
+:orr Rd_VPR64.4H, Imm_neon_uimm8Shift
+is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=5 & Rd_VPR64.4H & Zd
+{
+ # simd infix Rd_VPR64.4H = Rd_VPR64.4H | Imm_neon_uimm8Shift:2 on lane size 2
+ Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] | Imm_neon_uimm8Shift:2;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00
+# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00
+# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00
+# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00
+# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00
+# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00
+# CONSTRUCT x4f001400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2:4 &=$|
+# SMACRO(pseudo) ARG1 ARG2:4 &=NEON_orr/2@4
+# AUNIT --inst x4f001400/mask=xfff89c00 --status pass
+
+:orr Rd_VPR128.4S, Imm_neon_uimm8Shift
+is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=5 & Rd_VPR128.4S & Zd
+{
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S | Imm_neon_uimm8Shift:4 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] | Imm_neon_uimm8Shift:4;
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] | Imm_neon_uimm8Shift:4;
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] | Imm_neon_uimm8Shift:4;
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] | Imm_neon_uimm8Shift:4;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
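+
+# A hand-worked example of the ORR (vector, immediate) lane semantics above
+# (illustrative values only; assuming imm8=0x55 with LSL #8, so that
+# Imm_neon_uimm8Shift expands to the 32-bit pattern 0x00005500):
+#
+#   Vd.4S = {0x00000001, 0xF0000000, 0x00005500, 0x00000000}
+#   orr    Vd.4S, #0x55, lsl #8
+#   Vd.4S = {0x00005501, 0xF0005500, 0x00005500, 0x00005500}
+#
+# Each 32-bit lane is updated in place as Rd[i] = Rd[i] | 0x00005500,
+# which is what the per-lane pcode statements compute.
+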
+# C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00
+# C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00
+# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00
+# CONSTRUCT x4f009400/mask=xfff8dc00 MATCHED 3 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2:2 &=$|
+# SMACRO(pseudo) ARG1 ARG2:2 &=NEON_orr/2@2
+# AUNIT --inst x4f009400/mask=xfff8dc00 --status pass
+
+:orr Rd_VPR128.8H, Imm_neon_uimm8Shift
+is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=5 & Rd_VPR128.8H & Zd
+{
+ # simd infix Rd_VPR128.8H = Rd_VPR128.8H | Imm_neon_uimm8Shift:2 on lane size 2
+ Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] | Imm_neon_uimm8Shift:2;
+ Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] | Imm_neon_uimm8Shift:2;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.213 ORR (vector, register) page C7-1882 line 105515 MATCH x0ea01c00/mask=xbfe0fc00
+# C7.2.202 MOV (vector) page C7-1860 line 104306 MATCH x0ea01c00/mask=xbfe0fc00
+# CONSTRUCT x4ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$|@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_orr/2@1
+# AUNIT --inst x4ea01c00/mask=xffe0fc00 --status pass
+
+:orr Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+ # simd infix Rd_VPR128.16B = Rn_VPR128.16B | Rm_VPR128.16B on lane size 1
+ Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | Rm_VPR128.16B[0,8];
+ Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | Rm_VPR128.16B[8,8];
+ Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | Rm_VPR128.16B[16,8];
+ Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | Rm_VPR128.16B[24,8];
+ Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | Rm_VPR128.16B[32,8];
+ Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | Rm_VPR128.16B[40,8];
+ Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | Rm_VPR128.16B[48,8];
+ Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | Rm_VPR128.16B[56,8];
+ Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | Rm_VPR128.16B[64,8];
+ Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | Rm_VPR128.16B[72,8];
+ Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | Rm_VPR128.16B[80,8];
+ Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | Rm_VPR128.16B[88,8];
+ Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | Rm_VPR128.16B[96,8];
+ Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | Rm_VPR128.16B[104,8];
+ Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | Rm_VPR128.16B[112,8];
+ Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | Rm_VPR128.16B[120,8];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.213 ORR (vector, register) page C7-1882 line 105515 MATCH x0ea01c00/mask=xbfe0fc00
+# C7.2.202 MOV (vector) page C7-1860 line 104306 MATCH x0ea01c00/mask=xbfe0fc00
+# CONSTRUCT x0ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =$|@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_orr/2@1
+# AUNIT --inst x0ea01c00/mask=xffe0fc00 --status pass
+
+:orr Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+ # simd infix Rd_VPR64.8B = Rn_VPR64.8B | Rm_VPR64.8B on lane size 1
+ Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] |
Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] | Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] | Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] | Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] | Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] | Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] | Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] | Rm_VPR64.8B[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.214 PMUL page C7-1884 line 105605 MATCH x2e209c00/mask=xbf20fc00 +# CONSTRUCT x6e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmul/2@1 +# AUNIT --inst x6e209c00/mask=xffe0fc00 --status nopcodeop + +:pmul Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x13 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_pmul(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.214 PMUL page C7-1884 line 105605 MATCH x2e209c00/mask=xbf20fc00 +# CONSTRUCT x2e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmul/2@1 +# AUNIT --inst x2e209c00/mask=xffe0fc00 --status nopcodeop + +:pmul Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x13 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_pmul(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 +# CONSTRUCT x0ee0e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull/2@8 +# AUNIT --inst x0ee0e000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:pmull Rd_VPR128.1Q, Rn_VPR64.1D, Rm_VPR64.1D +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.1D & b_1215=0xe & b_1011=0 & Rn_VPR64.1D & Rd_VPR128.1Q & Zd +{ + Rd_VPR128.1Q = NEON_pmull(Rn_VPR64.1D, Rm_VPR64.1D, 8:1); +} + +# C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 +# CONSTRUCT x0e20e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull/2@1 +# AUNIT --inst x0e20e000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:pmull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xe & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_pmull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 +# CONSTRUCT x4ee0e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull2/2@8 +# AUNIT --inst x4ee0e000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:pmull2 Rd_VPR128.1Q, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1215=0xe & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.1Q & Zd +{ + Rd_VPR128.1Q = NEON_pmull2(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 +# CONSTRUCT x4e20e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull2/2@1 +# AUNIT --inst x4e20e000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:pmull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xe & b_1011=0 & 
Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_pmull2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 +# CONSTRUCT x6e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@2 0x80:2 &=$+@2 &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn2/3@2 +# AUNIT --inst x6e204000/mask=xffe0fc00 --status pass --comment "intround" + +:raddhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; + # simd infix TMPQ1 = TMPQ1 + 0x80:2 on lane size 2 + TMPQ1[0,16] = TMPQ1[0,16] + 0x80:2; + TMPQ1[16,16] = TMPQ1[16,16] + 0x80:2; + TMPQ1[32,16] = TMPQ1[32,16] + 0x80:2; + TMPQ1[48,16] = TMPQ1[48,16] + 0x80:2; + TMPQ1[64,16] = TMPQ1[64,16] + 0x80:2; + TMPQ1[80,16] = TMPQ1[80,16] + 0x80:2; + TMPQ1[96,16] = TMPQ1[96,16] + 0x80:2; + TMPQ1[112,16] = TMPQ1[112,16] + 0x80:2; + # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 + Rd_VPR128.16B[64,8] = TMPQ1[8,8]; + Rd_VPR128.16B[72,8] = TMPQ1[24,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[88,8] = TMPQ1[56,8]; + Rd_VPR128.16B[96,8] = TMPQ1[72,8]; + Rd_VPR128.16B[104,8] = TMPQ1[88,8]; + Rd_VPR128.16B[112,8] = TMPQ1[104,8]; + Rd_VPR128.16B[120,8] = TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 +# CONSTRUCT x6ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@8 0x80000000:8 &=$+@8 &=$shuffle@1-2@3-3:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn2/3@8 +# AUNIT --inst x6ea04000/mask=xffe0fc00 --status pass --comment "intround" + +:raddhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; + # simd infix TMPQ1 = TMPQ1 + 0x80000000:8 on lane size 8 + TMPQ1[0,64] = TMPQ1[0,64] + 0x80000000:8; + TMPQ1[64,64] = TMPQ1[64,64] + 0x80000000:8; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 +# CONSTRUCT x6e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $+@4 0x8000:4 &=$+@4 &=$shuffle@1-4@3-5@5-6@7-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn2/3@4 +# AUNIT --inst x6e604000/mask=xffe0fc00 --status pass --comment "intround" + +:raddhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe 
& advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; + # simd infix TMPQ1 = TMPQ1 + 0x8000:4 on lane size 4 + TMPQ1[0,32] = TMPQ1[0,32] + 0x8000:4; + TMPQ1[32,32] = TMPQ1[32,32] + 0x8000:4; + TMPQ1[64,32] = TMPQ1[64,32] + 0x8000:4; + TMPQ1[96,32] = TMPQ1[96,32] + 0x8000:4; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 + Rd_VPR128.8H[64,16] = TMPQ1[16,16]; + Rd_VPR128.8H[80,16] = TMPQ1[48,16]; + Rd_VPR128.8H[96,16] = TMPQ1[80,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 +# CONSTRUCT x2ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn/3@8 +# AUNIT --inst x2ea04000/mask=xffe0fc00 --status nopcodeop + +:raddhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_raddhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 +# CONSTRUCT x2e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn/3@4 +# AUNIT --inst x2e604000/mask=xffe0fc00 --status nopcodeop + +:raddhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_raddhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 +# CONSTRUCT x2e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn/3@2 +# AUNIT --inst x2e204000/mask=xffe0fc00 --status nopcodeop + +:raddhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_raddhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.217 RAX1 page C7-1890 line 105949 MATCH xce608c00/mask=xffe0fc00 +# CONSTRUCT xce608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 1:8 $<<@8 =$|@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_rax1/2@8 +# AUNIT --inst xce608c00/mask=xffe0fc00 --status noqemu + +:rax1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_2131=0b11001110011 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd +{ + # RAX1 is Vd = Vn EOR ROL(Vm, 1) per 64-bit lane (ARM ARM C7.2.217); a plain + # shift-and-OR would drop the rotated-out bit and lose the exclusive-OR, so + # rotate each lane left by 1, then XOR with Rn. + # simd rotate TMPQ1 = ROL(Rm_VPR128.2D, 1) on lane size 8 + TMPQ1[0,64] = (Rm_VPR128.2D[0,64] << 1) | (Rm_VPR128.2D[0,64] >> 63); + TMPQ1[64,64] = (Rm_VPR128.2D[64,64] << 1) | (Rm_VPR128.2D[64,64] >> 63); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D ^ TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] ^ TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] ^ TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.218 RBIT (vector) page C7-1891 line 106016 MATCH x2e605800/mask=xbffffc00 +# CONSTRUCT x2e605800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rbit/1@1 +# AUNIT --inst x2e605800/mask=xfffffc00 
--status nopcodeop + +:rbit Rd_VPR64.8B, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_1029=0b10111001100000010110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_rbit(Rn_VPR64.8B, 1:1); +} + +# C7.2.218 RBIT (vector) page C7-1891 line 106016 MATCH x2e605800/mask=xbffffc00 +# CONSTRUCT x6e605800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rbit/1@1 +# AUNIT --inst x6e605800/mask=xfffffc00 --status nopcodeop + +:rbit Rd_VPR128.16B, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_1029=0b10111001100000010110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_rbit(Rn_VPR128.16B, 1:1); +} + +# C7.2.219 REV16 (vector) page C7-1893 line 106101 MATCH x0e201800/mask=xbf3ffc00 +# CONSTRUCT x4e201800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev16/1@1 +# AUNIT --inst x4e201800/mask=xfffffc00 --status nopcodeop + +:rev16 Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_rev16(Rn_VPR128.16B, 1:1); +} + +# C7.2.219 REV16 (vector) page C7-1893 line 106101 MATCH x0e201800/mask=xbf3ffc00 +# CONSTRUCT x0e201800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev16/1@1 +# AUNIT --inst x0e201800/mask=xfffffc00 --status nopcodeop + +:rev16 Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_rev16(Rn_VPR64.8B, 1:1); +} + +# C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 +# CONSTRUCT x6e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@1 +# AUNIT --inst x6e200800/mask=xfffffc00 --status nopcodeop + +:rev32 Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_rev32(Rn_VPR128.16B, 1:1); +} + +# C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 +# CONSTRUCT x2e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@2 +# AUNIT --inst x2e600800/mask=xfffffc00 --status nopcodeop + +:rev32 Rd_VPR64.4H, Rn_VPR64.4H +is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_rev32(Rn_VPR64.4H, 2:1); +} + +# C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 +# CONSTRUCT x2e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@1 +# AUNIT --inst x2e200800/mask=xfffffc00 --status nopcodeop + +:rev32 Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_rev32(Rn_VPR64.8B, 1:1); +} + +# C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 +# CONSTRUCT x6e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@2 +# AUNIT --inst x6e600800/mask=xfffffc00 --status nopcodeop + +:rev32 Rd_VPR128.8H, Rn_VPR128.8H +is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_rev32(Rn_VPR128.8H, 2:1); +} + +# C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 +# 
CONSTRUCT x4e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@1 +# AUNIT --inst x4e200800/mask=xfffffc00 --status nopcodeop + +:rev64 Rd_VPR128.16B, Rn_VPR128.16B +is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_rev64(Rn_VPR128.16B, 1:1); +} + +# C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 +# CONSTRUCT x0ea00800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@4 +# AUNIT --inst x0ea00800/mask=xfffffc00 --status nopcodeop + +:rev64 Rd_VPR64.2S, Rn_VPR64.2S +is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_rev64(Rn_VPR64.2S, 4:1); +} + +# C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 +# CONSTRUCT x0e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@2 +# AUNIT --inst x0e600800/mask=xfffffc00 --status nopcodeop + +:rev64 Rd_VPR64.4H, Rn_VPR64.4H +is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_rev64(Rn_VPR64.4H, 2:1); +} + +# C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 +# CONSTRUCT x4ea00800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@4 +# AUNIT --inst x4ea00800/mask=xfffffc00 --status nopcodeop + +:rev64 Rd_VPR128.4S, Rn_VPR128.4S +is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_rev64(Rn_VPR128.4S, 4:1); +} + +# C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 +# CONSTRUCT x0e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@1 +# AUNIT --inst x0e200800/mask=xfffffc00 --status nopcodeop + +:rev64 Rd_VPR64.8B, Rn_VPR64.8B +is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_rev64(Rn_VPR64.8B, 1:1); +} + +# C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 +# CONSTRUCT x4e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@2 +# AUNIT --inst x4e600800/mask=xfffffc00 --status nopcodeop + +:rev64 Rd_VPR128.8H, Rn_VPR128.8H +is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_rev64(Rn_VPR128.8H, 2:1); +} + +# C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 +# CONSTRUCT x4f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn2/3@2 +# AUNIT --inst x4f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround" + +:rshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_rshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 +# CONSTRUCT x0f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn/3@8 +# AUNIT --inst x0f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + 
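+# Editor's note: RSHRN/RSHRN2 add a rounding constant of 1 << (shift-1) to +# each source lane before the right shift and narrowing; the rshrn/rshrn2 +# variants in this group defer that semantic to the NEON_rshrn/NEON_rshrn2 +# pcodeops rather than modeling the rounding inline, which the "nointround" +# tag appears to record. +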
+:rshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_rshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 +# CONSTRUCT x0f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn/3@4 +# AUNIT --inst x0f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround" + +:rshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_rshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 +# CONSTRUCT x4f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn2/3@8 +# AUNIT --inst x4f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_rshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 +# CONSTRUCT x0f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn/3@2 +# AUNIT --inst x0f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround" + +:rshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_rshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 +# CONSTRUCT x4f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn2/3@4 +# AUNIT --inst x4f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround" + +:rshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_rshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 +# CONSTRUCT x6e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn2/3@2 +# AUNIT --inst x6e206000/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rsubhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_rsubhn2(Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 +# CONSTRUCT x6ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn2/3@8 +# AUNIT --inst x6ea06000/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rsubhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_rsubhn2(Rd_VPR128.4S, 
Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 +# CONSTRUCT x6e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn2/3@4 +# AUNIT --inst x6e606000/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rsubhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_rsubhn2(Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 +# CONSTRUCT x2ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn/3@8 +# AUNIT --inst x2ea06000/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rsubhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_rsubhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 +# CONSTRUCT x2e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn/3@4 +# AUNIT --inst x2e606000/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rsubhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_rsubhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 +# CONSTRUCT x2e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn/3@2 +# AUNIT --inst x2e206000/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:rsubhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_rsubhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 +# CONSTRUCT x4e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@1 +# AUNIT --inst x4e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:saba Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xf & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_saba(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 +# CONSTRUCT x0ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@4 +# AUNIT --inst x0ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:saba Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xf & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_saba(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 +# CONSTRUCT x0e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@2 +# 
AUNIT --inst x0e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:saba Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xf & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_saba(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 +# CONSTRUCT x4ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@4 +# AUNIT --inst x4ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:saba Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xf & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_saba(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 +# CONSTRUCT x0e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@1 +# AUNIT --inst x0e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:saba Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xf & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_saba(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 +# CONSTRUCT x4e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@2 +# AUNIT --inst x4e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:saba Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xf & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_saba(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 +# CONSTRUCT x0ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $-@8 $abs@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal/3@4 +# AUNIT --inst x0ea05000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x5 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; + # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 8 + TMPQ4[0,64] = MP_INT_ABS(TMPQ3[0,64]); + TMPQ4[64,64] = MP_INT_ABS(TMPQ3[64,64]); + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 +# CONSTRUCT x0e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $-@4 $abs@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal/3@2 +# AUNIT 
--inst x0e605000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x5 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; + # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 4 + TMPQ4[0,32] = MP_INT_ABS(TMPQ3[0,32]); + TMPQ4[32,32] = MP_INT_ABS(TMPQ3[32,32]); + TMPQ4[64,32] = MP_INT_ABS(TMPQ3[64,32]); + TMPQ4[96,32] = MP_INT_ABS(TMPQ3[96,32]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 +# CONSTRUCT x0e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $-@2 $abs@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal/3@1 +# AUNIT --inst x0e205000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x5 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; + # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 2 + TMPQ4[0,16] = MP_INT_ABS(TMPQ3[0,16]); + TMPQ4[16,16] = MP_INT_ABS(TMPQ3[16,16]); + TMPQ4[32,16] = MP_INT_ABS(TMPQ3[32,16]); + TMPQ4[48,16] = MP_INT_ABS(TMPQ3[48,16]); + TMPQ4[64,16] = MP_INT_ABS(TMPQ3[64,16]); + TMPQ4[80,16] 
= MP_INT_ABS(TMPQ3[80,16]); + TMPQ4[96,16] = MP_INT_ABS(TMPQ3[96,16]); + TMPQ4[112,16] = MP_INT_ABS(TMPQ3[112,16]); + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ4 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ4[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 +# CONSTRUCT x4ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $-@8 $abs@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal2/3@4 +# AUNIT --inst x4ea05000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x5 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; + # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 8 + TMPQ6[0,64] = MP_INT_ABS(TMPQ5[0,64]); + TMPQ6[64,64] = MP_INT_ABS(TMPQ5[64,64]); + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 +# CONSTRUCT x4e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $-@4 $abs@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal2/3@2 +# AUNIT --inst x4e605000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x5 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; + # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 4 + TMPQ6[0,32] = MP_INT_ABS(TMPQ5[0,32]); + TMPQ6[32,32] = MP_INT_ABS(TMPQ5[32,32]); + TMPQ6[64,32] = MP_INT_ABS(TMPQ5[64,32]); + TMPQ6[96,32] = MP_INT_ABS(TMPQ5[96,32]); 
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 +# CONSTRUCT x4e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $-@2 $abs@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal2/3@1 +# AUNIT --inst x4e205000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x5 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; + # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 2 + TMPQ6[0,16] = MP_INT_ABS(TMPQ5[0,16]); + TMPQ6[16,16] = MP_INT_ABS(TMPQ5[16,16]); + TMPQ6[32,16] = MP_INT_ABS(TMPQ5[32,16]); + TMPQ6[48,16] = MP_INT_ABS(TMPQ5[48,16]); + TMPQ6[64,16] = MP_INT_ABS(TMPQ5[64,16]); + TMPQ6[80,16] = MP_INT_ABS(TMPQ5[80,16]); + TMPQ6[96,16] = MP_INT_ABS(TMPQ5[96,16]); + TMPQ6[112,16] = MP_INT_ABS(TMPQ5[112,16]); + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ6 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ6[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ6[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ6[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ6[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ6[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ6[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ6[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ6[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 +# CONSTRUCT x4e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@1 +# AUNIT --inst x4e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:sabd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = 
NEON_sabd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 +# CONSTRUCT x0ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@4 ARG3 ARG2 $-@4 2:4 &=$* ARG2 ARG3 $sless@4 &=$*@4 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@4 +# AUNIT --inst x0ea07400/mask=xffe0fc00 --status pass --comment "abd" +# This abd variant is implemented inline to document a correct way to +# express the signed absolute-difference semantic: when Rn s< Rm, adding +# 2*(Rm-Rn) to (Rn-Rm) leaves Rm-Rn under wrap-around arithmetic, so the +# result is |Rn-Rm| in either case. + +:sabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix TMPD1 = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; + # simd infix TMPD2 = Rm_VPR64.2S - Rn_VPR64.2S on lane size 4 + TMPD2[0,32] = Rm_VPR64.2S[0,32] - Rn_VPR64.2S[0,32]; + TMPD2[32,32] = Rm_VPR64.2S[32,32] - Rn_VPR64.2S[32,32]; + # simd infix TMPD2 = TMPD2 * 2:4 on lane size 4 + TMPD2[0,32] = TMPD2[0,32] * 2:4; + TMPD2[32,32] = TMPD2[32,32] * 2:4; + # simd infix TMPD3 = Rn_VPR64.2S s< Rm_VPR64.2S on lane size 4 + TMPD3[0,32] = zext(Rn_VPR64.2S[0,32] s< Rm_VPR64.2S[0,32]); + TMPD3[32,32] = zext(Rn_VPR64.2S[32,32] s< Rm_VPR64.2S[32,32]); + # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 4 + TMPD2[0,32] = TMPD2[0,32] * TMPD3[0,32]; + TMPD2[32,32] = TMPD2[32,32] * TMPD3[32,32]; + # simd infix Rd_VPR64.2S = TMPD1 + TMPD2 on lane size 4 + Rd_VPR64.2S[0,32] = TMPD1[0,32] + TMPD2[0,32]; + Rd_VPR64.2S[32,32] = TMPD1[32,32] + TMPD2[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 +# CONSTRUCT x0e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@2 +# AUNIT --inst x0e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:sabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sabd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 +# CONSTRUCT x4ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@4 +# AUNIT --inst x4ea07400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:sabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sabd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 +# CONSTRUCT x0e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@1 +# AUNIT --inst x0e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:sabd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sabd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 +# CONSTRUCT x4e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@2 +# AUNIT --inst x4e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" + 
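+# Editor's note (illustrative only): a worked check of the inline pattern +# used by the 2S variant above, on one 32-bit lane with Rn = 3 and Rm = 10: +# TMPD1 = 3-10 = 0xfffffff9, TMPD2 = (10-3)*2 = 14, TMPD3 = zext(3 s< 10) = 1, +# so Rd = 0xfffffff9 + 14 = 7 = |3-10|; with Rn >= Rm the TMPD3 mask is 0 and +# Rd = Rn-Rm directly. The remaining sabd variants defer to the NEON_sabd +# pcodeop. +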
+:sabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sabd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 +# CONSTRUCT x0ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $-@8 =$abs@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl/3@4 +# AUNIT --inst x0ea07000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabdl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x7 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; + # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ3) on lane size 8 + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ3[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ3[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 +# CONSTRUCT x0e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $-@4 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl/3@2 +# AUNIT --inst x0e607000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabdl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x7 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; + # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ3) on lane size 4 + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ3[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ3[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ3[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ3[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 +# CONSTRUCT x0e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $-@2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl/3@1 +# AUNIT --inst x0e207000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabdl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x7 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = 
sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; + # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ3) on lane size 2 + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ3[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ3[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ3[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ3[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ3[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ3[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ3[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ3[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 +# CONSTRUCT x4ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $-@8 =$abs@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl2/3@4 +# AUNIT --inst x4ea07000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabdl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x7 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; + # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ5) on lane size 8 + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ5[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ5[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 +# CONSTRUCT x4e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $-@4 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl2/3@2 +# AUNIT --inst x4e607000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabdl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x7 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + 
TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; + # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ5) on lane size 4 + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ5[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ5[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ5[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ5[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 +# CONSTRUCT x4e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $-@2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl2/3@1 +# AUNIT --inst x4e207000/mask=xffe0fc00 --status pass --comment "ext abd" + +:sabdl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x7 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; + # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ5) on lane size 2 + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ5[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ5[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ5[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ5[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ5[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ5[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ5[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ5[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 +# CONSTRUCT x0e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 =#+ &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@1 +# AUNIT --inst x0e206800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when 4H when size = 00 , Q = 0 Ta=VPR64.4H Tb=VPR64.8B e1=1 e2=2 s2=16 + +:sadalp Rd_VPR64.4H, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & 
b_1021=0b100000011010 & Rd_VPR64.4H & Rn_VPR64.8B & Zd +{ + TMPD1 = 0; + # simd infix TMPD1 = +(Rn_VPR64.8B) on pairs lane size (1 to 2) + local tmp2 = Rn_VPR64.8B[0,8]; + local tmp4 = sext(tmp2); + local tmp3 = Rn_VPR64.8B[8,8]; + local tmp5 = sext(tmp3); + TMPD1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[16,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[24,8]; + tmp5 = sext(tmp3); + TMPD1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[32,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[40,8]; + tmp5 = sext(tmp3); + TMPD1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[48,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[56,8]; + tmp5 = sext(tmp3); + TMPD1[48,16] = tmp4 + tmp5; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 +# CONSTRUCT x4e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 =#+ &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@1 +# AUNIT --inst x4e206800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when 8H when size = 00 , Q = 1 Ta=VPR128.8H Tb=VPR128.16B e1=1 e2=2 s2=32 + +:sadalp Rd_VPR128.8H, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_VPR128.8H & Rn_VPR128.16B & Zd +{ + TMPQ1 = 0; + # simd infix TMPQ1 = +(Rn_VPR128.16B) on pairs lane size (1 to 2) + local tmp2 = Rn_VPR128.16B[0,8]; + local tmp4 = sext(tmp2); + local tmp3 = Rn_VPR128.16B[8,8]; + local tmp5 = sext(tmp3); + TMPQ1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[16,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[24,8]; + tmp5 = sext(tmp3); + TMPQ1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[32,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[40,8]; + tmp5 = sext(tmp3); + TMPQ1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[48,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[56,8]; + tmp5 = sext(tmp3); + TMPQ1[48,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[64,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[72,8]; + tmp5 = sext(tmp3); + TMPQ1[64,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[80,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[88,8]; + tmp5 = sext(tmp3); + TMPQ1[80,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[96,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[104,8]; + tmp5 = sext(tmp3); + TMPQ1[96,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[112,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[120,8]; + tmp5 = sext(tmp3); + TMPQ1[112,16] = tmp4 + tmp5; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 +# CONSTRUCT x0e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 =#+ &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@2 +# AUNIT --inst 
x0e606800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when 2S when size = 01 , Q = 0 Ta=VPR64.2S Tb=VPR64.4H e1=2 e2=4 s2=16 + +:sadalp Rd_VPR64.2S, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_VPR64.2S & Rn_VPR64.4H & Zd +{ + TMPD1 = 0; + # simd infix TMPD1 = +(Rn_VPR64.4H) on pairs lane size (2 to 4) + local tmp2 = Rn_VPR64.4H[0,16]; + local tmp4 = sext(tmp2); + local tmp3 = Rn_VPR64.4H[16,16]; + local tmp5 = sext(tmp3); + TMPD1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR64.4H[32,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.4H[48,16]; + tmp5 = sext(tmp3); + TMPD1[32,32] = tmp4 + tmp5; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 +# CONSTRUCT x4e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 =#+ &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@2 +# AUNIT --inst x4e606800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when 4S when size = 01 , Q = 1 Ta=VPR128.4S Tb=VPR128.8H e1=2 e2=4 s2=32 + +:sadalp Rd_VPR128.4S, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_VPR128.4S & Rn_VPR128.8H & Zd +{ + TMPQ1 = 0; + # simd infix TMPQ1 = +(Rn_VPR128.8H) on pairs lane size (2 to 4) + local tmp2 = Rn_VPR128.8H[0,16]; + local tmp4 = sext(tmp2); + local tmp3 = Rn_VPR128.8H[16,16]; + local tmp5 = sext(tmp3); + TMPQ1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[32,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.8H[48,16]; + tmp5 = sext(tmp3); + TMPQ1[32,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[64,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.8H[80,16]; + tmp5 = sext(tmp3); + TMPQ1[64,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[96,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.8H[112,16]; + tmp5 = sext(tmp3); + TMPQ1[96,32] = tmp4 + tmp5; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 +# CONSTRUCT x0ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 =#+ &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@4 +# AUNIT --inst x0ea06800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when 1D when size = 10 , Q = 0 Ta=VPR64.1D Tb=VPR64.2S e1=4 e2=8 s2=16 + +:sadalp Rd_VPR64.1D, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_VPR64.1D & Rn_VPR64.2S & Zd +{ + TMPD1 = 0; + # simd infix TMPD1 = +(Rn_VPR64.2S) on pairs lane size (4 to 8) + local tmp2 = Rn_VPR64.2S[0,32]; + local tmp4 = sext(tmp2); + local tmp3 = Rn_VPR64.2S[32,32]; + local tmp5 = sext(tmp3); + TMPD1 = tmp4 + tmp5; + # simd infix Rd_VPR64.1D = Rd_VPR64.1D + TMPD1 on lane size 8 + Rd_VPR64.1D = Rd_VPR64.1D + TMPD1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 +# CONSTRUCT x4ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 =#+ &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@4 +# AUNIT --inst 
x4ea06800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when 2D when size = 10 , Q = 1 Ta=VPR128.2D Tb=VPR128.4S e1=4 e2=8 s2=32 + +:sadalp Rd_VPR128.2D, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_VPR128.2D & Rn_VPR128.4S & Zd +{ + TMPQ1 = 0; + # simd infix TMPQ1 = +(Rn_VPR128.4S) on pairs lane size (4 to 8) + local tmp2 = Rn_VPR128.4S[0,32]; + local tmp4 = sext(tmp2); + local tmp3 = Rn_VPR128.4S[32,32]; + local tmp5 = sext(tmp3); + TMPQ1[0,64] = tmp4 + tmp5; + tmp2 = Rn_VPR128.4S[64,32]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.4S[96,32]; + tmp5 = sext(tmp3); + TMPQ1[64,64] = tmp4 + tmp5; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 +# CONSTRUCT x0ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saddl/2@4 +# AUNIT --inst x0ea00000/mask=xffe0fc00 --status pass --comment "ext" + +:saddl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x0 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = TMPQ1 + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 +# CONSTRUCT x0e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl/2@2 +# AUNIT --inst x0e600000/mask=xffe0fc00 --status pass --comment "ext" + +:saddl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x0 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 +# CONSTRUCT x0e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl/2@1 +# AUNIT --inst x0e200000/mask=xffe0fc00 --status pass --comment "ext" + +:saddl Rd_VPR128.8H, 
Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x0 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 +# CONSTRUCT x4ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl2/2@4 +# AUNIT --inst x4ea00000/mask=xffe0fc00 --status pass --comment "ext" + +:saddl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x0 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix Rd_VPR128.2D = TMPQ2 + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 +# CONSTRUCT x4e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl2/2@2 +# AUNIT --inst x4e600000/mask=xffe0fc00 --status pass --comment "ext" + +:saddl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x0 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix 
Rd_VPR128.4S = TMPQ2 + TMPQ4 on lane size 4
+ Rd_VPR128.4S[0,32] = TMPQ2[0,32] + TMPQ4[0,32];
+ Rd_VPR128.4S[32,32] = TMPQ2[32,32] + TMPQ4[32,32];
+ Rd_VPR128.4S[64,32] = TMPQ2[64,32] + TMPQ4[64,32];
+ Rd_VPR128.4S[96,32] = TMPQ2[96,32] + TMPQ4[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00
+# CONSTRUCT x4e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 =$+@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl2/2@1
+# AUNIT --inst x4e200000/mask=xffe0fc00 --status pass --comment "ext"
+
+:saddl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x0 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd
+{
+ TMPD1 = Rn_VPR128.16B[64,64];
+ # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2)
+ TMPQ2[0,16] = sext(TMPD1[0,8]);
+ TMPQ2[16,16] = sext(TMPD1[8,8]);
+ TMPQ2[32,16] = sext(TMPD1[16,8]);
+ TMPQ2[48,16] = sext(TMPD1[24,8]);
+ TMPQ2[64,16] = sext(TMPD1[32,8]);
+ TMPQ2[80,16] = sext(TMPD1[40,8]);
+ TMPQ2[96,16] = sext(TMPD1[48,8]);
+ TMPQ2[112,16] = sext(TMPD1[56,8]);
+ TMPD3 = Rm_VPR128.16B[64,64];
+ # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2)
+ TMPQ4[0,16] = sext(TMPD3[0,8]);
+ TMPQ4[16,16] = sext(TMPD3[8,8]);
+ TMPQ4[32,16] = sext(TMPD3[16,8]);
+ TMPQ4[48,16] = sext(TMPD3[24,8]);
+ TMPQ4[64,16] = sext(TMPD3[32,8]);
+ TMPQ4[80,16] = sext(TMPD3[40,8]);
+ TMPQ4[96,16] = sext(TMPD3[48,8]);
+ TMPQ4[112,16] = sext(TMPD3[56,8]);
+ # simd infix Rd_VPR128.8H = TMPQ2 + TMPQ4 on lane size 2
+ Rd_VPR128.8H[0,16] = TMPQ2[0,16] + TMPQ4[0,16];
+ Rd_VPR128.8H[16,16] = TMPQ2[16,16] + TMPQ4[16,16];
+ Rd_VPR128.8H[32,16] = TMPQ2[32,16] + TMPQ4[32,16];
+ Rd_VPR128.8H[48,16] = TMPQ2[48,16] + TMPQ4[48,16];
+ Rd_VPR128.8H[64,16] = TMPQ2[64,16] + TMPQ4[64,16];
+ Rd_VPR128.8H[80,16] = TMPQ2[80,16] + TMPQ4[80,16];
+ Rd_VPR128.8H[96,16] = TMPQ2[96,16] + TMPQ4[96,16];
+ Rd_VPR128.8H[112,16] = TMPQ2[112,16] + TMPQ4[112,16];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00
+# CONSTRUCT x0ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =#+@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@4
+# AUNIT --inst x0ea02800/mask=xfffffc00 --status pass --comment "ext"
+
+:saddlp Rd_VPR64.1D, Rn_VPR64.2S
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.1D & Zd
+{
+ TMPD1 = Rn_VPR64.2S;
+ # simd infix Rd_VPR64.1D = +(TMPD1) on pairs lane size (4 to 8)
+ local tmp2 = TMPD1[0,32];
+ local tmp4 = sext(tmp2);
+ local tmp3 = TMPD1[32,32];
+ local tmp5 = sext(tmp3);
+ Rd_VPR64.1D = tmp4 + tmp5;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00
+# CONSTRUCT x0e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =#+@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@2
+# AUNIT --inst x0e602800/mask=xfffffc00 --status pass --comment "ext"
+
+:saddlp Rd_VPR64.2S, Rn_VPR64.4H
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.2S & Zd
+{
+ TMPD1 = Rn_VPR64.4H;
+ # simd infix Rd_VPR64.2S = +(TMPD1) on pairs lane size (2 to 4)
+ local tmp2 = TMPD1[0,16];
+ local tmp4 = sext(tmp2);
+ local tmp3 = TMPD1[16,16];
+ local tmp5 = sext(tmp3);
+ Rd_VPR64.2S[0,32] = tmp4 + tmp5;
+ tmp2 = TMPD1[32,16];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPD1[48,16];
+ tmp5 = sext(tmp3);
+ Rd_VPR64.2S[32,32] = tmp4 + tmp5;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00
+# CONSTRUCT x0e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =#+@1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@1
+# AUNIT --inst x0e202800/mask=xfffffc00 --status pass --comment "ext"
+
+:saddlp Rd_VPR64.4H, Rn_VPR64.8B
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.4H & Zd
+{
+ TMPD1 = Rn_VPR64.8B;
+ # simd infix Rd_VPR64.4H = +(TMPD1) on pairs lane size (1 to 2)
+ local tmp2 = TMPD1[0,8];
+ local tmp4 = sext(tmp2);
+ local tmp3 = TMPD1[8,8];
+ local tmp5 = sext(tmp3);
+ Rd_VPR64.4H[0,16] = tmp4 + tmp5;
+ tmp2 = TMPD1[16,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPD1[24,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR64.4H[16,16] = tmp4 + tmp5;
+ tmp2 = TMPD1[32,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPD1[40,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR64.4H[32,16] = tmp4 + tmp5;
+ tmp2 = TMPD1[48,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPD1[56,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR64.4H[48,16] = tmp4 + tmp5;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00
+# CONSTRUCT x4ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =#+@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@4
+# AUNIT --inst x4ea02800/mask=xfffffc00 --status pass --comment "ext"
+
+:saddlp Rd_VPR128.2D, Rn_VPR128.4S
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd
+{
+ TMPQ1 = Rn_VPR128.4S;
+ # simd infix Rd_VPR128.2D = +(TMPQ1) on pairs lane size (4 to 8)
+ local tmp2 = TMPQ1[0,32];
+ local tmp4 = sext(tmp2);
+ local tmp3 = TMPQ1[32,32];
+ local tmp5 = sext(tmp3);
+ Rd_VPR128.2D[0,64] = tmp4 + tmp5;
+ tmp2 = TMPQ1[64,32];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[96,32];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.2D[64,64] = tmp4 + tmp5;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00
+# CONSTRUCT x4e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =#+@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@2
+# AUNIT --inst x4e602800/mask=xfffffc00 --status pass --comment "ext"
+
+:saddlp Rd_VPR128.4S, Rn_VPR128.8H
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd
+{
+ TMPQ1 = Rn_VPR128.8H;
+ # simd infix Rd_VPR128.4S = +(TMPQ1) on pairs lane size (2 to 4)
+ local tmp2 = TMPQ1[0,16];
+ local tmp4 = sext(tmp2);
+ local tmp3 = TMPQ1[16,16];
+ local tmp5 = sext(tmp3);
+ Rd_VPR128.4S[0,32] = tmp4 + tmp5;
+ tmp2 = TMPQ1[32,16];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[48,16];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.4S[32,32] = tmp4 + tmp5;
+ tmp2 = TMPQ1[64,16];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[80,16];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.4S[64,32] = tmp4 + tmp5;
+ tmp2 = TMPQ1[96,16];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[112,16];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.4S[96,32] = tmp4 + tmp5;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
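+
+# NOTE (informational): SADDLP differs from SADDL in that both addends of each
+# widened lane come from adjacent lanes of a single source register ("on pairs"),
+# so the lowering sign-extends each lane pair into local temporaries and sums them,
+# e.g. for the 2S form above: Rd_VPR64.2S[0,32] = sext(TMPD1[0,16]) + sext(TMPD1[16,16]).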
+
+# C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00
+# CONSTRUCT x4e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =#+@1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@1
+# AUNIT --inst x4e202800/mask=xfffffc00 --status pass --comment "ext"
+
+:saddlp Rd_VPR128.8H, Rn_VPR128.16B
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd
+{
+ TMPQ1 = Rn_VPR128.16B;
+ # simd infix Rd_VPR128.8H = +(TMPQ1) on pairs lane size (1 to 2)
+ local tmp2 = TMPQ1[0,8];
+ local tmp4 = sext(tmp2);
+ local tmp3 = TMPQ1[8,8];
+ local tmp5 = sext(tmp3);
+ Rd_VPR128.8H[0,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[16,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[24,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[16,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[32,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[40,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[32,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[48,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[56,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[48,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[64,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[72,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[64,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[80,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[88,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[80,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[96,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[104,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[96,16] = tmp4 + tmp5;
+ tmp2 = TMPQ1[112,8];
+ tmp4 = sext(tmp2);
+ tmp3 = TMPQ1[120,8];
+ tmp5 = sext(tmp3);
+ Rd_VPR128.8H[112,16] = tmp4 + tmp5;
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00
+# CONSTRUCT x4eb03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@4
+# AUNIT --inst x4eb03800/mask=xfffffc00 --status nopcodeop --comment "ext"
+
+:saddlv Rd_FPR64, Rn_VPR128.4S
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.4S & Rd_FPR64 & Zd
+{
+ Rd_FPR64 = NEON_saddlv(Rn_VPR128.4S, 4:1);
+}
+
+# C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00
+# CONSTRUCT x4e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@1
+# AUNIT --inst x4e303800/mask=xfffffc00 --status nopcodeop --comment "ext"
+
+:saddlv Rd_FPR16, Rn_VPR128.16B
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.16B & Rd_FPR16 & Zd
+{
+ Rd_FPR16 = NEON_saddlv(Rn_VPR128.16B, 1:1);
+}
+
+# C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00
+# CONSTRUCT x0e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@1
+# AUNIT --inst x0e303800/mask=xfffffc00 --status nopcodeop --comment "ext"
+
+:saddlv Rd_FPR16, Rn_VPR64.8B
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.8B & Rd_FPR16 & Zd
+{
+ Rd_FPR16 = NEON_saddlv(Rn_VPR64.8B, 1:1);
+}
+
+# C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00
+# CONSTRUCT x0e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@2
+# AUNIT --inst x0e703800/mask=xfffffc00 --status nopcodeop --comment "ext"
+
+:saddlv Rd_FPR32, Rn_VPR64.4H
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.4H & Rd_FPR32 & Zd
+{
+ Rd_FPR32 = NEON_saddlv(Rn_VPR64.4H, 2:1);
+}
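+
+# NOTE (informational): SADDLV is not lowered to inline p-code here; each variant
+# calls the user-defined pcodeop NEON_saddlv with the source lane size in bytes as
+# its second operand, e.g. NEON_saddlv(Rn_VPR128.4S, 4:1). The "--status nopcodeop"
+# flag above appears to mark exactly this case: a consumer of the lifted p-code has
+# to interpret the resulting CALLOTHER operation itself.
+
+# C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00
+# CONSTRUCT x4e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@2
+# AUNIT --inst x4e703800/mask=xfffffc00 --status nopcodeop --comment "ext"
+
+:saddlv Rd_FPR32, Rn_VPR128.8H
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 &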
b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.8H & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_saddlv(Rn_VPR128.8H, 2:1); +} + +# C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 +# CONSTRUCT x0ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $sext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw/2@4 +# AUNIT --inst x0ea01000/mask=xffe0fc00 --status pass --comment "ext" + +:saddw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 +# CONSTRUCT x0e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $sext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw/2@2 +# AUNIT --inst x0e601000/mask=xffe0fc00 --status pass --comment "ext" + +:saddw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 +# CONSTRUCT x0e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $sext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw/2@1 +# AUNIT --inst x0e201000/mask=xffe0fc00 --status pass --comment "ext" + +:saddw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero 
upper 16 bytes of Zd +} + +# C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 +# CONSTRUCT x4ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $sext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw2/2@4 +# AUNIT --inst x4ea01000/mask=xffe0fc00 --status pass --comment "ext" + +:saddw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + TMPD1 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 +# CONSTRUCT x4e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $sext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw2/2@2 +# AUNIT --inst x4e601000/mask=xffe0fc00 --status pass --comment "ext" + +:saddw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPD1 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 +# CONSTRUCT x4e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $sext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw2/2@1 +# AUNIT --inst x4e201000/mask=xffe0fc00 --status pass --comment "ext" + +:saddw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPD1 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.233 SCVTF (vector, fixed-point) page 
C7-1921 line 107690 MATCH x5f00e400/mask=xff80fc00 +# CONSTRUCT x5f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2 +# AUNIT --inst x5f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_scvtf(Rn_FPR64, Imm_shr_imm64:4); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x5f00e400/mask=xff80fc00 +# CONSTRUCT x5f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2 +# AUNIT --inst x5f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_FPR32, Rn_FPR32, Imm_shr_imm32 +is b_3031=1 & u=0 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_scvtf(Rn_FPR32, Imm_shr_imm32:4); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x5f00e400/mask=xff80fc00 +# CONSTRUCT x5f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2 +# AUNIT --inst x5f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" + +:scvtf Rd_FPR16, Rn_FPR16, Imm_shr_imm16 +is b_3031=1 & u=0 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_scvtf(Rn_FPR16, Imm_shr_imm16:4); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x4f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@8 +# AUNIT --inst x4f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_scvtf(Rn_VPR128.2D, Imm_shr_imm64:4, 8:1); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x0f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@4 +# AUNIT --inst x0f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_scvtf(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x4f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@4 +# AUNIT --inst x4f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_scvtf(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x0f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@2 +# AUNIT --inst x0f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" + +:scvtf Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & 
Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_scvtf(Rn_VPR64.4H, Imm_shr_imm32:4, 2:1); +} + +# C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 +# CONSTRUCT x4f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@2 +# AUNIT --inst x4f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" + +:scvtf Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_scvtf(Rn_VPR128.8H, Imm_shr_imm32:4, 2:1); +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x5e21d800/mask=xffbffc00 +# CONSTRUCT x5e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x5e21d800/mask=xfffffc00 --status fail --comment "nofpround" + +:scvtf Rd_FPR32, Rn_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = int2float(Rn_FPR32); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x5e21d800/mask=xffbffc00 +# CONSTRUCT x5e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x5e61d800/mask=xfffffc00 --status pass --comment "nofpround" + +:scvtf Rd_FPR64, Rn_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = int2float(Rn_FPR64); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e21d800/mask=xbfbffc00 +# CONSTRUCT x4e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@8 +# AUNIT --inst x4e61d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_VPR128.2D, Rn_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_scvtf(Rn_VPR128.2D, 8:1); +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e21d800/mask=xbfbffc00 +# CONSTRUCT x0e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@4 +# AUNIT --inst x0e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_VPR64.2S, Rn_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_scvtf(Rn_VPR64.2S, 4:1); +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e21d800/mask=xbfbffc00 +# CONSTRUCT x4e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@4 +# AUNIT --inst x4e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:scvtf Rd_VPR128.4S, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_scvtf(Rn_VPR128.4S, 4:1); +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x5e79d800/mask=xfffffc00 +# CONSTRUCT x5e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x5e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Scalar half 
precision variant + +:scvtf Rd_FPR16, Rn_FPR16 +is b_1031=0b0101111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = int2float(Rn_FPR16); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e79d800/mask=xbffffc00 +# CONSTRUCT x0e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@2 +# AUNIT --inst x0e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Vector half precision variant when Q=0 suf=VPR64.4H + +:scvtf Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b00111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_scvtf(Rn_VPR64.4H, 2:1); +} + +# C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e79d800/mask=xbffffc00 +# CONSTRUCT x4e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@2 +# AUNIT --inst x4e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 suf=VPR128.8H + +:scvtf Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b00111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_scvtf(Rn_VPR128.8H, 2:1); +} + +# C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 +# CONSTRUCT x1ec28000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float:2 FBits16 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_scvtf/2 +# AUNIT --inst x1ec28000/mask=xffff8000 --status noqemu --comment "nofpround" +# 32-bit to half-precision variant when sf == 0 && type == 11 + +:scvtf Rd_FPR16, Rn_GPR32, FBitsOp +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits16 & Rn_GPR32 & Rd_FPR16 & Zd +{ + local tmp1:2 = int2float(Rn_GPR32); + Rd_FPR16 = tmp1 f/ FBits16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 +# CONSTRUCT x9ec20000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float:2 FBits16 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_scvtf/2 +# AUNIT --inst x9ec20000/mask=xffff0000 --status noqemu --comment "nofpround" +# 64-bit to half-precision variant when sf == 1 && type == 11 + +:scvtf Rd_FPR16, Rn_GPR64, FBitsOp +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits16 & Rn_GPR64 & Rd_FPR16 & Zd +{ + local tmp1:2 = int2float(Rn_GPR64); + Rd_FPR16 = tmp1 f/ FBits16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 +# CONSTRUCT x1e428000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float:8 FBits64 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_scvtf/2 +# AUNIT --inst x1e428000/mask=xffff8000 --status pass --comment "nofpround" + +:scvtf Rd_FPR64, Rn_GPR32, FBitsOp +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits64 & Rn_GPR32 & Rd_FPR64 & Zd +{ + local tmp1:8 = int2float(Rn_GPR32); + Rd_FPR64 = tmp1 f/ FBits64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 +# CONSTRUCT x1e028000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float FBits32 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_scvtf/2 +# AUNIT --inst x1e028000/mask=xffff8000 --status fail --comment 
"nofpround" + +:scvtf Rd_FPR32, Rn_GPR32, FBitsOp +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits32 & Rn_GPR32 & Rd_FPR32 & Zd +{ + local tmp1:4 = int2float(Rn_GPR32); + Rd_FPR32 = tmp1 f/ FBits32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 +# CONSTRUCT x9e420000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float FBits64 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_scvtf/2 +# AUNIT --inst x9e420000/mask=xffff0000 --status fail --comment "nofpround" + +:scvtf Rd_FPR64, Rn_GPR64, FBitsOp +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits64 & Rn_GPR64 & Rd_FPR64 & Zd +{ + local tmp1:8 = int2float(Rn_GPR64); + Rd_FPR64 = tmp1 f/ FBits64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 +# CONSTRUCT x9e020000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 int2float:4 FBits32 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_scvtf/2 +# AUNIT --inst x9e020000/mask=xffff0000 --status fail --comment "nofpround" + +:scvtf Rd_FPR32, Rn_GPR64, FBitsOp +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits32 & Rn_GPR64 & Rd_FPR32 & Rd_FPR64 & Zd +{ + local tmp1:4 = int2float(Rn_GPR64); + Rd_FPR32 = tmp1 f/ FBits32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 +# CONSTRUCT x1ee20000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x1ee20000/mask=xfffffc00 --status noqemu --comment "nofpround" + +:scvtf Rd_FPR16, Rn_GPR32 +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR16 & Zd +{ + Rd_FPR16 = int2float(Rn_GPR32); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 +# CONSTRUCT x9ee20000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x9ee20000/mask=xfffffc00 --status noqemu --comment "nofpround" + +:scvtf Rd_FPR16, Rn_GPR64 +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR16 & Zd +{ + Rd_FPR16 = int2float(Rn_GPR64); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 +# CONSTRUCT x1e620000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x1e620000/mask=xfffffc00 --status pass --comment "nofpround" + +:scvtf Rd_FPR64, Rn_GPR32 +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR64 & Zd +{ + Rd_FPR64 = int2float(Rn_GPR32); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 +# CONSTRUCT x9e620000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x9e620000/mask=xfffffc00 --status pass --comment "nofpround" + +:scvtf Rd_FPR64, Rn_GPR64 +is 
sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = int2float(Rn_GPR64); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 +# CONSTRUCT x1e220000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x1e220000/mask=xfffffc00 --status fail --comment "nofpround" + +:scvtf Rd_FPR32, Rn_GPR32 +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = int2float(Rn_GPR32); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 +# CONSTRUCT x9e220000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =int2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 +# AUNIT --inst x9e220000/mask=xfffffc00 --status fail --comment "nofpround" + +:scvtf Rd_FPR32, Rn_GPR64 +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR32 & Zd +{ + Rd_FPR32 = int2float(Rn_GPR64); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.237 SDOT (by element) page C7-1931 line 108271 MATCH x0f00e000/mask=xbf00f400 +# CONSTRUCT x0f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_sdot/2@1 +# AUNIT --inst x0f80e000/mask=xffc0f400 --status noqemu +# Vector variant when Q=0 Ta=64.2S Tb=64.8B + +:sdot Rd_VPR64.2S, Rn_VPR64.8B, Re_VPR128.B.vIndex +is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR64.2S & Rn_VPR64.8B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + Rd_VPR64.2S = NEON_sdot(Rn_VPR64.8B, tmp1, 1:1); +} + +# C7.2.237 SDOT (by element) page C7-1931 line 108271 MATCH x0f00e000/mask=xbf00f400 +# CONSTRUCT x4f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_sdot/2@1 +# AUNIT --inst x4f80e000/mask=xffc0f400 --status noqemu +# Vector variant when Q=1 Ta=128.4S Tb=128.16B + +:sdot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.B.vIndex +is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR128.4S & Rn_VPR128.16B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + Rd_VPR128.4S = NEON_sdot(Rn_VPR128.16B, tmp1, 1:1); +} + +# C7.2.238 SDOT (vector) page C7-1933 line 108370 MATCH x0e009400/mask=xbf20fc00 +# CONSTRUCT x0e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sdot/2@1 +# AUNIT --inst x0e809400/mask=xffe0fc00 --status noqemu +# Three registers of the same type variant when Q=0 Ta=64.2S Tb=64.8B + +:sdot Rd_VPR64.2S, Rn_VPR64.8B, Rm_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR64.2S & Rn_VPR64.8B & Rm_VPR64.8B & Zd +{ + Rd_VPR64.2S = NEON_sdot(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.238 SDOT (vector) page C7-1933 line 108370 MATCH x0e009400/mask=xbf20fc00 +# CONSTRUCT x4e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sdot/2@1 +# AUNIT --inst x4e809400/mask=xffe0fc00 --status noqemu +# Three registers of the same type variant when Q=1 Ta=128.4S Tb=128.16B + +:sdot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B +is b_31=0 & b_30=1 & 
b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR128.4S & Rn_VPR128.16B & Rm_VPR128.16B & Zd +{ + Rd_VPR128.4S = NEON_sdot(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.239 SHA1C page C7-1935 line 108468 MATCH x5e000000/mask=xffe0fc00 +# CONSTRUCT x5e000000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1c/3@4 +# AUNIT --inst x5e000000/mask=xffe0fc00 --status noqemu + +:sha1c Rd_VPR128, Rn_FPR32, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000000 & Rn_FPR32 & Rd_VPR128 & Zd +{ + Rd_VPR128 = NEON_sha1c(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); +} + +# C7.2.240 SHA1H page C7-1936 line 108537 MATCH x5e280800/mask=xfffffc00 +# CONSTRUCT x5e280800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 30:1 =<< +# SMACRO(pseudo) ARG1 ARG2 =NEON_sha1h/1 +# AUNIT --inst x5e280800/mask=xfffffc00 --status noqemu + +:sha1h Rd_FPR32, Rn_FPR32 +is b_2431=0b01011110 & b_2223=0b00 & b_1721=0b10100 & b_1216=0b00000 & b_1011=0b10 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = Rn_FPR32 << 30:1; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.241 SHA1M page C7-1937 line 108594 MATCH x5e002000/mask=xffe0fc00 +# CONSTRUCT x5e002000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1m/3@4 +# AUNIT --inst x5e002000/mask=xffe0fc00 --status noqemu + +:sha1m Rd_VPR128, Rn_FPR32, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001000 & Rn_FPR32 & Rd_VPR128 & Zd +{ + Rd_VPR128 = NEON_sha1m(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); +} + +# C7.2.242 SHA1P page C7-1938 line 108663 MATCH x5e001000/mask=xffe0fc00 +# CONSTRUCT x5e001000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1p/3@4 +# AUNIT --inst x5e001000/mask=xffe0fc00 --status noqemu + +:sha1p Rd_VPR128, Rn_FPR32, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000100 & Rn_FPR32 & Rd_VPR128 & Zd +{ + Rd_VPR128 = NEON_sha1p(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); +} + +# C7.2.243 SHA1SU0 page C7-1939 line 108732 MATCH x5e003000/mask=xffe0fc00 +# CONSTRUCT x5e003000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1su0/3@4 +# AUNIT --inst x5e003000/mask=xffe0fc00 --status noqemu + +:sha1su0 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001100 & Rn_VPR128.4S & Rd_VPR128.4S & Rd_VPR128 & Zd +{ + Rd_VPR128.4S = NEON_sha1su0(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.244 SHA1SU1 page C7-1940 line 108798 MATCH x5e281800/mask=xfffffc00 +# CONSTRUCT x5e281800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sha1su1/2@4 +# AUNIT --inst x5e281800/mask=xfffffc00 --status noqemu + +:sha1su1 Rd_VPR128.4S, Rn_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b000110 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sha1su1(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); +} + +# C7.2.245 SHA256H2 page C7-1941 line 108862 MATCH x5e005000/mask=xffe0fc00 +# CONSTRUCT x5e005000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha256h2/3@4 +# AUNIT --inst x5e005000/mask=xffe0fc00 --status noqemu + +:sha256h2 Rd_VPR128, Rn_VPR128, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010100 & Rn_VPR128 & Rd_VPR128 & Zd +{ + Rd_VPR128 = NEON_sha256h2(Rd_VPR128, Rn_VPR128, 
Rm_VPR128.4S, 4:1); +} + +# C7.2.246 SHA256H page C7-1942 line 108922 MATCH x5e004000/mask=xffe0fc00 +# CONSTRUCT x5e004000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha256h/3@4 +# AUNIT --inst x5e004000/mask=xffe0fc00 --status noqemu + +:sha256h Rd_VPR128, Rn_VPR128, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010000 & Rn_VPR128 & Rd_VPR128 & Zd +{ + Rd_VPR128 = NEON_sha256h(Rd_VPR128, Rn_VPR128, Rm_VPR128.4S, 4:1); +} + +# C7.2.247 SHA256SU0 page C7-1943 line 108982 MATCH x5e282800/mask=xfffffc00 +# CONSTRUCT x5e282800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sha256su0/2@4 +# AUNIT --inst x5e282800/mask=xfffffc00 --status noqemu + +:sha256su0 Rd_VPR128.4S, Rn_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b001010 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sha256su0(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); +} + +# C7.2.248 SHA256SU1 page C7-1944 line 109048 MATCH x5e006000/mask=xffe0fc00 +# CONSTRUCT x5e006000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha256su1/3@4 +# AUNIT --inst x5e006000/mask=xffe0fc00 --status noqemu + +:sha256su1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b011000 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sha256su1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.249 SHA512H page C7-1946 line 109138 MATCH xce608000/mask=xffe0fc00 +# CONSTRUCT xce608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha512h/3@8 +# AUNIT --inst xce608000/mask=xffe0fc00 --status noqemu + +:sha512h Rd_VPR128, Rn_VPR128, Rm_VPR128.2D +is b_2131=0b11001110011 & b_1015=0b100000 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D & Zd +{ + Rd_VPR128 = NEON_sha512h(Rd_VPR128, Rn_VPR128, Rm_VPR128.2D, 8:1); +} + +# C7.2.250 SHA512H2 page C7-1948 line 109227 MATCH xce608400/mask=xffe0fc00 +# CONSTRUCT xce608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha512h2/3@8 +# AUNIT --inst xce608400/mask=xffe0fc00 --status noqemu + +:sha512h2 Rd_VPR128, Rn_VPR128, Rm_VPR128.2D +is b_2131=0b11001110011 & b_1015=0b100001 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D & Zd +{ + Rd_VPR128 = NEON_sha512h2(Rd_VPR128, Rn_VPR128, Rm_VPR128.2D, 8:1); +} + +# C7.2.251 SHA512SU0 page C7-1950 line 109313 MATCH xcec08000/mask=xfffffc00 +# CONSTRUCT xcec08000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sha512su0/2@8 +# AUNIT --inst xcec08000/mask=xfffffc00 --status noqemu + +:sha512su0 Rd_VPR128.2D, Rn_VPR128.2D +is b_1031=0b1100111011000000100000 & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sha512su0(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); +} + +# C7.2.252 SHA512SU1 page C7-1951 line 109383 MATCH xce608800/mask=xffe0fc00 +# CONSTRUCT xce608800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha512su1/3@8 +# AUNIT --inst xce608800/mask=xffe0fc00 --status noqemu + +:sha512su1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_2131=0b11001110011 & b_1015=0b100010 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sha512su1(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 +# CONSTRUCT x4e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 
=NEON_shadd/2@1 +# AUNIT --inst x4e200400/mask=xffe0fc00 --status nopcodeop + +:shadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_shadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 +# CONSTRUCT x0ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@4 +# AUNIT --inst x0ea00400/mask=xffe0fc00 --status nopcodeop + +:shadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_shadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 +# CONSTRUCT x0e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@2 +# AUNIT --inst x0e600400/mask=xffe0fc00 --status nopcodeop + +:shadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_shadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 +# CONSTRUCT x4ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@4 +# AUNIT --inst x4ea00400/mask=xffe0fc00 --status nopcodeop + +:shadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_shadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 +# CONSTRUCT x0e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@1 +# AUNIT --inst x0e200400/mask=xffe0fc00 --status nopcodeop + +:shadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_shadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 +# CONSTRUCT x4e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@2 +# AUNIT --inst x4e600400/mask=xffe0fc00 --status nopcodeop + +:shadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_shadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x5f005400/mask=xff80fc00 +# CONSTRUCT x5f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2 +# AUNIT --inst x5f405400/mask=xffc0fc00 --status nopcodeop + +:shl Rd_FPR64, Rn_FPR64, Imm_imm0_63 +is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_shl(Rn_FPR64, Imm_imm0_63:1); +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x4f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:1 =$<<@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 
=NEON_shl/2@1 +# AUNIT --inst x4f085400/mask=xfff8fc00 --status pass + +:shl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + local tmp1:1 = Imm_uimm3; + # simd infix Rd_VPR128.16B = Rn_VPR128.16B << tmp1 on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] << tmp1; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] << tmp1; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] << tmp1; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] << tmp1; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] << tmp1; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] << tmp1; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] << tmp1; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] << tmp1; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] << tmp1; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] << tmp1; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] << tmp1; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] << tmp1; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] << tmp1; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] << tmp1; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] << tmp1; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] << tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x4f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@8 +# AUNIT --inst x4f405400/mask=xffc0fc00 --status pass + +:shl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = Imm_imm0_63; + # simd infix Rd_VPR128.2D = Rn_VPR128.2D << tmp1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] << tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] << tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x0f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@4 +# AUNIT --inst x0f205400/mask=xffe0fc00 --status pass + +:shl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + local tmp1:4 = Imm_uimm5; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S << tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] << tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] << tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x0f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@2 +# AUNIT --inst x0f105400/mask=xfff0fc00 --status pass + +:shl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + local tmp1:2 = Imm_uimm4; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H << tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] << tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] << tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] << tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] << tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x4f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED 
OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@4 +# AUNIT --inst x4f205400/mask=xffe0fc00 --status pass + +:shl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + local tmp1:4 = Imm_uimm5; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S << tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] << tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] << tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] << tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] << tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x0f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:1 =$<<@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@1 +# AUNIT --inst x0f085400/mask=xfff8fc00 --status pass + +:shl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + local tmp1:1 = Imm_uimm3; + # simd infix Rd_VPR64.8B = Rn_VPR64.8B << tmp1 on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] << tmp1; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] << tmp1; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] << tmp1; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] << tmp1; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] << tmp1; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] << tmp1; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] << tmp1; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] << tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 +# CONSTRUCT x4f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@2 +# AUNIT --inst x4f105400/mask=xfff0fc00 --status pass + +:shl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + local tmp1:2 = Imm_uimm4; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H << tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] << tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] << tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] << tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] << tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] << tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] << tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] << tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] << tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 +# CONSTRUCT x2ea13800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 zext:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll/2@4 +# AUNIT --inst x2ea13800/mask=xfffffc00 --status pass --comment "ext" + +:shll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm_exact32 +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + local tmp2:8 = zext(Imm_uimm_exact32); + # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; + zext_zq(Zd); # zero upper 16 
bytes of Zd +} + +# C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 +# CONSTRUCT x2e613800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll/2@2 +# AUNIT --inst x2e613800/mask=xfffffc00 --status pass --comment "ext" + +:shll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm_exact16 +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = TMPQ1 << Imm_uimm_exact16:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] << Imm_uimm_exact16:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 +# CONSTRUCT x2e213800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll/2@1 +# AUNIT --inst x2e213800/mask=xfffffc00 --status pass --comment "ext" + +:shll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm_exact8 +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = TMPQ1 << Imm_uimm_exact8:2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] << Imm_uimm_exact8:2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 +# CONSTRUCT x6ea13800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 zext:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll2/2@4 +# AUNIT --inst x6ea13800/mask=xfffffc00 --status pass --comment "ext" + +:shll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm_exact32 +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + local tmp3:8 = zext(Imm_uimm_exact32); + # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# 
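+# NOTE (informational): across the SHLL/SHLL2 encodings in this group, the shift
+# amount is not a free immediate: Imm_uimm_exact8/16/32 constrain it to exactly the
+# source element width in bits, so "shll v0.8h, v1.8b, #8" is encodable but "#7" is
+# not. The lowering therefore widens each lane with sext and shifts the widened
+# lane by that fixed amount, e.g. Rd[0,16] = sext(Rn[0,8]) << 8.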
C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 +# CONSTRUCT x6e613800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll2/2@2 +# AUNIT --inst x6e613800/mask=xfffffc00 --status pass --comment "ext" + +:shll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm_exact16 +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd infix Rd_VPR128.4S = TMPQ2 << Imm_uimm_exact16:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] << Imm_uimm_exact16:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 +# CONSTRUCT x6e213800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll2/2@1 +# AUNIT --inst x6e213800/mask=xfffffc00 --status pass --comment "ext" + +:shll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm_exact8 +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + # simd infix Rd_VPR128.8H = TMPQ2 << Imm_uimm_exact8:2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] << Imm_uimm_exact8:2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# CONSTRUCT x0f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 =$zext@8:8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn/2@8 +# AUNIT --inst x0f208400/mask=xffe0fc00 --status pass --comment "ext" + +:shrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + local tmp1:8 = zext(Imm_shr_imm32); + # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; + # simd resize Rd_VPR64.2S = zext(TMPQ1) (lane size 8 to 4) + Rd_VPR64.2S[0,32] = TMPQ1[0,32]; + Rd_VPR64.2S[32,32] = TMPQ1[64,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# CONSTRUCT 
x0f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:4 $>>@4 =$zext@4:16 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn/2@4 +# AUNIT --inst x0f108400/mask=xfff0fc00 --status pass --comment "ext" + +:shrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S >> Imm_shr_imm16:4 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> Imm_shr_imm16:4; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> Imm_shr_imm16:4; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> Imm_shr_imm16:4; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> Imm_shr_imm16:4; + # simd resize Rd_VPR64.4H = zext(TMPQ1) (lane size 4 to 2) + Rd_VPR64.4H[0,16] = TMPQ1[0,16]; + Rd_VPR64.4H[16,16] = TMPQ1[32,16]; + Rd_VPR64.4H[32,16] = TMPQ1[64,16]; + Rd_VPR64.4H[48,16] = TMPQ1[96,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# CONSTRUCT x0f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $>>@2 =$zext@2:8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn/2@2 +# AUNIT --inst x0f088400/mask=xfff8fc00 --status pass --comment "ext" + +:shrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm8:2 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm8:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm8:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm8:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm8:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm8:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm8:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm8:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm8:2; + # simd resize Rd_VPR64.8B = zext(TMPQ1) (lane size 2 to 1) + Rd_VPR64.8B[0,8] = TMPQ1[0,8]; + Rd_VPR64.8B[8,8] = TMPQ1[16,8]; + Rd_VPR64.8B[16,8] = TMPQ1[32,8]; + Rd_VPR64.8B[24,8] = TMPQ1[48,8]; + Rd_VPR64.8B[32,8] = TMPQ1[64,8]; + Rd_VPR64.8B[40,8] = TMPQ1[80,8]; + Rd_VPR64.8B[48,8] = TMPQ1[96,8]; + Rd_VPR64.8B[56,8] = TMPQ1[112,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# CONSTRUCT x4f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 $zext@8:8 1:1 &=$copy +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn2/2@8 +# AUNIT --inst x4f208400/mask=xffe0fc00 --status pass --comment "ext" + +:shrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + local tmp1:8 = zext(Imm_shr_imm32); + # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; + # simd resize TMPD2 = zext(TMPQ1) (lane size 8 to 4) + TMPD2[0,32] = TMPQ1[0,32]; + TMPD2[32,32] = TMPQ1[64,32]; + # simd copy Rd_VPR128.4S element 1:1 = TMPD2 (lane size 8) + Rd_VPR128.4S[64,64] = TMPD2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# CONSTRUCT x4f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:4 $>>@4 $zext@4:8 1:1 &=$copy +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn2/2@4 +# AUNIT --inst 
x4f108400/mask=xfff0fc00 --status pass --comment "ext" + +:shrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S >> Imm_shr_imm16:4 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> Imm_shr_imm16:4; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> Imm_shr_imm16:4; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> Imm_shr_imm16:4; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> Imm_shr_imm16:4; + # simd resize TMPD2 = zext(TMPQ1) (lane size 4 to 2) + TMPD2[0,16] = TMPQ1[0,16]; + TMPD2[16,16] = TMPQ1[32,16]; + TMPD2[32,16] = TMPQ1[64,16]; + TMPD2[48,16] = TMPQ1[96,16]; + # simd copy Rd_VPR128.8H element 1:1 = TMPD2 (lane size 8) + Rd_VPR128.8H[64,64] = TMPD2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 +# CONSTRUCT x4f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $>>@2 $zext@2:8 1:1 &=$copy +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn2/2@2 +# AUNIT --inst x4f088400/mask=xfff8fc00 --status pass --comment "ext" + +:shrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm8:2 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm8:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm8:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm8:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm8:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm8:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm8:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm8:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm8:2; + # simd resize TMPD2 = zext(TMPQ1) (lane size 2 to 1) + TMPD2[0,8] = TMPQ1[0,8]; + TMPD2[8,8] = TMPQ1[16,8]; + TMPD2[16,8] = TMPQ1[32,8]; + TMPD2[24,8] = TMPQ1[48,8]; + TMPD2[32,8] = TMPQ1[64,8]; + TMPD2[40,8] = TMPQ1[80,8]; + TMPD2[48,8] = TMPQ1[96,8]; + TMPD2[56,8] = TMPQ1[112,8]; + # simd copy Rd_VPR128.16B element 1:1 = TMPD2 (lane size 8) + Rd_VPR128.16B[64,64] = TMPD2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 +# CONSTRUCT x4e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@1 +# AUNIT --inst x4e202400/mask=xffe0fc00 --status nopcodeop + +:shsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_shsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 +# CONSTRUCT x0ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@4 +# AUNIT --inst x0ea02400/mask=xffe0fc00 --status nopcodeop + +:shsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_shsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 +# CONSTRUCT x0e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@2 +# AUNIT --inst x0e602400/mask=xffe0fc00 
--status nopcodeop + +:shsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_shsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 +# CONSTRUCT x4ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@4 +# AUNIT --inst x4ea02400/mask=xffe0fc00 --status nopcodeop + +:shsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_shsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 +# CONSTRUCT x0e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@1 +# AUNIT --inst x0e202400/mask=xffe0fc00 --status nopcodeop + +:shsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_shsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 +# CONSTRUCT x4e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@2 +# AUNIT --inst x4e602400/mask=xffe0fc00 --status nopcodeop + +:shsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_shsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x6f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@1 +# AUNIT --inst x6f085400/mask=xfff8fc00 --status nopcodeop + +:sli Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sli(Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3:1, 1:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x6f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@8 +# AUNIT --inst x6f405400/mask=xffc0fc00 --status nopcodeop + +:sli Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sli(Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63:1, 8:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x2f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@4 +# AUNIT --inst x2f205400/mask=xffe0fc00 --status nopcodeop + +:sli Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sli(Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5:1, 4:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x2f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@2 +# AUNIT --inst x2f105400/mask=xfff0fc00 --status nopcodeop + +:sli 
Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sli(Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4:1, 2:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x6f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@4 +# AUNIT --inst x6f205400/mask=xffe0fc00 --status nopcodeop + +:sli Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sli(Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5:1, 4:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x2f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@1 +# AUNIT --inst x2f085400/mask=xfff8fc00 --status nopcodeop + +:sli Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sli(Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3:1, 1:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 +# CONSTRUCT x6f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@2 +# AUNIT --inst x6f105400/mask=xfff0fc00 --status nopcodeop + +:sli Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sli(Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4:1, 2:1); +} + +# C7.2.258 SLI page C7-1963 line 110042 MATCH x7f005400/mask=xff80fc00 +# CONSTRUCT x7f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3 +# AUNIT --inst x7f405400/mask=xffc0fc00 --status nopcodeop + +:sli Rd_VPR64, Rn_VPR64, Imm_uimm5 +is b_2331=0b011111110 & b_22=1 & b_1015=0b010101 & Rd_VPR64 & Rn_VPR64 & Imm_uimm5 & Zd +{ + Rd_VPR64 = NEON_sli(Rd_VPR64, Rn_VPR64, Imm_uimm5:1); +} + +# C7.2.259 SM3PARTW1 page C7-1966 line 110207 MATCH xce60c000/mask=xffe0fc00 +# CONSTRUCT xce60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3partw1/3@4 +# AUNIT --inst xce60c000/mask=xffe0fc00 --status noqemu + +:sm3partw1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_2131=0b11001110011 & b_1015=0b110000 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sm3partw1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.260 SM3PARTW2 page C7-1968 line 110294 MATCH xce60c400/mask=xffe0fc00 +# CONSTRUCT xce60c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3partw2/3@4 +# AUNIT --inst xce60c400/mask=xffe0fc00 --status noqemu + +:sm3partw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_2131=0b11001110011 & b_1015=0b110001 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sm3partw2(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.261 SM3SS1 page C7-1970 line 110380 MATCH xce400000/mask=xffe08000 +# CONSTRUCT xce400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_sm3ss1/3@4 +# AUNIT --inst xce400000/mask=xffe08000 --status noqemu + +:sm3ss1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, Ra_VPR128.4S +is b_2131=0b11001110010 & b_15=0 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Ra_VPR128.4S 
& Zd
+{
+ Rd_VPR128.4S = NEON_sm3ss1(Rn_VPR128.4S, Rm_VPR128.4S, Ra_VPR128.4S, 4:1);
+}
+
+# C7.2.247 SM3TT1A page C7-1529 line 88534 KEEPWITH
+
+sm3imm2: b_1213 is b_1213 { export *[const]:4 b_1213; }
+Re_VPR128.S.sm3imm2: Re_VPR128.S^"["^sm3imm2^"]" is Re_VPR128.S & sm3imm2 { export Re_VPR128.S; }
+
+# C7.2.262 SM3TT1A page C7-1972 line 110466 MATCH xce408000/mask=xffe0cc00
+# CONSTRUCT xce408000/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt1a/3@4
+# AUNIT --inst xce408000/mask=xffe0cc00 --status noqemu
+
+:sm3tt1a Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2
+is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b00 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd
+{
+ local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1);
+ Rd_VPR128.4S = NEON_sm3tt1a(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1);
+}
+
+# C7.2.263 SM3TT1B page C7-1974 line 110572 MATCH xce408400/mask=xffe0cc00
+# CONSTRUCT xce408400/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt1b/3@4
+# AUNIT --inst xce408400/mask=xffe0cc00 --status noqemu
+
+:sm3tt1b Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2
+is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b01 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd
+{
+ local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1);
+ Rd_VPR128.4S = NEON_sm3tt1b(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1);
+}
+
+# C7.2.264 SM3TT2A page C7-1976 line 110678 MATCH xce408800/mask=xffe0cc00
+# CONSTRUCT xce408800/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt2a/3@4
+# AUNIT --inst xce408800/mask=xffe0cc00 --status noqemu
+
+:sm3tt2a Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2
+is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b10 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd
+{
+ local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1);
+ Rd_VPR128.4S = NEON_sm3tt2a(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1);
+}
+
+# C7.2.265 SM3TT2B page C7-1978 line 110783 MATCH xce408c00/mask=xffe0cc00
+# CONSTRUCT xce408c00/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt2b/3@4
+# AUNIT --inst xce408c00/mask=xffe0cc00 --status noqemu
+
+:sm3tt2b Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2
+is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b11 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd
+{
+ local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1);
+ Rd_VPR128.4S = NEON_sm3tt2b(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1);
+}
+
+# C7.2.266 SM4E page C7-1980 line 110888 MATCH xcec08400/mask=xfffffc00
+# CONSTRUCT xcec08400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_sm4e/2@4
+# AUNIT --inst xcec08400/mask=xfffffc00 --status noqemu
+
+:sm4e Rd_VPR128.4S, Rn_VPR128.4S
+is b_1031=0b1100111011000000100001 & Rd_VPR128.4S & Rn_VPR128.4S & Zd
+{
+ Rd_VPR128.4S = NEON_sm4e(Rd_VPR128.4S, Rn_VPR128.4S, 4:1);
+}
+
+# C7.2.267 SM4EKEY page C7-1982 line 110982 MATCH xce60c800/mask=xffe0fc00
+# CONSTRUCT xce60c800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm4ekey/3@4
+# AUNIT --inst xce60c800/mask=xffe0fc00 --status noqemu
+
+:sm4ekey Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_2131=0b11001110011 & b_1015=0b110010 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd
+{
+ Rd_VPR128.4S = NEON_sm4ekey(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
+}
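+
+# Note: the SM3/SM4 crypto instructions above are modelled as opaque
+# pcodeops (NEON_sm3*, NEON_sm4*) rather than expanded to bit-level
+# semantics, so consumers of the spec see them as black-box operations
+# on their register operands; the AUNIT status "noqemu" presumably
+# reflects the lack of a qemu reference implementation to test against.
+
+# C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00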
+# CONSTRUCT x4e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@1
+# AUNIT --inst x4e206400/mask=xffe0fc00 --status nopcodeop
+
+:smax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+ Rd_VPR128.16B = NEON_smax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1);
+}
+
+# C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00
+# CONSTRUCT x0ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@4
+# AUNIT --inst x0ea06400/mask=xffe0fc00 --status nopcodeop
+
+:smax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ Rd_VPR64.2S = NEON_smax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
+}
+
+# C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00
+# CONSTRUCT x0e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@2
+# AUNIT --inst x0e606400/mask=xffe0fc00 --status nopcodeop
+
+:smax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+ Rd_VPR64.4H = NEON_smax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
+}
+
+# C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00
+# CONSTRUCT x4ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@4
+# AUNIT --inst x4ea06400/mask=xffe0fc00 --status nopcodeop
+
+:smax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+ Rd_VPR128.4S = NEON_smax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
+}
+
+# C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00
+# CONSTRUCT x0e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@1
+# AUNIT --inst x0e206400/mask=xffe0fc00 --status nopcodeop
+
+:smax Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+ Rd_VPR64.8B = NEON_smax(Rn_VPR64.8B, Rm_VPR64.8B, 1:1);
+}
+
+# C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00
+# CONSTRUCT x4e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@2
+# AUNIT --inst x4e606400/mask=xffe0fc00 --status nopcodeop
+
+:smax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+ Rd_VPR128.8H = NEON_smax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
+}
+
+# C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00
+# CONSTRUCT x4e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@1
+# AUNIT --inst x4e20a400/mask=xffe0fc00 --status nopcodeop
+
+:smaxp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+ Rd_VPR128.16B = NEON_smaxp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1);
+}
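+
+# Note: SMAXP takes a signed pairwise maximum over adjacent lanes of the
+# concatenation Rn:Rm; like SMAX it is left as a pseudo op here (AUNIT
+# status "nopcodeop"), so no per-lane expansion is emitted and symbolic
+# consumers see NEON_smaxp as an opaque function of its operands.
+
+# C7.2.269 SMAXP page C7-1986 line 111178 MATCH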
x0e20a400/mask=xbf20fc00 +# CONSTRUCT x0ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@4 +# AUNIT --inst x0ea0a400/mask=xffe0fc00 --status nopcodeop + +:smaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_smaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 +# CONSTRUCT x0e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@2 +# AUNIT --inst x0e60a400/mask=xffe0fc00 --status nopcodeop + +:smaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_smaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 +# CONSTRUCT x4ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@4 +# AUNIT --inst x4ea0a400/mask=xffe0fc00 --status nopcodeop + +:smaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_smaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 +# CONSTRUCT x0e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@1 +# AUNIT --inst x0e20a400/mask=xffe0fc00 --status nopcodeop + +:smaxp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_smaxp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 +# CONSTRUCT x4e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@2 +# AUNIT --inst x4e60a400/mask=xffe0fc00 --status nopcodeop + +:smaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_smaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 +# CONSTRUCT x4e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@1 +# AUNIT --inst x4e30a800/mask=xfffffc00 --status nopcodeop + +:smaxv Rd_FPR8, Rn_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_smaxv(Rn_VPR128.16B, 1:1); +} + +# C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 +# CONSTRUCT x0e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@1 +# AUNIT --inst x0e30a800/mask=xfffffc00 --status nopcodeop + +:smaxv Rd_FPR8, Rn_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_smaxv(Rn_VPR64.8B, 1:1); +} + +# C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 +# CONSTRUCT x0e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) 
ARG1 ARG2 =NEON_smaxv/1@2 +# AUNIT --inst x0e70a800/mask=xfffffc00 --status nopcodeop + +:smaxv Rd_FPR16, Rn_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_smaxv(Rn_VPR64.4H, 2:1); +} + +# C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 +# CONSTRUCT x4e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@2 +# AUNIT --inst x4e70a800/mask=xfffffc00 --status nopcodeop + +:smaxv Rd_FPR16, Rn_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_smaxv(Rn_VPR128.8H, 2:1); +} + +# C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 +# CONSTRUCT x4eb0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@4 +# AUNIT --inst x4eb0a800/mask=xfffffc00 --status nopcodeop + +:smaxv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_smaxv(Rn_VPR128.4S, 4:1); +} + +# C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 +# CONSTRUCT x4e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@1 +# AUNIT --inst x4e206c00/mask=xffe0fc00 --status nopcodeop + +:smin Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xd & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_smin(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 +# CONSTRUCT x0ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@4 +# AUNIT --inst x0ea06c00/mask=xffe0fc00 --status nopcodeop + +:smin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xd & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_smin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 +# CONSTRUCT x0e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@2 +# AUNIT --inst x0e606c00/mask=xffe0fc00 --status nopcodeop + +:smin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xd & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_smin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 +# CONSTRUCT x4ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@4 +# AUNIT --inst x4ea06c00/mask=xffe0fc00 --status nopcodeop + +:smin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xd & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_smin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 +# CONSTRUCT x0e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@1 +# AUNIT --inst x0e206c00/mask=xffe0fc00 --status nopcodeop + +:smin Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & 
b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xd & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_smin(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 +# CONSTRUCT x4e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@2 +# AUNIT --inst x4e606c00/mask=xffe0fc00 --status nopcodeop + +:smin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xd & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_smin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 +# CONSTRUCT x4e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@1 +# AUNIT --inst x4e20ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:sminp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x15 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sminp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 +# CONSTRUCT x0ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@4 +# AUNIT --inst x0ea0ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:sminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x15 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 +# CONSTRUCT x0e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@2 +# AUNIT --inst x0e60ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:sminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x15 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 +# CONSTRUCT x4ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@4 +# AUNIT --inst x4ea0ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:sminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x15 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 +# CONSTRUCT x0e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@1 +# AUNIT --inst x0e20ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:sminp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x15 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sminp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 +# CONSTRUCT x4e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@2 +# AUNIT --inst 
x4e60ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:sminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x15 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 +# CONSTRUCT x4e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@1 +# AUNIT --inst x4e31a800/mask=xfffffc00 --status nopcodeop + +:sminv Rd_FPR8, Rn_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sminv(Rn_VPR128.16B, 1:1); +} + +# C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 +# CONSTRUCT x0e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@1 +# AUNIT --inst x0e31a800/mask=xfffffc00 --status nopcodeop + +:sminv Rd_FPR8, Rn_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sminv(Rn_VPR64.8B, 1:1); +} + +# C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 +# CONSTRUCT x0e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@2 +# AUNIT --inst x0e71a800/mask=xfffffc00 --status nopcodeop + +:sminv Rd_FPR16, Rn_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sminv(Rn_VPR64.4H, 2:1); +} + +# C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 +# CONSTRUCT x4e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@2 +# AUNIT --inst x4e71a800/mask=xfffffc00 --status nopcodeop + +:sminv Rd_FPR16, Rn_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sminv(Rn_VPR128.8H, 2:1); +} + +# C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 +# CONSTRUCT x4eb1a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@4 +# AUNIT --inst x4eb1a800/mask=xfffffc00 --status nopcodeop + +:sminv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sminv(Rn_VPR128.4S, 4:1); +} + +# C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 +# CONSTRUCT x0f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@4 +# AUNIT --inst x0f802000/mask=xffc0f400 --status pass --comment "ext" + +:smlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix 
Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 +# CONSTRUCT x0f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@2 +# AUNIT --inst x0f402000/mask=xffc0f400 --status pass --comment "ext" + +:smlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 +# CONSTRUCT x4f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@4 +# AUNIT --inst x4f802000/mask=xffc0f400 --status pass --comment "ext" + +:smlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 +# CONSTRUCT x4f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@2 +# AUNIT --inst x4f402000/mask=xffc0f400 --status pass --comment "ext" + +:smlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) 
(lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 +# CONSTRUCT x0ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@4 +# AUNIT --inst x0ea08000/mask=xffe0fc00 --status pass --comment "ext" + +:smlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x8 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 +# CONSTRUCT x0e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@2 +# AUNIT --inst x0e608000/mask=xffe0fc00 --status pass --comment "ext" + +:smlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x8 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + 
TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 +# CONSTRUCT x0e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $*@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@1 +# AUNIT --inst x0e208000/mask=xffe0fc00 --status pass --comment "ext" + +:smlal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x8 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ3 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ3[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 +# CONSTRUCT x4ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@4 +# AUNIT --inst x4ea08000/mask=xffe0fc00 --status pass --comment "ext" + +:smlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x8 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ5 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = 
Rd_VPR128.2D[64,64] + TMPQ5[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 +# CONSTRUCT x4e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@2 +# AUNIT --inst x4e608000/mask=xffe0fc00 --status pass --comment "ext" + +:smlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x8 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ5 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ5[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 +# CONSTRUCT x4e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $*@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@1 +# AUNIT --inst x4e208000/mask=xffe0fc00 --status pass --comment "ext" + +:smlal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x8 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ5 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = 
Rd_VPR128.8H[16,16] + TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ5[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 +# CONSTRUCT x0f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@4 +# AUNIT --inst x0f806000/mask=xffc0f400 --status pass --comment "ext" + +:smlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 +# CONSTRUCT x0f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@2 +# AUNIT --inst x0f406000/mask=xffc0f400 --status pass --comment "ext" + +:smlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 +# CONSTRUCT x4f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@4 +# AUNIT --inst x4f806000/mask=xffc0f400 --status pass --comment "ext" + +:smlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & 
Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 +# CONSTRUCT x4f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@2 +# AUNIT --inst x4f406000/mask=xffc0f400 --status pass --comment "ext" + +:smlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 +# CONSTRUCT x0ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@4 +# AUNIT --inst x0ea0a000/mask=xffe0fc00 --status pass --comment "ext" + +:smlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xa & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 +# CONSTRUCT x0e60a000/mask=xffe0fc00 MATCHED 1 
DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@2 +# AUNIT --inst x0e60a000/mask=xffe0fc00 --status pass --comment "ext" + +:smlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xa & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 +# CONSTRUCT x0e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $*@2 &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@1 +# AUNIT --inst x0e20a000/mask=xffe0fc00 --status pass --comment "ext" + +:smlsl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xa & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ3 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ3[64,16]; + 
Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ3[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 +# CONSTRUCT x4ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@4 +# AUNIT --inst x4ea0a000/mask=xffe0fc00 --status pass --comment "ext" + +:smlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xa & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ5 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ5[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 +# CONSTRUCT x4e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@2 +# AUNIT --inst x4e60a000/mask=xffe0fc00 --status pass --comment "ext" + +:smlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xa & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ5 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ5[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 +# CONSTRUCT x4e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $*@2 &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@1 +# AUNIT --inst x4e20a000/mask=xffe0fc00 --status pass --comment "ext" + +:smlsl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xa 
& b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ5 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ5[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 +# CONSTRUCT x0e012c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sext +# SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 +# AUNIT --inst x0e012c00/mask=xffe1fc00 --status pass + +:smov Rd_GPR32, Rn_VPR128.B.imm_neon_uimm4 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + Rd_GPR32 = sext(tmp1); + zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 +} + +# C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 +# CONSTRUCT x0e022c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sext +# SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 +# AUNIT --inst x0e022c00/mask=xffe3fc00 --status pass + +:smov Rd_GPR32, Rn_VPR128.H.imm_neon_uimm3 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + Rd_GPR32 = sext(tmp1); + zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 +} + +# C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 +# CONSTRUCT x4e012c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sext +# SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 +# AUNIT --inst x4e012c00/mask=xffe1fc00 --status pass + +:smov Rd_GPR64, Rn_VPR128.B.imm_neon_uimm4 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & 
Rn_VPR128 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + Rd_GPR64 = sext(tmp1); +} + +# C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 +# CONSTRUCT x4e022c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sext +# SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 +# AUNIT --inst x4e022c00/mask=xffe3fc00 --status pass + +:smov Rd_GPR64, Rn_VPR128.H.imm_neon_uimm3 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + Rd_GPR64 = sext(tmp1); +} + +# C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 +# CONSTRUCT x4e042c00/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =sext +# SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 +# AUNIT --inst x4e042c00/mask=xffe7fc00 --status pass + +:smov Rd_GPR64, Rn_VPR128.S.imm_neon_uimm2 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + Rd_GPR64 = sext(tmp1); +} + +# C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 +# CONSTRUCT x0f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@4 +# AUNIT --inst x0f80a000/mask=xffc0f400 --status pass --comment "ext" + +:smull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix Rd_VPR128.2D = TMPQ1 * tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] * tmp3; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 +# CONSTRUCT x0f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@2 +# AUNIT --inst x0f40a000/mask=xffc0f400 --status pass --comment "ext" + +:smull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix Rd_VPR128.4S = TMPQ1 * tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] * tmp3; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] * tmp3; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] * tmp3; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.280 SMULL, 
SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 +# CONSTRUCT x4f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@4 +# AUNIT --inst x4f80a000/mask=xffc0f400 --status pass --comment "ext" + +:smull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = sext(tmp3); + # simd infix Rd_VPR128.2D = TMPQ2 * tmp4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * tmp4; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 +# CONSTRUCT x4f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@2 +# AUNIT --inst x4f40a000/mask=xffc0f400 --status pass --comment "ext" + +:smull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = sext(tmp3); + # simd infix Rd_VPR128.4S = TMPQ2 * tmp4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * tmp4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * tmp4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * tmp4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 +# CONSTRUCT x0ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 =$*@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@4 +# AUNIT --inst x0ea0c000/mask=xffe0fc00 --status pass --comment "ext" + +:smull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xc & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = TMPQ1 * TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 +# CONSTRUCT x0e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 =$*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@2 +# AUNIT --inst 
x0e60c000/mask=xffe0fc00 --status pass --comment "ext" + +:smull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xc & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = TMPQ1 * TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 +# CONSTRUCT x0e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 =$*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@1 +# AUNIT --inst x0e20c000/mask=xffe0fc00 --status pass --comment "ext" + +:smull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xc & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = TMPQ1 * TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 +# CONSTRUCT x4ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 =$*@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@4 +# AUNIT --inst x4ea0c000/mask=xffe0fc00 --status pass --comment "ext" + +:smull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xc & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Rm_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + 
TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix Rd_VPR128.2D = TMPQ2 * TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 +# CONSTRUCT x4e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 =$*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@2 +# AUNIT --inst x4e60c000/mask=xffe0fc00 --status pass --comment "ext" + +:smull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xc & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Rm_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix Rd_VPR128.4S = TMPQ2 * TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 +# CONSTRUCT x4e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 =$*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@1 +# AUNIT --inst x4e20c000/mask=xffe0fc00 --status pass --comment "ext" + +:smull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xc & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Rn_VPR128 & Rm_VPR128 & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); + # simd infix Rd_VPR128.8H = TMPQ2 * TMPQ4 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; + zext_zq(Zd); 
# zero upper 16 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 +# CONSTRUCT x5e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =abs +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 +# AUNIT --inst x5e207800/mask=xfffffc00 --status pass --comment "nointsat" +# Scalar variant when size = 00 Q = 1 aa=1 suf=FPR8 +# Note: in some implemented semantics that ignore saturation (where it +# makes a difference), there is an error in about 50% of the lanes. + +:sqabs Rd_FPR8, Rn_FPR8 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR8 & Rn_FPR8 & Zd +{ + Rd_FPR8 = MP_INT_ABS(Rn_FPR8); + zext_zb(Zd); # zero upper 31 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 +# CONSTRUCT x5e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =abs +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 +# AUNIT --inst x5e607800/mask=xfffffc00 --status pass --comment "nointsat" +# Scalar variant when size = 01 Q = 1 aa=1 suf=FPR16 + +:sqabs Rd_FPR16, Rn_FPR16 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = MP_INT_ABS(Rn_FPR16); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 +# CONSTRUCT x5ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =abs +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 +# AUNIT --inst x5ea07800/mask=xfffffc00 --status pass --comment "nointsat" +# Scalar variant when size = 10 Q = 1 aa=1 suf=FPR32 + +:sqabs Rd_FPR32, Rn_FPR32 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = MP_INT_ABS(Rn_FPR32); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 +# CONSTRUCT x5ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =abs +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 +# AUNIT --inst x5ee07800/mask=xfffffc00 --status pass --comment "nointsat" +# Scalar variant when size = 11 Q = 1 aa=1 suf=FPR64 + +:sqabs Rd_FPR64, Rn_FPR64 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = MP_INT_ABS(Rn_FPR64); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x0e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@1 +# AUNIT --inst x0e207800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size = 00 Q = 0 aa=0 esize=1 suf=VPR64.8B + +:sqabs Rd_VPR64.8B, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd +{ + # simd unary Rd_VPR64.8B = MP_INT_ABS(Rn_VPR64.8B) on lane size 1 + Rd_VPR64.8B[0,8] = MP_INT_ABS(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = MP_INT_ABS(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = MP_INT_ABS(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = MP_INT_ABS(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = MP_INT_ABS(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = MP_INT_ABS(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = MP_INT_ABS(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = MP_INT_ABS(Rn_VPR64.8B[56,8]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x4e207800/mask=xfffffc00 MATCHED 1 
DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@1 +# AUNIT --inst x4e207800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size = 00 Q = 1 aa=0 esize=1 suf=VPR128.16B + +:sqabs Rd_VPR128.16B, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd +{ + # simd unary Rd_VPR128.16B = MP_INT_ABS(Rn_VPR128.16B) on lane size 1 + Rd_VPR128.16B[0,8] = MP_INT_ABS(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = MP_INT_ABS(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = MP_INT_ABS(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = MP_INT_ABS(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = MP_INT_ABS(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = MP_INT_ABS(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = MP_INT_ABS(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = MP_INT_ABS(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = MP_INT_ABS(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = MP_INT_ABS(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = MP_INT_ABS(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = MP_INT_ABS(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = MP_INT_ABS(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = MP_INT_ABS(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = MP_INT_ABS(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = MP_INT_ABS(Rn_VPR128.16B[120,8]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x0e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@2 +# AUNIT --inst x0e607800/mask=xfffffc00 --status pass --comment "nointsat" +# Vector variant when size = 01 Q = 0 aa=0 esize=2 suf=VPR64.4H + +:sqabs Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + # simd unary Rd_VPR64.4H = MP_INT_ABS(Rn_VPR64.4H) on lane size 2 + Rd_VPR64.4H[0,16] = MP_INT_ABS(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = MP_INT_ABS(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = MP_INT_ABS(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = MP_INT_ABS(Rn_VPR64.4H[48,16]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x4e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@2 +# AUNIT --inst x4e607800/mask=xfffffc00 --status pass --comment "nointsat" +# Vector variant when size = 01 Q = 1 aa=0 esize=2 suf=VPR128.8H + +:sqabs Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + # simd unary Rd_VPR128.8H = MP_INT_ABS(Rn_VPR128.8H) on lane size 2 + Rd_VPR128.8H[0,16] = MP_INT_ABS(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(Rn_VPR128.8H[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x0ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@4 +# 
AUNIT --inst x0ea07800/mask=xfffffc00 --status pass --comment "nointsat" +# Vector variant when size = 10 Q = 0 aa=0 esize=4 suf=VPR64.2S + +:sqabs Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + # simd unary Rd_VPR64.2S = MP_INT_ABS(Rn_VPR64.2S) on lane size 4 + Rd_VPR64.2S[0,32] = MP_INT_ABS(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = MP_INT_ABS(Rn_VPR64.2S[32,32]); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x4ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@4 +# AUNIT --inst x4ea07800/mask=xfffffc00 --status pass --comment "nointsat" +# Vector variant when size = 10 Q = 1 aa=0 esize=4 suf=VPR128.4S + +:sqabs Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + # simd unary Rd_VPR128.4S = MP_INT_ABS(Rn_VPR128.4S) on lane size 4 + Rd_VPR128.4S[0,32] = MP_INT_ABS(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(Rn_VPR128.4S[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 +# CONSTRUCT x4ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =$abs@8 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@8 +# AUNIT --inst x4ee07800/mask=xfffffc00 --status pass --comment "nointsat" +# Vector variant when size = 11 Q = 1 aa=0 esize=8 suf=VPR128.2D + +:sqabs Rd_VPR128.2D, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + # simd unary Rd_VPR128.2D = MP_INT_ABS(Rn_VPR128.2D) on lane size 8 + Rd_VPR128.2D[0,64] = MP_INT_ABS(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(Rn_VPR128.2D[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 +# CONSTRUCT x5e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 +# AUNIT --inst x5e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x1 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sqadd(Rn_FPR8, Rm_FPR8); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 +# CONSTRUCT x5ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 +# AUNIT --inst x5ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x1 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_sqadd(Rn_FPR64, Rm_FPR64); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 +# CONSTRUCT x5e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 +# AUNIT --inst x5e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x1 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqadd(Rn_FPR16, Rm_FPR16); +} + +# C7.2.283 
SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 +# CONSTRUCT x5ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 +# AUNIT --inst x5ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x1 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqadd(Rn_FPR32, Rm_FPR32); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x4e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@1 +# AUNIT --inst x4e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x1 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x4ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@8 +# AUNIT --inst x4ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x1 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x0ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@4 +# AUNIT --inst x0ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x1 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x0e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@2 +# AUNIT --inst x0e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x1 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x4ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@4 +# AUNIT --inst x4ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x1 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x0e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@1 +# AUNIT --inst x0e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x1 & b_1010=1 & 
Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 +# CONSTRUCT x4e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@2 +# AUNIT --inst x4e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x1 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x5f003000/mask=xff00f400 +# CONSTRUCT x5f803000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=+/2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3 +# AUNIT --inst x5f803000/mask=xffc0f400 --status fail --comment "nointsat" +# scalar variant, size == 10 (always part == 0) + +:sqdmlal Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex +is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rd_FPR64 & Rn_FPR32 & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + local tmp4:8 = tmp1 * tmp3; + local tmp5:8 = tmp4 * 2:8; + Rd_FPR64 = Rd_FPR64 + tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x5f003000/mask=xff00f400 +# CONSTRUCT x5f403000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 sext:4 ARG2 sext:4 * 2:4 * &=+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3 +# AUNIT --inst x5f403000/mask=xffc0f400 --status fail --comment "nointsat" +# scalar variant, size == 01 (always part == 0) + +:sqdmlal Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rd_FPR32 & Rn_FPR16 & Zd +{ + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp2:4 = sext(tmp1); + local tmp3:4 = sext(Rn_FPR16); + local tmp4:4 = tmp2 * tmp3; + local tmp5:4 = tmp4 * 2:4; + Rd_FPR32 = Rd_FPR32 + tmp5; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 +# CONSTRUCT x0f803000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@4 +# AUNIT --inst x0f803000/mask=xffc0f400 --status fail --comment "ext nointsat" +# vector variant, Q == 0, size == 10 + +:sqdmlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * 2:8; + TMPQ3[64,64] = TMPQ2[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 
on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 +# CONSTRUCT x0f403000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@2 +# AUNIT --inst x0f403000/mask=xffc0f400 --status fail --comment "ext nointsat" +# vector variant, Q = 0, size == 01 + +:sqdmlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * 2:4; + TMPQ3[32,32] = TMPQ2[32,32] * 2:4; + TMPQ3[64,32] = TMPQ2[64,32] * 2:4; + TMPQ3[96,32] = TMPQ2[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 +# CONSTRUCT x4f803000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@4 +# AUNIT --inst x4f803000/mask=xffc0f400 --status fail --comment "ext nointsat" +# vector variant, Q = 1, size == 10 + +:sqdmlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 +# CONSTRUCT x4f403000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 
&=NEON_sqdmlal2/3@2 +# AUNIT --inst x4f403000/mask=xffc0f400 --status fail --comment "ext nointsat" +# vector variant, Q = 1, size == 01 + +:sqdmlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x5e209000/mask=xff20fc00 +# CONSTRUCT x5ea09000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3 +# AUNIT --inst x5ea09000/mask=xffe0fc00 --status fail --comment "nointsat" +# scalar variant, size == 10 (always part == 0) + +:sqdmlal Rd_FPR64, Rn_FPR32, Rm_FPR32 +is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + local tmp2:8 = sext(Rm_FPR32); + local tmp3:8 = tmp1 * tmp2; + local tmp4:8 = tmp3 * 2:8; + Rd_FPR64 = Rd_FPR64 + tmp4; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x5e209000/mask=xff20fc00 +# CONSTRUCT x5e609000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * &=+ +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3 +# AUNIT --inst x5e609000/mask=xffe0fc00 --status fail --comment "nointsat" +# scalar variant, size == 01 (always part == 0) + +:sqdmlal Rd_FPR32, Rn_FPR16, Rm_FPR16 +is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd +{ + local tmp1:4 = sext(Rn_FPR16); + local tmp2:4 = sext(Rm_FPR16); + local tmp3:4 = tmp1 * tmp2; + local tmp4:4 = tmp3 * 2:4; + Rd_FPR32 = Rd_FPR32 + tmp4; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 +# CONSTRUCT x0ea09000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 2:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@4 +# AUNIT --inst x0ea09000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q == 0, size == 10 + +:sqdmlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rn_VPR64.2S & Rd_VPR128.2D & Rm_VPR64.2S & Zd +{ + # simd resize TMPQ1 = 
sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 +# CONSTRUCT x0e609000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 2:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@2 +# AUNIT --inst x0e609000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q = 0, size == 01 + +:sqdmlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rn_VPR64.4H & Rd_VPR128.4S & Rm_VPR64.4H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 +# CONSTRUCT x4ea09000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 2:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@4 +# AUNIT --inst x4ea09000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q = 1, size == 10 + +:sqdmlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rn_VPR128.4S & Rd_VPR128.2D & Rm_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + 
TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix TMPQ6 = TMPQ5 * 2:8 on lane size 8 + TMPQ6[0,64] = TMPQ5[0,64] * 2:8; + TMPQ6[64,64] = TMPQ5[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 +# CONSTRUCT x4e609000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 2:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@2 +# AUNIT --inst x4e609000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q = 1, size == 01 + +:sqdmlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rn_VPR128.8H & Rd_VPR128.4S & Rm_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix TMPQ6 = TMPQ5 * 2:4 on lane size 4 + TMPQ6[0,32] = TMPQ5[0,32] * 2:4; + TMPQ6[32,32] = TMPQ5[32,32] * 2:4; + TMPQ6[64,32] = TMPQ5[64,32] * 2:4; + TMPQ6[96,32] = TMPQ5[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x5f007000/mask=xff00f400 +# CONSTRUCT x5f807000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=- +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3 +# AUNIT --inst x5f807000/mask=xffc0f400 --status fail --comment "nointsat" +# scalar variant, size == 10 (always part == 0) + +:sqdmlsl Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex +is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rd_FPR64 & Rn_FPR32 & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + local tmp4:8 = tmp1 * tmp3; + local tmp5:8 = tmp4 * 2:8; + Rd_FPR64 = Rd_FPR64 - tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x5f007000/mask=xff00f400 +# CONSTRUCT x5f407000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * &=- +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3 +# AUNIT --inst x5f407000/mask=xffc0f400 --status fail --comment "nointsat" +# scalar variant, size == 01 (always part == 0) + +:sqdmlsl Rd_FPR32, 
Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rd_FPR32 & Rn_FPR16 & Zd +{ + local tmp1:4 = sext(Rn_FPR16); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + local tmp4:4 = tmp1 * tmp3; + local tmp5:4 = tmp4 * 2:4; + Rd_FPR32 = Rd_FPR32 - tmp5; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400 +# CONSTRUCT x0f807000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@4 +# AUNIT --inst x0f807000/mask=xffc0f400 --status fail --comment "ext nointsat" +# vector variant, Q == 0, size == 10 + +:sqdmlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * 2:8; + TMPQ3[64,64] = TMPQ2[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400 +# CONSTRUCT x0f407000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@2 +# AUNIT --inst x0f407000/mask=xffc0f400 --status fail --comment "ext nointsat" +# vector variant, Q = 0, size == 01 + +:sqdmlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * 2:4; + TMPQ3[32,32] = TMPQ2[32,32] * 2:4; + TMPQ3[64,32] = TMPQ2[64,32] * 2:4; + TMPQ3[96,32] = TMPQ2[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + 
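+# Worked example for the by-element form above (illustrative, not part of
+# the generated spec): with Rn_VPR64.4H lane 0 = 0x4000 and the indexed
+# H element = 0x4000, the widened product is sext(0x4000) * sext(0x4000) =
+# 0x10000000, doubling gives 0x20000000, and that value is subtracted from
+# lane 0 of Rd_VPR128.4S. Saturation of the doubled product is not modeled
+# by this p-code (see the "nointsat" status comments).
+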
+# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400
+# CONSTRUCT x4f807000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@4
+# AUNIT --inst x4f807000/mask=xffc0f400 --status fail --comment "ext nointsat"
+# vector variant, Q = 1, size == 10
+
+:sqdmlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex
+is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.2D & Zd
+{
+    TMPD1 = Rn_VPR128.4S[64,64];
+    # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8)
+    TMPQ2[0,64] = sext(TMPD1[0,32]);
+    TMPQ2[64,64] = sext(TMPD1[32,32]);
+    # simd element Re_VPR128.S[vIndex] lane size 4
+    local tmp3:4 = Re_VPR128.S.vIndex;
+    local tmp4:8 = sext(tmp3);
+    # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8
+    TMPQ3[0,64] = TMPQ2[0,64] * tmp4;
+    TMPQ3[64,64] = TMPQ2[64,64] * tmp4;
+    # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8
+    TMPQ4[0,64] = TMPQ3[0,64] * 2:8;
+    TMPQ4[64,64] = TMPQ3[64,64] * 2:8;
+    # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8
+    Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ4[0,64];
+    Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ4[64,64];
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400
+# CONSTRUCT x4f407000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@2
+# AUNIT --inst x4f407000/mask=xffc0f400 --status fail --comment "ext nointsat"
+# vector variant, Q = 1, size == 01
+
+:sqdmlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM
+is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR128.8H & Rd_VPR128.4S & Zd
+{
+    TMPD1 = Rn_VPR128.8H[64,64];
+    # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4)
+    TMPQ2[0,32] = sext(TMPD1[0,16]);
+    TMPQ2[32,32] = sext(TMPD1[16,16]);
+    TMPQ2[64,32] = sext(TMPD1[32,16]);
+    TMPQ2[96,32] = sext(TMPD1[48,16]);
+    # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
+    local tmp3:2 = Re_VPR128Lo.H.vIndexHLM;
+    local tmp4:4 = sext(tmp3);
+    # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4
+    TMPQ3[0,32] = TMPQ2[0,32] * tmp4;
+    TMPQ3[32,32] = TMPQ2[32,32] * tmp4;
+    TMPQ3[64,32] = TMPQ2[64,32] * tmp4;
+    TMPQ3[96,32] = TMPQ2[96,32] * tmp4;
+    # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4
+    TMPQ4[0,32] = TMPQ3[0,32] * 2:4;
+    TMPQ4[32,32] = TMPQ3[32,32] * 2:4;
+    TMPQ4[64,32] = TMPQ3[64,32] * 2:4;
+    TMPQ4[96,32] = TMPQ3[96,32] * 2:4;
+    # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4
+    Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32];
+    Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32];
+    Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32];
+    Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32];
+    zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x5e20b000/mask=xff20fc00
+# CONSTRUCT x5ea0b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=-
+# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3
+# AUNIT --inst x5ea0b000/mask=xffe0fc00 --status fail --comment "nointsat"
+# scalar variant, size == 10 (always part == 0)
+
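+# Illustrative corner case (not part of the generated spec): with Rn_FPR32 =
+# Rm_FPR32 = 0x80000000, the p-code below computes sext * sext =
+# 0x4000000000000000, doubles it to 0x8000000000000000 (wrapping negative),
+# and subtracts that from Rd_FPR64. Hardware SQDMLSL would instead saturate
+# the doubled product to 0x7fffffffffffffff and set FPSR.QC, which is why
+# the AUNIT status above is "fail".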
b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + local tmp2:8 = sext(Rm_FPR32); + local tmp3:8 = tmp1 * tmp2; + local tmp4:8 = tmp3 * 2:8; + Rd_FPR64 = Rd_FPR64 - tmp4; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x5e20b000/mask=xff20fc00 +# CONSTRUCT x5e60b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * &=- +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3 +# AUNIT --inst x5e60b000/mask=xffe0fc00 --status fail --comment "nointsat" +# scalar variant, size == 01 (always part == 0) + +:sqdmlsl Rd_FPR32, Rn_FPR16, Rm_FPR16 +is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd +{ + local tmp1:4 = sext(Rn_FPR16); + local tmp2:4 = sext(Rm_FPR16); + local tmp3:4 = tmp1 * tmp2; + local tmp4:4 = tmp3 * 2:4; + Rd_FPR32 = Rd_FPR32 - tmp4; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 +# CONSTRUCT x0ea0b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 2:8 $* &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3 +# AUNIT --inst x0ea0b000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q == 0, size == 10 + +:sqdmlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rn_VPR64.2S & Rd_VPR128.2D & Rm_VPR64.2S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 +# CONSTRUCT x0e60b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 2:4 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@2 +# AUNIT --inst x0e60b000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q = 0, size == 01 + +:sqdmlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rn_VPR64.4H & Rd_VPR128.4S & Rm_VPR64.4H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = 
TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 +# CONSTRUCT x4ea0b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 2:8 $* &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@4 +# AUNIT --inst x4ea0b000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q = 1, size == 10 + +:sqdmlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rn_VPR128.4S & Rd_VPR128.2D & Rm_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix TMPQ6 = TMPQ5 * 2:8 on lane size 8 + TMPQ6[0,64] = TMPQ5[0,64] * 2:8; + TMPQ6[64,64] = TMPQ5[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ6 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ6[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 +# CONSTRUCT x4e60b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 2:4 $* &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@2 +# AUNIT --inst x4e60b000/mask=xffe0fc00 --status fail --comment "ext nointsat" +# vector variant, Q = 1, size == 01 + +:sqdmlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rn_VPR128.8H & Rd_VPR128.4S & Rm_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix TMPQ6 = TMPQ5 * 2:4 on lane size 4 + TMPQ6[0,32] = TMPQ5[0,32] * 2:4; + TMPQ6[32,32] = TMPQ5[32,32] * 2:4; + TMPQ6[64,32] = TMPQ5[64,32] * 2:4; + TMPQ6[96,32] = TMPQ5[96,32] * 
2:4;
+ # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ6 on lane size 4
+ Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ6[0,32];
+ Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ6[32,32];
+ Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ6[64,32];
+ Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ6[96,32];
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400
+# CONSTRUCT x0f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$shuffle@1-0@3-1:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4
+# AUNIT --inst x0f80c000/mask=xffc0f400 --status pass --comment "ext nointsat"
+
+:sqdmulh Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex
+is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xc & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+ # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8)
+ TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]);
+ TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]);
+ # simd element Re_VPR128.S[vIndex] lane size 4
+ local tmp2:4 = Re_VPR128.S.vIndex;
+ local tmp3:8 = sext(tmp2);
+ # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8
+ TMPQ2[0,64] = TMPQ1[0,64] * tmp3;
+ TMPQ2[64,64] = TMPQ1[64,64] * tmp3;
+ # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8
+ TMPQ3[0,64] = TMPQ2[0,64] * 2:8;
+ TMPQ3[64,64] = TMPQ2[64,64] * 2:8;
+ # simd shuffle Rd_VPR64.2S = TMPQ3 (@1-0@3-1) lane size 4
+ Rd_VPR64.2S[0,32] = TMPQ3[32,32];
+ Rd_VPR64.2S[32,32] = TMPQ3[96,32];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400
+# CONSTRUCT x0f40c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$shuffle@1-0@3-1@5-2@7-3:2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2
+# AUNIT --inst x0f40c000/mask=xffc0f400 --status pass --comment "ext nointsat"
+
+:sqdmulh Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM
+is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xc & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+ # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4)
+ TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]);
+ TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]);
+ TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]);
+ TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]);
+ # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
+ local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
+ local tmp3:4 = sext(tmp2);
+ # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4
+ TMPQ2[0,32] = TMPQ1[0,32] * tmp3;
+ TMPQ2[32,32] = TMPQ1[32,32] * tmp3;
+ TMPQ2[64,32] = TMPQ1[64,32] * tmp3;
+ TMPQ2[96,32] = TMPQ1[96,32] * tmp3;
+ # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4
+ TMPQ3[0,32] = TMPQ2[0,32] * 2:4;
+ TMPQ3[32,32] = TMPQ2[32,32] * 2:4;
+ TMPQ3[64,32] = TMPQ2[64,32] * 2:4;
+ TMPQ3[96,32] = TMPQ2[96,32] * 2:4;
+ # simd shuffle Rd_VPR64.4H = TMPQ3 (@1-0@3-1@5-2@7-3) lane size 2
+ Rd_VPR64.4H[0,16] = TMPQ3[16,16];
+ Rd_VPR64.4H[16,16] = TMPQ3[48,16];
+ Rd_VPR64.4H[32,16] = TMPQ3[80,16];
+ Rd_VPR64.4H[48,16] = TMPQ3[112,16];
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400
+# CONSTRUCT x4f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $sext@4:32 ARG3 sext:8 $* 2:8 $* &=$shuffle@1-0@3-1@5-2@7-3:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4
+# AUNIT --inst
x4f80c000/mask=xffc0f400 --status pass --comment "ext nointsat" + +:sqdmulh Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xc & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd resize TMPZ1 = sext(Rn_VPR128.4S) (lane size 4 to 8) + TMPZ1[0,64] = sext(Rn_VPR128.4S[0,32]); + TMPZ1[64,64] = sext(Rn_VPR128.4S[32,32]); + TMPZ1[128,64] = sext(Rn_VPR128.4S[64,32]); + TMPZ1[192,64] = sext(Rn_VPR128.4S[96,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 8 + TMPZ2[0,64] = TMPZ1[0,64] * tmp3; + TMPZ2[64,64] = TMPZ1[64,64] * tmp3; + TMPZ2[128,64] = TMPZ1[128,64] * tmp3; + TMPZ2[192,64] = TMPZ1[192,64] * tmp3; + # simd infix TMPZ3 = TMPZ2 * 2:8 on lane size 8 + TMPZ3[0,64] = TMPZ2[0,64] * 2:8; + TMPZ3[64,64] = TMPZ2[64,64] * 2:8; + TMPZ3[128,64] = TMPZ2[128,64] * 2:8; + TMPZ3[192,64] = TMPZ2[192,64] * 2:8; + # simd shuffle Rd_VPR128.4S = TMPZ3 (@1-0@3-1@5-2@7-3) lane size 4 + Rd_VPR128.4S[0,32] = TMPZ3[32,32]; + Rd_VPR128.4S[32,32] = TMPZ3[96,32]; + Rd_VPR128.4S[64,32] = TMPZ3[160,32]; + Rd_VPR128.4S[96,32] = TMPZ3[224,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400 +# CONSTRUCT x4f40c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:32 ARG3 sext:4 $* 2:4 $* &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2 +# AUNIT --inst x4f40c000/mask=xffc0f400 --status pass --comment "ext nointsat" + +:sqdmulh Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xc & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd resize TMPZ1 = sext(Rn_VPR128.8H) (lane size 2 to 4) + TMPZ1[0,32] = sext(Rn_VPR128.8H[0,16]); + TMPZ1[32,32] = sext(Rn_VPR128.8H[16,16]); + TMPZ1[64,32] = sext(Rn_VPR128.8H[32,16]); + TMPZ1[96,32] = sext(Rn_VPR128.8H[48,16]); + TMPZ1[128,32] = sext(Rn_VPR128.8H[64,16]); + TMPZ1[160,32] = sext(Rn_VPR128.8H[80,16]); + TMPZ1[192,32] = sext(Rn_VPR128.8H[96,16]); + TMPZ1[224,32] = sext(Rn_VPR128.8H[112,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 4 + TMPZ2[0,32] = TMPZ1[0,32] * tmp3; + TMPZ2[32,32] = TMPZ1[32,32] * tmp3; + TMPZ2[64,32] = TMPZ1[64,32] * tmp3; + TMPZ2[96,32] = TMPZ1[96,32] * tmp3; + TMPZ2[128,32] = TMPZ1[128,32] * tmp3; + TMPZ2[160,32] = TMPZ1[160,32] * tmp3; + TMPZ2[192,32] = TMPZ1[192,32] * tmp3; + TMPZ2[224,32] = TMPZ1[224,32] * tmp3; + # simd infix TMPZ3 = TMPZ2 * 2:4 on lane size 4 + TMPZ3[0,32] = TMPZ2[0,32] * 2:4; + TMPZ3[32,32] = TMPZ2[32,32] * 2:4; + TMPZ3[64,32] = TMPZ2[64,32] * 2:4; + TMPZ3[96,32] = TMPZ2[96,32] * 2:4; + TMPZ3[128,32] = TMPZ2[128,32] * 2:4; + TMPZ3[160,32] = TMPZ2[160,32] * 2:4; + TMPZ3[192,32] = TMPZ2[192,32] * 2:4; + TMPZ3[224,32] = TMPZ2[224,32] * 2:4; + # simd shuffle Rd_VPR128.8H = TMPZ3 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 2 + Rd_VPR128.8H[0,16] = TMPZ3[16,16]; + Rd_VPR128.8H[16,16] = TMPZ3[48,16]; + Rd_VPR128.8H[32,16] = TMPZ3[80,16]; + Rd_VPR128.8H[48,16] = TMPZ3[112,16]; + Rd_VPR128.8H[64,16] = TMPZ3[144,16]; + Rd_VPR128.8H[80,16] = TMPZ3[176,16]; + Rd_VPR128.8H[96,16] = TMPZ3[208,16]; + 
Rd_VPR128.8H[112,16] = TMPZ3[240,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x5f00c000/mask=xff00f400 +# CONSTRUCT x5f40c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * 16:1 >>:4 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 +# AUNIT --inst x5f40c000/mask=xffc0f400 --status pass --comment "nointsat" +# Scalar variant when size=01 suf=FPR16 elem=Re_VPR128Lo.H.vIndexHLM p1=Re_VPR128Lo.H p2=vIndexHLM + +:sqdmulh Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1100 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd +{ + local tmp1:4 = sext(Rn_FPR16); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + local tmp4:4 = tmp1 * tmp3; + local tmp5:4 = tmp4 * 2:4; + local tmp6:4 = tmp5 >> 16:1; + Rd_FPR16 = tmp6:2; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x5f00c000/mask=xff00f400 +# CONSTRUCT x5f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * 32:1 >>:8 = +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 +# AUNIT --inst x5f80c000/mask=xffc0f400 --status pass --comment "nointsat" +# Scalar variant when size=10 suf=FPR32 elem=Re_VPR128.S.vIndex p1=Re_VPR128.S p2=vIndex + +:sqdmulh Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex +is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1100 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + local tmp4:8 = tmp1 * tmp3; + local tmp5:8 = tmp4 * 2:8; + local tmp6:8 = tmp5 >> 32:1; + Rd_FPR32 = tmp6:4; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x5e20b400/mask=xff20fc00 +# CONSTRUCT x5e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 +# AUNIT --inst x5e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmulh Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x16 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqdmulh(Rn_FPR16, Rm_FPR16); +} + +# C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x5e20b400/mask=xff20fc00 +# CONSTRUCT x5ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 +# AUNIT --inst x5ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmulh Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x16 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqdmulh(Rn_FPR32, Rm_FPR32); +} + +# C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 +# CONSTRUCT x0ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4 +# AUNIT --inst x0ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmulh Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x16 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqdmulh(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.289 SQDMULH (vector) page 
C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 +# CONSTRUCT x0e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2 +# AUNIT --inst x0e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmulh Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x16 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqdmulh(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 +# CONSTRUCT x4ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4 +# AUNIT --inst x4ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmulh Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x16 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqdmulh(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 +# CONSTRUCT x4e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2 +# AUNIT --inst x4e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmulh Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x16 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqdmulh(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 +# CONSTRUCT x0f80b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2@4 +# AUNIT --inst x0f80b000/mask=xffc0f400 --status pass --comment "ext nointsat" + +:sqdmull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xb & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = TMPQ2 * 2:8 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * 2:8; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * 2:8; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 +# CONSTRUCT x4f80b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* 2:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@4 +# AUNIT --inst x4f80b000/mask=xffc0f400 --status pass --comment "ext nointsat" + +:sqdmull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xb & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local 
tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = TMPQ3 * 2:8 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ3[0,64] * 2:8; + Rd_VPR128.2D[64,64] = TMPQ3[64,64] * 2:8; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 +# CONSTRUCT x0f40b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@2 +# AUNIT --inst x0f40b000/mask=xffc0f400 --status pass --comment "ext nointsat" + +:sqdmull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xb & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = TMPQ2 * 2:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * 2:4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * 2:4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * 2:4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * 2:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 +# CONSTRUCT x4f40b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* 2:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@2 +# AUNIT --inst x4f40b000/mask=xffc0f400 --status pass --comment "ext nointsat" + +:sqdmull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xb & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = sext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = TMPQ3 * 2:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ3[0,32] * 2:4; + Rd_VPR128.4S[32,32] = TMPQ3[32,32] * 2:4; + Rd_VPR128.4S[64,32] = TMPQ3[64,32] * 2:4; + Rd_VPR128.4S[96,32] = TMPQ3[96,32] * 2:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x5f00b000/mask=xff00f400 +# CONSTRUCT x5f40b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 =* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 +# AUNIT --inst x5f40b000/mask=xffc0f400 --status pass --comment 
"nointsat" +# Scalar variant when size=01 Va=FPR32 Vb=FPR16 elem=Re_VPR128Lo.H.vIndexHLM p1=Re_VPR128Lo.H p2=vIndexHLM + +:sqdmull Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM +is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1011 & b_10=0 & Rd_FPR32 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd +{ + local tmp1:4 = sext(Rn_FPR16); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = sext(tmp2); + local tmp4:4 = tmp1 * tmp3; + Rd_FPR32 = tmp4 * 2:4; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x5f00b000/mask=xff00f400 +# CONSTRUCT x5f80b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 =* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 +# AUNIT --inst x5f80b000/mask=xffc0f400 --status pass --comment "nointsat" +# Scalar variant when size=10 Va=FPR64 Vb=FPR32 elem=Re_VPR128.S.vIndex p1=Re_VPR128.S p2=vIndex + +:sqdmull Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex +is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1011 & b_10=0 & Rd_FPR64 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = sext(tmp2); + local tmp4:8 = tmp1 * tmp3; + Rd_FPR64 = tmp4 * 2:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 +# CONSTRUCT x4ea0d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 2:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@4 +# AUNIT --inst x4ea0d000/mask=xffe0fc00 --status pass --comment "ext nointsat" + +:sqdmull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xd & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix Rd_VPR128.2D = TMPQ5 * 2:8 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ5[0,64] * 2:8; + Rd_VPR128.2D[64,64] = TMPQ5[64,64] * 2:8; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 +# CONSTRUCT x4e60d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 2:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@2 +# AUNIT --inst x4e60d000/mask=xffe0fc00 --status pass --comment "ext nointsat" + +:sqdmull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xd & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + 
TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix Rd_VPR128.4S = TMPQ5 * 2:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ5[0,32] * 2:4; + Rd_VPR128.4S[32,32] = TMPQ5[32,32] * 2:4; + Rd_VPR128.4S[64,32] = TMPQ5[64,32] * 2:4; + Rd_VPR128.4S[96,32] = TMPQ5[96,32] * 2:4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 +# CONSTRUCT x0ea0d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2@4 +# AUNIT --inst x0ea0d000/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xd & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqdmull(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 +# CONSTRUCT x0e60d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2@2 +# AUNIT --inst x0e60d000/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqdmull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xd & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqdmull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x5e20d000/mask=xff20fc00 +# CONSTRUCT x5e60d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 =* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 +# AUNIT --inst x5e60d000/mask=xffe0fc00 --status pass --comment "nointsat" +# Scalar variant when size=01 Va=FPR32 Vb=FPR16 + +:sqdmull Rd_FPR32, Rn_FPR16, Rm_FPR16 +is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b110100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd +{ + local tmp1:4 = sext(Rn_FPR16); + local tmp2:4 = sext(Rm_FPR16); + local tmp3:4 = tmp1 * tmp2; + Rd_FPR32 = tmp3 * 2:4; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x5e20d000/mask=xff20fc00 +# CONSTRUCT x5ea0d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 =* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 +# AUNIT --inst x5ea0d000/mask=xffe0fc00 --status pass --comment "nointsat" +# Scalar variant when size=10 Va=FPR64 Vb=FPR32 + +:sqdmull Rd_FPR64, Rn_FPR32, Rm_FPR32 +is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b110100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd +{ + local tmp1:8 = sext(Rn_FPR32); + local tmp2:8 = sext(Rm_FPR32); + local tmp3:8 = tmp1 * tmp2; + Rd_FPR64 = tmp3 * 2:8; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00 +# CONSTRUCT x7e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =2comp +# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1 +# AUNIT --inst x7e207800/mask=xfffffc00 --status pass --comment "nointsat" +# Scalar variant when size=00 Q=1 aa=1 suf=FPR8 + +:sqneg Rd_FPR8, Rn_FPR8 +is b_31=0 & Q=1 & 
b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR8 & Rn_FPR8 & Zd
+{
+ Rd_FPR8 = - Rn_FPR8;
+ zext_zb(Zd); # zero upper 31 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00
+# CONSTRUCT x7e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =2comp
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1
+# AUNIT --inst x7e607800/mask=xfffffc00 --status pass --comment "nointsat"
+# Scalar variant when size=01 Q=1 aa=1 suf=FPR16
+
+:sqneg Rd_FPR16, Rn_FPR16
+is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR16 & Rn_FPR16 & Zd
+{
+ Rd_FPR16 = - Rn_FPR16;
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00
+# CONSTRUCT x7ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =2comp
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1
+# AUNIT --inst x7ea07800/mask=xfffffc00 --status pass --comment "nointsat"
+# Scalar variant when size=10 Q=1 aa=1 suf=FPR32
+
+:sqneg Rd_FPR32, Rn_FPR32
+is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR32 & Rn_FPR32 & Zd
+{
+ Rd_FPR32 = - Rn_FPR32;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00
+# CONSTRUCT x7ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =2comp
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1
+# AUNIT --inst x7ee07800/mask=xfffffc00 --status pass --comment "nointsat"
+# Scalar variant when size=11 Q=1 aa=1 suf=FPR64
+
+:sqneg Rd_FPR64, Rn_FPR64
+is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR64 & Rn_FPR64 & Zd
+{
+ Rd_FPR64 = - Rn_FPR64;
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x2e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@1
+# AUNIT --inst x2e207800/mask=xfffffc00 --status fail --comment "nointsat"
+# Vector variant when size = 00 , Q = 0 aa=0 esize=1 suf=VPR64.8B
+
+:sqneg Rd_VPR64.8B, Rn_VPR64.8B
+is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd
+{
+ # simd unary Rd_VPR64.8B = -(Rn_VPR64.8B) on lane size 1
+ Rd_VPR64.8B[0,8] = -(Rn_VPR64.8B[0,8]);
+ Rd_VPR64.8B[8,8] = -(Rn_VPR64.8B[8,8]);
+ Rd_VPR64.8B[16,8] = -(Rn_VPR64.8B[16,8]);
+ Rd_VPR64.8B[24,8] = -(Rn_VPR64.8B[24,8]);
+ Rd_VPR64.8B[32,8] = -(Rn_VPR64.8B[32,8]);
+ Rd_VPR64.8B[40,8] = -(Rn_VPR64.8B[40,8]);
+ Rd_VPR64.8B[48,8] = -(Rn_VPR64.8B[48,8]);
+ Rd_VPR64.8B[56,8] = -(Rn_VPR64.8B[56,8]);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x6e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@1
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@1
+# AUNIT --inst x6e207800/mask=xfffffc00 --status fail --comment "nointsat"
+# Vector variant when size = 00 , Q = 1 aa=0 esize=1 suf=VPR128.16B
+
+:sqneg Rd_VPR128.16B, Rn_VPR128.16B
+is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd
+{
+ # simd unary Rd_VPR128.16B = -(Rn_VPR128.16B) on lane size 1
+ Rd_VPR128.16B[0,8] = -(Rn_VPR128.16B[0,8]);
+ Rd_VPR128.16B[8,8] = -(Rn_VPR128.16B[8,8]);
+ Rd_VPR128.16B[16,8] = -(Rn_VPR128.16B[16,8]);
+ Rd_VPR128.16B[24,8] = -(Rn_VPR128.16B[24,8]);
+ Rd_VPR128.16B[32,8] = -(Rn_VPR128.16B[32,8]);
+ Rd_VPR128.16B[40,8] = -(Rn_VPR128.16B[40,8]);
+ Rd_VPR128.16B[48,8] = -(Rn_VPR128.16B[48,8]);
+ Rd_VPR128.16B[56,8] = -(Rn_VPR128.16B[56,8]);
+ Rd_VPR128.16B[64,8] = -(Rn_VPR128.16B[64,8]);
+ Rd_VPR128.16B[72,8] = -(Rn_VPR128.16B[72,8]);
+ Rd_VPR128.16B[80,8] = -(Rn_VPR128.16B[80,8]);
+ Rd_VPR128.16B[88,8] = -(Rn_VPR128.16B[88,8]);
+ Rd_VPR128.16B[96,8] = -(Rn_VPR128.16B[96,8]);
+ Rd_VPR128.16B[104,8] = -(Rn_VPR128.16B[104,8]);
+ Rd_VPR128.16B[112,8] = -(Rn_VPR128.16B[112,8]);
+ Rd_VPR128.16B[120,8] = -(Rn_VPR128.16B[120,8]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x2e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@2
+# AUNIT --inst x2e607800/mask=xfffffc00 --status pass --comment "nointsat"
+# Vector variant when size = 01 , Q = 0 aa=0 esize=2 suf=VPR64.4H
+
+:sqneg Rd_VPR64.4H, Rn_VPR64.4H
+is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd
+{
+ # simd unary Rd_VPR64.4H = -(Rn_VPR64.4H) on lane size 2
+ Rd_VPR64.4H[0,16] = -(Rn_VPR64.4H[0,16]);
+ Rd_VPR64.4H[16,16] = -(Rn_VPR64.4H[16,16]);
+ Rd_VPR64.4H[32,16] = -(Rn_VPR64.4H[32,16]);
+ Rd_VPR64.4H[48,16] = -(Rn_VPR64.4H[48,16]);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x6e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@2
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@2
+# AUNIT --inst x6e607800/mask=xfffffc00 --status pass --comment "nointsat"
+# Vector variant when size = 01 , Q = 1 aa=0 esize=2 suf=VPR128.8H
+
+:sqneg Rd_VPR128.8H, Rn_VPR128.8H
+is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd
+{
+ # simd unary Rd_VPR128.8H = -(Rn_VPR128.8H) on lane size 2
+ Rd_VPR128.8H[0,16] = -(Rn_VPR128.8H[0,16]);
+ Rd_VPR128.8H[16,16] = -(Rn_VPR128.8H[16,16]);
+ Rd_VPR128.8H[32,16] = -(Rn_VPR128.8H[32,16]);
+ Rd_VPR128.8H[48,16] = -(Rn_VPR128.8H[48,16]);
+ Rd_VPR128.8H[64,16] = -(Rn_VPR128.8H[64,16]);
+ Rd_VPR128.8H[80,16] = -(Rn_VPR128.8H[80,16]);
+ Rd_VPR128.8H[96,16] = -(Rn_VPR128.8H[96,16]);
+ Rd_VPR128.8H[112,16] = -(Rn_VPR128.8H[112,16]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x2ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@4
+# AUNIT --inst x2ea07800/mask=xfffffc00 --status pass --comment "nointsat"
+# Vector variant when size = 10 , Q = 0 aa=0 esize=4 suf=VPR64.2S
+
+:sqneg Rd_VPR64.2S, Rn_VPR64.2S
+is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd
+{
+ # simd unary Rd_VPR64.2S = -(Rn_VPR64.2S) on lane size 4
+ Rd_VPR64.2S[0,32] = -(Rn_VPR64.2S[0,32]);
+ Rd_VPR64.2S[32,32] = -(Rn_VPR64.2S[32,32]);
+ zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x6ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@4
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@4
+# AUNIT --inst x6ea07800/mask=xfffffc00 --status pass --comment "nointsat"
+# Vector variant when size = 10 , Q = 1 aa=0 esize=4 suf=VPR128.4S
+
+:sqneg Rd_VPR128.4S, Rn_VPR128.4S
+is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b10 &
b_1021=0b100000011110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd
+{
+ # simd unary Rd_VPR128.4S = -(Rn_VPR128.4S) on lane size 4
+ Rd_VPR128.4S[0,32] = -(Rn_VPR128.4S[0,32]);
+ Rd_VPR128.4S[32,32] = -(Rn_VPR128.4S[32,32]);
+ Rd_VPR128.4S[64,32] = -(Rn_VPR128.4S[64,32]);
+ Rd_VPR128.4S[96,32] = -(Rn_VPR128.4S[96,32]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00
+# CONSTRUCT x6ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =$2comp@8
+# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@8
+# AUNIT --inst x6ee07800/mask=xfffffc00 --status pass --comment "nointsat"
+# Vector variant when size = 11 , Q = 1 aa=0 esize=8 suf=VPR128.2D
+
+:sqneg Rd_VPR128.2D, Rn_VPR128.2D
+is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd
+{
+ # simd unary Rd_VPR128.2D = -(Rn_VPR128.2D) on lane size 8
+ Rd_VPR128.2D[0,64] = -(Rn_VPR128.2D[0,64]);
+ Rd_VPR128.2D[64,64] = -(Rn_VPR128.2D[64,64]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.277 SQRDMLAH (by element) page C7-1598 line 92254 KEEPWITH
+# Integer saturating instruction (not implemented)
+
+sqrdml_subop: "ah" is b_24=0 & b_11=0 { export 0:1; }
+sqrdml_subop: "ah" is b_24=1 & b_13=0 { export 0:1; }
+sqrdml_subop: "sh" is b_24=0 & b_11=1 { export 1:1; }
+sqrdml_subop: "sh" is b_24=1 & b_13=1 { export 1:1; }
+
+sqrdml_esize: "h" is b_2223=0b01 { export 16:1; }
+sqrdml_esize: "s" is b_2223=0b10 { export 32:1; }
+
+sqrdml_elements: "4h" is b_2223=0b01 & b_30=0 { export 4:1; }
+sqrdml_elements: "8h" is b_2223=0b01 & b_30=1 { export 8:1; }
+sqrdml_elements: "2s" is b_2223=0b10 & b_30=0 { export 2:1; }
+sqrdml_elements: "4s" is b_2223=0b10 & b_30=1 { export 4:1; }
+
+sqrdml_index: val is b_2223=0b01 & b_21 & b_20 & b_11 [ val = b_11 * 4 + b_21 * 2 + b_20; ] { export * [const]:1 val; }
+sqrdml_index: val is b_2223=0b10 & b_21 & b_11 [ val = b_11 * 2 + b_21; ] { export * [const]:1 val; }
+
+# We could be more specific about the size of the register, which
+# depends on the variant and Q (b_30). For now, I've just made them
+# all 128 bits.
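+#
+# For illustration: with the "h" element size (b_2223=0b01) the sqrdml_index
+# table above computes index = H:L:M = b_11*4 + b_21*2 + b_20, so an
+# encoding with H=1, L=0, M=1 selects Vm.h[5]; with the "s" size
+# (b_2223=0b10) it computes index = H:L = b_11*2 + b_21 (lanes 0-3).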
+
+sqrdml_vd: Rd_FPR16 is b_28=1 & b_2223=0b01 & Rd_FPR16 & Rd_VPR128 { export Rd_VPR128; }
+sqrdml_vd: Rd_FPR32 is b_28=1 & b_2223=0b10 & Rd_FPR32 & Rd_VPR128 { export Rd_VPR128; }
+sqrdml_vd: vRd_VPR128^"."^sqrdml_elements is b_28=0 & vRd_VPR128 & Rd_VPR128 & sqrdml_elements { export Rd_VPR128; }
+
+sqrdml_vn: Rn_FPR16 is b_28=1 & b_2223=0b01 & Rn_FPR16 & Rn_VPR128 { export Rn_VPR128; }
+sqrdml_vn: Rn_FPR32 is b_28=1 & b_2223=0b10 & Rn_FPR32 & Rn_VPR128 { export Rn_VPR128; }
+sqrdml_vn: vRn_VPR128^"."^sqrdml_elements is b_28=0 & vRn_VPR128 & Rn_VPR128 & sqrdml_elements { export Rn_VPR128; }
+
+# Decode Vm (in some cases) depending on size
+
+# cases 34.1, 36.1
+sqrdml_vm: Rm_FPR16 is b_28=1 & b_24=0 & b_2223=0b01 & Rm_FPR16 & Rm_VPR128 { export Rm_VPR128; }
+sqrdml_vm: Rm_FPR32 is b_28=1 & b_24=0 & b_2223=0b10 & Rm_FPR32 & Rm_VPR128 { export Rm_VPR128; }
+
+# cases 34.2, 36.2
+sqrdml_vm: vRm_VPR128^"."^sqrdml_elements is b_28=0 & b_24=0 & vRm_VPR128 & sqrdml_elements & Rm_VPR128 { export Rm_VPR128; }
+
+sqrdml_vmlo: vRm_VPR128Lo is b_2223=0b01 & vRm_VPR128Lo & Rm_VPR128Lo { export Rm_VPR128Lo; }
+sqrdml_vmlo: vRm_VPR128 is b_2223=0b10 & vRm_VPR128 & Rm_VPR128 { export Rm_VPR128; }
+
+# cases 33, 35
+sqrdml_vm: sqrdml_vmlo^"."^sqrdml_esize[sqrdml_index] is b_24=1 & sqrdml_vmlo & sqrdml_esize & sqrdml_index { export sqrdml_vmlo; }
+
+# SQRDML(Vd, Vn, Vm, esize, elements, subop[, index])
+#
+# performs the SQRDML operation
+#
+# Vd[e] = SignedSatQ(Vd[e] <subop> ((2 * Vn[e] * Vm[index]) >> esize))
+
+# C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x5f00d000/mask=xff00f400
+# CONSTRUCT x5f40d000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * 16:4 >>:4 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2
+# AUNIT --inst x5f40d000/mask=xffc0f400 --status fail --comment "nointround nointsat"
+# Scalar variant when size=01 suf=FPR16 elem=Re_VPR128Lo.H.vIndexHLM p1=Re_VPR128Lo.H p2=vIndexHLM
+
+:sqrdmulh Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM
+is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1101 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd
+{
+ local tmp1:4 = sext(Rn_FPR16);
+ # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
+ local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
+ local tmp3:4 = sext(tmp2);
+ local tmp4:4 = tmp1 * tmp3;
+ local tmp5:4 = tmp4 * 2:4;
+ local tmp6:4 = tmp5 >> 16:4;
+ Rd_FPR16 = tmp6:2;
+ zext_zh(Zd); # zero upper 30 bytes of Zd
+}
+
+# C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x5f00d000/mask=xff00f400
+# CONSTRUCT x5f80d000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * 32:8 >>:8 =
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2
+# AUNIT --inst x5f80d000/mask=xffc0f400 --status fail --comment "nointround nointsat"
+# Scalar variant when size=10 suf=FPR32 elem=Re_VPR128.S.vIndex p1=Re_VPR128.S p2=vIndex
+
+:sqrdmulh Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex
+is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1101 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd
+{
+ local tmp1:8 = sext(Rn_FPR32);
+ # simd element Re_VPR128.S[vIndex] lane size 4
+ local tmp2:4 = Re_VPR128.S.vIndex;
+ local tmp3:8 = sext(tmp2);
+ local tmp4:8 = tmp1 * tmp3;
+ local tmp5:8 = tmp4 * 2:8;
+ local tmp6:8 = tmp5 >> 32:8;
+ Rd_FPR32 = tmp6:4;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
+}
+
+# C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x7e20b400/mask=xff20fc00
+# CONSTRUCT x7e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2
+# AUNIT --inst x7e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat"
+
+:sqrdmulh Rd_FPR16, Rn_FPR16, Rm_FPR16
+is b_3031=1 & u=1 & b_2428=0x1e &
advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x16 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqrdmulh(Rn_FPR16, Rm_FPR16); +} + +# C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x7e20b400/mask=xff20fc00 +# CONSTRUCT x7ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2 +# AUNIT --inst x7ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrdmulh Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x16 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqrdmulh(Rn_FPR32, Rm_FPR32); +} + +# C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 +# CONSTRUCT x2ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@4 +# AUNIT --inst x2ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrdmulh Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x16 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqrdmulh(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 +# CONSTRUCT x2e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@2 +# AUNIT --inst x2e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrdmulh Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x16 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqrdmulh(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 +# CONSTRUCT x6ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@4 +# AUNIT --inst x6ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrdmulh Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x16 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqrdmulh(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 +# CONSTRUCT x6e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@2 +# AUNIT --inst x6e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrdmulh Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x16 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqrdmulh(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 +# CONSTRUCT x5e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 +# AUNIT --inst x5e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0xb & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sqrshl(Rn_FPR8, Rm_FPR8); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 +# CONSTRUCT x5ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 +# AUNIT --inst x5ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xb & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_sqrshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 +# CONSTRUCT x5e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 +# AUNIT --inst x5e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0xb & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqrshl(Rn_FPR16, Rm_FPR16); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 +# CONSTRUCT x5ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 +# AUNIT --inst x5ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0xb & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqrshl(Rn_FPR32, Rm_FPR32); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x4e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@1 +# AUNIT --inst x4e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xb & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqrshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x4ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@8 +# AUNIT --inst x4ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xb & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqrshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x0ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@4 +# AUNIT --inst x0ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xb & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqrshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x0e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@2 +# AUNIT --inst x0e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xb & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqrshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.299 
SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x4ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@4 +# AUNIT --inst x4ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xb & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqrshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x0e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@1 +# AUNIT --inst x0e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xb & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqrshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 +# CONSTRUCT x4e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@2 +# AUNIT --inst x4e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xb & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqrshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 +# CONSTRUCT x4f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@2 +# AUNIT --inst x4f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqrshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 +# CONSTRUCT x0f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3@8 +# AUNIT --inst x0f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqrshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 +# CONSTRUCT x0f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3@4 +# AUNIT --inst x0f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqrshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 +# CONSTRUCT x4f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@8 +# AUNIT --inst 
x4f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqrshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 +# CONSTRUCT x0f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@2 +# AUNIT --inst x0f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqrshrn2(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 +# CONSTRUCT x4f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@4 +# AUNIT --inst x4f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqrshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x5f009c00/mask=xff80fc00 +# CONSTRUCT x5f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3 +# AUNIT --inst x5f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:sqrshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 +is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_sqrshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x5f009c00/mask=xff80fc00 +# CONSTRUCT x5f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3 +# AUNIT --inst x5f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:sqrshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 +is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_sqrshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); +} + +# C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x5f009c00/mask=xff80fc00 +# CONSTRUCT x5f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3 +# AUNIT --inst x5f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:sqrshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 +is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_sqrshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 +# CONSTRUCT x6f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun2/3@2 +# AUNIT --inst x6f088c00/mask=xfff8fc00 --status nopcodeop 
--comment "nointround nointsat" + +:sqrshrun2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqrshrun2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 +# CONSTRUCT x2f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3@8 +# AUNIT --inst x2f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrun Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqrshrun(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 +# CONSTRUCT x2f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3@4 +# AUNIT --inst x2f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrun Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqrshrun(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 +# CONSTRUCT x6f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun2/3@8 +# AUNIT --inst x6f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrun2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqrshrun2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 +# CONSTRUCT x2f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3@2 +# AUNIT --inst x2f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrun Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqrshrun(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 +# CONSTRUCT x6f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun2/3@4 +# AUNIT --inst x6f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" + +:sqrshrun2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqrshrun2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x7f008c00/mask=xff80fc00 +# CONSTRUCT x7f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3 +# AUNIT --inst x7f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + 
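+# Worked decode example for the scalar narrowing shifts below (added for
+# illustration; not generator output): with the shr_imm attaches used in
+# this file, the shift amount is (2 * destination esize) - UInt(immh:immb).
+# So with immh=0001 and immb=0b111 (16 - 15 = 1), the word 0x7f0f8c00
+# should disassemble as "sqrshrun b0, h0, #1".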
+:sqrshrun Rd_FPR8, Rn_FPR16, Imm_shr_imm8 +is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100011 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_sqrshrun(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x7f008c00/mask=xff80fc00 +# CONSTRUCT x7f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3 +# AUNIT --inst x7f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:sqrshrun Rd_FPR16, Rn_FPR32, Imm_shr_imm16 +is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100011 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_sqrshrun(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); +} + +# C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x7f008c00/mask=xff80fc00 +# CONSTRUCT x7f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3 +# AUNIT --inst x7f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:sqrshrun Rd_FPR32, Rn_FPR64, Imm_shr_imm32 +is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100011 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_sqrshrun(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x4f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@1 +# AUNIT --inst x4f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqshl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x4f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@8 +# AUNIT --inst x4f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqshl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x0f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@4 +# AUNIT --inst x0f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqshl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x0f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@2 +# AUNIT --inst x0f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqshl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); +} + +# C7.2.302 SQSHL (immediate) page 
C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x4f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@4 +# AUNIT --inst x4f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqshl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x0f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@1 +# AUNIT --inst x0f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqshl(Rn_VPR64.8B, Imm_uimm3:1, 1:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 +# CONSTRUCT x4f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@2 +# AUNIT --inst x4f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqshl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 +# CONSTRUCT x5f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 +# AUNIT --inst x5f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=0001 V=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:sqshl Rd_FPR8, Rn_FPR8, Imm_shr_imm8 +is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_sqshl(Rn_FPR8, Imm_shr_imm8:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 +# CONSTRUCT x5f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 +# AUNIT --inst x5f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=001x V=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:sqshl Rd_FPR16, Rn_FPR16, Imm_shr_imm16 +is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_sqshl(Rn_FPR16, Imm_shr_imm16:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 +# CONSTRUCT x5f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 +# AUNIT --inst x5f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=01xx V=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:sqshl Rd_FPR32, Rn_FPR32, Imm_shr_imm32 +is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_sqshl(Rn_FPR32, Imm_shr_imm32:1); +} + +# C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 +# CONSTRUCT x5f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 +# AUNIT --inst x5f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=1xxx V=FPR64 imm=Imm_shr_imm64 
bb=b_22 aa=1 + +:sqshl Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b010111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + Rd_FPR64 = NEON_sqshl(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 +# CONSTRUCT x5e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 +# AUNIT --inst x5e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x9 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sqshl(Rn_FPR8, Rm_FPR8); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 +# CONSTRUCT x5ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 +# AUNIT --inst x5ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x9 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_sqshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 +# CONSTRUCT x5e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 +# AUNIT --inst x5e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x9 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqshl(Rn_FPR16, Rm_FPR16); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 +# CONSTRUCT x5ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 +# AUNIT --inst x5ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x9 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqshl(Rn_FPR32, Rm_FPR32); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x4e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@1 +# AUNIT --inst x4e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x9 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x4ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@8 +# AUNIT --inst x4ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x9 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x0ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@4 +# AUNIT --inst x0ea04c00/mask=xffe0fc00 --status nopcodeop --comment 
"nointsat" + +:sqshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x9 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x0e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@2 +# AUNIT --inst x0e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x9 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x4ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@4 +# AUNIT --inst x4ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x9 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x0e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@1 +# AUNIT --inst x0e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x9 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 +# CONSTRUCT x4e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@2 +# AUNIT --inst x4e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x9 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT x6f086400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@1 +# AUNIT --inst x6f086400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqshlu(Rn_VPR128.16B, Imm_uimm3:1, 1:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT x6f406400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@8 +# AUNIT --inst x6f406400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xc & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqshlu(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT 
x2f206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@4 +# AUNIT --inst x2f206400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqshlu(Rn_VPR64.2S, Imm_uimm5:1, 4:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT x2f106400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@2 +# AUNIT --inst x2f106400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqshlu(Rn_VPR64.4H, Imm_uimm4:1, 2:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT x6f206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@4 +# AUNIT --inst x6f206400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqshlu(Rn_VPR128.4S, Imm_uimm5:1, 4:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT x2f086400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@1 +# AUNIT --inst x2f086400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqshlu(Rn_VPR64.8B, Imm_uimm3:1, 1:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 +# CONSTRUCT x6f106400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@2 +# AUNIT --inst x6f106400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshlu Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqshlu(Rn_VPR128.8H, Imm_uimm4:1, 2:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 +# CONSTRUCT x7f086400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 +# AUNIT --inst x7f086400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=0001 V=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:sqshlu Rd_FPR8, Rn_FPR8, Imm_shr_imm8 +is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011001 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_sqshlu(Rn_FPR8, Imm_shr_imm8:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 +# CONSTRUCT x7f106400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 +# AUNIT --inst x7f106400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=001x V=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:sqshlu Rd_FPR16, Rn_FPR16, Imm_shr_imm16 +is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011001 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_sqshlu(Rn_FPR16, 
Imm_shr_imm16:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 +# CONSTRUCT x7f206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 +# AUNIT --inst x7f206400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=01xx V=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:sqshlu Rd_FPR32, Rn_FPR32, Imm_shr_imm32 +is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011001 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_sqshlu(Rn_FPR32, Imm_shr_imm32:1); +} + +# C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 +# CONSTRUCT x7f406400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 +# AUNIT --inst x7f406400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=1xxx V=FPR64 imm=Imm_shr_imm64 bb=b_22 aa=1 + +:sqshlu Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b011111110 & b_22=1 & b_1015=0b011001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + Rd_FPR64 = NEON_sqshlu(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 +# CONSTRUCT x4f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn2/3@2 +# AUNIT --inst x4f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 +# CONSTRUCT x0f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3@8 +# AUNIT --inst x0f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 +# CONSTRUCT x0f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3@4 +# AUNIT --inst x0f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 +# CONSTRUCT x4f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn2/3@8 +# AUNIT --inst x4f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 +# CONSTRUCT x0f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3@2 +# AUNIT --inst 
x0f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 +# CONSTRUCT x4f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn2/3@4 +# AUNIT --inst x4f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x5f009400/mask=xff80fc00 +# CONSTRUCT x5f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3 +# AUNIT --inst x5f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:sqshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 +is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_sqshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x5f009400/mask=xff80fc00 +# CONSTRUCT x5f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3 +# AUNIT --inst x5f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:sqshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 +is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_sqshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); +} + +# C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x5f009400/mask=xff80fc00 +# CONSTRUCT x5f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3 +# AUNIT --inst x5f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:sqshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 +is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_sqshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# CONSTRUCT x6f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun2/3@2 +# AUNIT --inst x6f088400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshrun2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqshrun2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# CONSTRUCT x2f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3@8 +# AUNIT --inst x2f208400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshrun Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqshrun(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# CONSTRUCT x2f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3@4 +# AUNIT --inst x2f108400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshrun Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqshrun(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# CONSTRUCT x6f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun2/3@8 +# AUNIT --inst x6f208400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqshrun2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqshrun2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# CONSTRUCT x2f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3@2 +# AUNIT --inst x2f088400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:sqshrun Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqshrun(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 +# CONSTRUCT x6f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun2/3@4 +# AUNIT --inst x6f108400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:sqshrun2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqshrun2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x7f008400/mask=xff80fc00 +# CONSTRUCT x7f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3 +# AUNIT --inst x7f088400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:sqshrun Rd_FPR8, Rn_FPR16, Imm_shr_imm8 +is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100001 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_sqshrun(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x7f008400/mask=xff80fc00 +# CONSTRUCT x7f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3 +# AUNIT --inst x7f108400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:sqshrun Rd_FPR16, Rn_FPR32, Imm_shr_imm16 +is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100001 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_sqshrun(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); +} + +# C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x7f008400/mask=xff80fc00 +# CONSTRUCT x7f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3 +# AUNIT --inst x7f208400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:sqshrun Rd_FPR32, Rn_FPR64, Imm_shr_imm32 +is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100001 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_sqshrun(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 +# CONSTRUCT x5e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 +# AUNIT --inst x5e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x5 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sqsub(Rn_FPR8, Rm_FPR8); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 +# CONSTRUCT x5ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 +# AUNIT --inst x5ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x5 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_sqsub(Rn_FPR64, Rm_FPR64); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 +# CONSTRUCT x5e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 +# AUNIT --inst x5e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x5 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqsub(Rn_FPR16, Rm_FPR16); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 +# CONSTRUCT x5ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 +# AUNIT --inst x5ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x5 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqsub(Rn_FPR32, Rm_FPR32); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x4e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@1 +# AUNIT --inst x4e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x5 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x4ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@8 +# AUNIT --inst x4ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x5 & b_1010=1 &
Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sqsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x0ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@4 +# AUNIT --inst x0ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x5 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x0e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@2 +# AUNIT --inst x0e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x5 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x4ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@4 +# AUNIT --inst x4ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x5 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x0e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@1 +# AUNIT --inst x0e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x5 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 +# CONSTRUCT x4e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@2 +# AUNIT --inst x4e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:sqsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x5 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x5e214800/mask=xff3ffc00 +# CONSTRUCT x5e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2 +# AUNIT --inst x5e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn Rd_FPR8, Rn_FPR16 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR16 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_sqxtn(Rd_FPR8, Rn_FPR16); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x5e214800/mask=xff3ffc00 +# CONSTRUCT x5e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2 +# AUNIT --inst x5e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn Rd_FPR16, Rn_FPR32 +is b_3031=1 & u=0 & b_2428=0x1e & 
advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR32 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_sqxtn(Rd_FPR16, Rn_FPR32); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x5e214800/mask=xff3ffc00 +# CONSTRUCT x5ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2 +# AUNIT --inst x5ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn Rd_FPR32, Rn_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR64 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_sqxtn(Rd_FPR32, Rn_FPR64); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 +# CONSTRUCT x4e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn2/2@2 +# AUNIT --inst x4e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn2 Rd_VPR128.16B, Rn_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sqxtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 +# CONSTRUCT x4e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn2/2@4 +# AUNIT --inst x4e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn2 Rd_VPR128.8H, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sqxtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 +# CONSTRUCT x4ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn2/2@8 +# AUNIT --inst x4ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn2 Rd_VPR128.4S, Rn_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sqxtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 +# CONSTRUCT x0ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2@8 +# AUNIT --inst x0ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn Rd_VPR64.2S, Rn_VPR128.2D +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sqxtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 +# CONSTRUCT x0e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2@4 +# AUNIT --inst x0e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn Rd_VPR64.4H, Rn_VPR128.4S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sqxtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); +} + +# C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 +# CONSTRUCT x0e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2@2 +# AUNIT --inst x0e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" + +:sqxtn Rd_VPR64.8B, Rn_VPR128.8H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & 
b_1216=0x14 & b_1011=2 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sqxtn(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x7e212800/mask=xff3ffc00 +# CONSTRUCT x7e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2 +# AUNIT --inst x7e212800/mask=xfffffc00 --status noqemu --comment "nointsat" +# Scalar variant when size=00 Q=1 bb=1 Ta=FPR16 Tb=FPR8 + +:sqxtun Rd_FPR8, Rn_FPR16 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_FPR8 & Rn_FPR16 & Zd +{ + Rd_FPR8 = NEON_sqxtun(Rd_FPR8, Rn_FPR16); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x7e212800/mask=xff3ffc00 +# CONSTRUCT x7e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2 +# AUNIT --inst x7e612800/mask=xfffffc00 --status noqemu --comment "nointsat" +# Scalar variant when size=01 Q=1 bb=1 Ta=FPR32 Tb=FPR16 + +:sqxtun Rd_FPR16, Rn_FPR32 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_FPR16 & Rn_FPR32 & Zd +{ + Rd_FPR16 = NEON_sqxtun(Rd_FPR16, Rn_FPR32); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x7e212800/mask=xff3ffc00 +# CONSTRUCT x7ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2 +# AUNIT --inst x7ea12800/mask=xfffffc00 --status noqemu --comment "nointsat" +# Scalar variant when size=10 Q=1 bb=1 Ta=FPR64 Tb=FPR32 + +:sqxtun Rd_FPR32, Rn_FPR64 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_FPR32 & Rn_FPR64 & Zd +{ + Rd_FPR32 = NEON_sqxtun(Rd_FPR32, Rn_FPR64); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 +# CONSTRUCT x2e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2@2 +# AUNIT --inst x2e212800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=00 Q=0 bb=0 Ta=VPR128.8H Tb=VPR64.8B esize=2 + +:sqxtun Rd_VPR64.8B, Rn_VPR128.8H +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_VPR64.8B & Rn_VPR128.8H & Zd +{ + Rd_VPR64.8B = NEON_sqxtun(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 +# CONSTRUCT x6e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun2/2@2 +# AUNIT --inst x6e212800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=00 Q=1 bb=0 Ta=VPR128.8H Tb=VPR128.16B esize=2 + +:sqxtun2 Rd_VPR128.16B, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_VPR128.16B & Rn_VPR128.8H & Zd +{ + Rd_VPR128.16B = NEON_sqxtun2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 +# CONSTRUCT x2e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2@4 +# AUNIT --inst x2e612800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=01 Q=0 bb=0 Ta=VPR128.4S Tb=VPR64.4H esize=4 + +:sqxtun Rd_VPR64.4H, Rn_VPR128.4S +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_VPR64.4H & Rn_VPR128.4S & Zd +{ + Rd_VPR64.4H = NEON_sqxtun(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 +# CONSTRUCT x6e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun2/2@4 +# AUNIT --inst x6e612800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=01 Q=1 bb=0 Ta=VPR128.4S Tb=VPR128.8H esize=4 + +:sqxtun2 Rd_VPR128.8H, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_VPR128.8H & Rn_VPR128.4S & Zd +{ + Rd_VPR128.8H = NEON_sqxtun2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 +# CONSTRUCT x2ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2@8 +# AUNIT --inst x2ea12800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=10 Q=0 bb=0 Ta=VPR128.2D Tb=VPR64.2S esize=8 + +:sqxtun Rd_VPR64.2S, Rn_VPR128.2D +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_VPR64.2S & Rn_VPR128.2D & Zd +{ + Rd_VPR64.2S = NEON_sqxtun(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); +} + +# C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 +# CONSTRUCT x6ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun2/2@8 +# AUNIT --inst x6ea12800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=10 Q=1 bb=0 Ta=VPR128.2D Tb=VPR128.4S esize=8 + +:sqxtun2 Rd_VPR128.4S, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_VPR128.4S & Rn_VPR128.2D & Zd +{ + Rd_VPR128.4S = NEON_sqxtun2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); +} + +# C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 +# CONSTRUCT x4e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@1 +# AUNIT --inst x4e201400/mask=xffe0fc00 --status nopcodeop + +:srhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_srhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 +# CONSTRUCT x0ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@4 +# AUNIT --inst x0ea01400/mask=xffe0fc00 --status nopcodeop + +:srhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_srhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 +# CONSTRUCT x0e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@2 +# AUNIT --inst x0e601400/mask=xffe0fc00 --status nopcodeop + +:srhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_srhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 +# CONSTRUCT x4ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@4 +# AUNIT --inst x4ea01400/mask=xffe0fc00 --status nopcodeop + +:srhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{
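+ # NEON_srhadd is left as a pseudo-op (status nopcodeop above), so no
+ # p-code is emitted here and the emulator's CALLOTHER handler supplies
+ # the semantics. Per the ARM ARM this is expected to be a signed
+ # rounding halving add, per lane: (a + b + 1) >> 1 with a widened
+ # intermediate, e.g. srhadd(0x7f, 0x7f) = 0x7f with no overflow. The
+ # trailing 4:1 argument is the lane width in bytes (@4 = 32-bit lanes).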
+ Rd_VPR128.4S = NEON_srhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 +# CONSTRUCT x0e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@1 +# AUNIT --inst x0e201400/mask=xffe0fc00 --status nopcodeop + +:srhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_srhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 +# CONSTRUCT x4e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@2 +# AUNIT --inst x4e601400/mask=xffe0fc00 --status nopcodeop + +:srhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_srhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x6f084400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@1 +# AUNIT --inst x6f084400/mask=xfff8fc00 --status nopcodeop + +:sri Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sri(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:4, 1:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x6f404400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@8 +# AUNIT --inst x6f404400/mask=xffc0fc00 --status nopcodeop + +:sri Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sri(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:4, 8:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x2f204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@4 +# AUNIT --inst x2f204400/mask=xffe0fc00 --status nopcodeop + +:sri Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sri(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x2f104400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@2 +# AUNIT --inst x2f104400/mask=xfff0fc00 --status nopcodeop + +:sri Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sri(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:4, 2:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x6f204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@4 +# AUNIT --inst x6f204400/mask=xffe0fc00 --status nopcodeop + +:sri Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ +
Rd_VPR128.4S = NEON_sri(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x2f084400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@1 +# AUNIT --inst x2f084400/mask=xfff8fc00 --status nopcodeop + +:sri Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sri(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:4, 1:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 +# CONSTRUCT x6f104400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@2 +# AUNIT --inst x6f104400/mask=xfff0fc00 --status nopcodeop + +:sri Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sri(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:4, 2:1); +} + +# C7.2.311 SRI page C7-2092 line 117324 MATCH x7f004400/mask=xff80fc00 +# CONSTRUCT x7f404400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sri/3 +# AUNIT --inst x7f404400/mask=xffc0fc00 --status nopcodeop + +:sri Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b011111110 & b_22=1 & b_1015=0b010001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + Rd_FPR64 = NEON_sri(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x5e205400/mask=xff20fc00 +# CONSTRUCT x5ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2 +# AUNIT --inst x5ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_srshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x4e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@1 +# AUNIT --inst x4e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_srshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x4ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@8 +# AUNIT --inst x4ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_srshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x0ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@4 +# AUNIT --inst x0ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & 
Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_srshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x0e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@2 +# AUNIT --inst x0e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_srshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x4ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@4 +# AUNIT --inst x4ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_srshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x0e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@1 +# AUNIT --inst x0e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_srshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 +# CONSTRUCT x4e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@2 +# AUNIT --inst x4e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_srshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x4f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@1 +# AUNIT --inst x4f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_srshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x4f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@8 +# AUNIT --inst x4f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_srshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x0f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@4 +# AUNIT --inst x0f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshr 
Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_srshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x0f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@2 +# AUNIT --inst x0f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_srshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x4f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@4 +# AUNIT --inst x4f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_srshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x0f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@1 +# AUNIT --inst x0f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_srshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 +# CONSTRUCT x4f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@2 +# AUNIT --inst x4f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_srshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); +} + +# C7.2.313 SRSHR page C7-2097 line 117624 MATCH x5f002400/mask=xff80fc00 +# CONSTRUCT x5f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2 +# AUNIT --inst x5f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" + +:srshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b010111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + Rd_FPR64 = NEON_srshr(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x4f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@1 +# AUNIT --inst x4f083400/mask=xfff8fc00 --status fail --comment "nointround" + +:srsra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 + TMPQ1[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; + TMPQ1[24,8] = 
Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; + # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x4f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 $s>>@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@8 +# AUNIT --inst x4f403400/mask=xffc0fc00 --status fail --comment "nointround" + +:srsra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + # simd infix TMPQ1 = Rn_VPR128.2D s>> tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x0f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:4 $s>>@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@4 +# AUNIT --inst x0f203400/mask=xffe0fc00 --status fail --comment "nointround" + +:srsra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix TMPD1 = Rn_VPR64.2S s>> Imm_shr_imm32:4 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] s>> Imm_shr_imm32:4; + TMPD1[32,32] = Rn_VPR64.2S[32,32] s>> Imm_shr_imm32:4; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + 
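# note: SRSRA is a rounding shift-right-and-accumulate, but the expansions in this family omit the rounding increment (see the "nointround" / "--status fail" AUNIT tags above); a rounding model would add (1 << (shift-1)) to each lane before the s>>, e.g. with shift=2: -5 s>> 2 = -2, while rounded (-5 + 2) s>> 2 = -1. +# note: per the inline comments, zext_zd/zext_zq clear the bytes of Zd above the written result (24 bytes above a D result, 16 above a Q result). +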
zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x0f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@2 +# AUNIT --inst x0f103400/mask=xfff0fc00 --status fail --comment "nointround" + +:srsra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix TMPD1 = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x4f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:4 $s>>@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@4 +# AUNIT --inst x4f203400/mask=xffe0fc00 --status fail --comment "nointround" + +:srsra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S s>> Imm_shr_imm32:4 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] s>> Imm_shr_imm32:4; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] s>> Imm_shr_imm32:4; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] s>> Imm_shr_imm32:4; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] s>> Imm_shr_imm32:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x0f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@1 +# AUNIT --inst x0f083400/mask=xfff8fc00 --status fail --comment "nointround" + +:srsra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix TMPD1 = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 + TMPD1[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; + # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = 
Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 +# CONSTRUCT x4f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@2 +# AUNIT --inst x4f103400/mask=xfff0fc00 --status fail --comment "nointround" + +:srsra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.314 SRSRA page C7-2099 line 117760 MATCH x5f003400/mask=xff80fc00 +# CONSTRUCT x5f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 s>> &=+ +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3 +# AUNIT --inst x5f403400/mask=xffc0fc00 --status fail --comment "nointround" + +:srsra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b010111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + local tmp2:8 = Rn_FPR64 s>> tmp1; + Rd_FPR64 = Rd_FPR64 + tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x5e204400/mask=xff20fc00 +# CONSTRUCT x5ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2 +# AUNIT --inst x5ee04400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x8 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_sshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 +# CONSTRUCT x4e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@1 +# AUNIT --inst x4e204400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_sshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH 
x0e204400/mask=xbf20fc00 +# CONSTRUCT x4ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@8 +# AUNIT --inst x4ee04400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_sshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 +# CONSTRUCT x0ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@4 +# AUNIT --inst x0ea04400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_sshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 +# CONSTRUCT x0e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@2 +# AUNIT --inst x0e604400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_sshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 +# CONSTRUCT x4ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@4 +# AUNIT --inst x4ea04400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_sshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 +# CONSTRUCT x0e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@1 +# AUNIT --inst x0e204400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_sshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 +# CONSTRUCT x4e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@2 +# AUNIT --inst x4e604400/mask=xffe0fc00 --status nopcodeop + +:sshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_sshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x4f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3 =var:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll2/2@1 +# AUNIT --inst x4f08a400/mask=xfff8fc00 --status pass --comment "ext" + +:sshll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd 
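+# sshll2 widens the upper half of the source: the high eight byte lanes of Rn are sign-extended to halfwords, then shifted left by Imm_uimm3 (e.g. byte 0x80 = -128 sign-extends to 0xff80; << 3 gives 0xfc00).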
+{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + local tmp3:2 = Imm_uimm3; + # simd infix Rd_VPR128.8H = TMPQ2 << tmp3 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] << tmp3; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] << tmp3; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] << tmp3; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] << tmp3; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] << tmp3; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] << tmp3; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] << tmp3; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x0f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 =var:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll/2@4 +# AUNIT --inst x0f20a400/mask=xffe0fc00 --status pass --comment "ext" + +:sshll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + local tmp2:8 = Imm_uimm5; + # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x0f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 =var:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll/2@2 +# AUNIT --inst x0f10a400/mask=xfff0fc00 --status pass --comment "ext" + +:sshll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + local tmp2:4 = Imm_uimm4; + # simd infix Rd_VPR128.4S = TMPQ1 << tmp2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] << tmp2; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] << tmp2; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] << tmp2; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x4f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 =var:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll2/2@4 +# AUNIT --inst x4f20a400/mask=xffe0fc00 --status pass --comment "ext" + +:sshll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + 
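# TMPD1 holds bits 64-127 of Rn_VPR128.4S (lanes 2 and 3); the "2" suffix of sshll2 selects this upper half, which is widened below to two 64-bit lanes. +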
# simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + local tmp3:8 = Imm_uimm5; + # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x0f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 =var:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll/2@1 +# AUNIT --inst x0f08a400/mask=xfff8fc00 --status pass --comment "ext" + +:sshll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + local tmp2:2 = Imm_uimm3; + # simd infix Rd_VPR128.8H = TMPQ1 << tmp2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] << tmp2; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] << tmp2; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] << tmp2; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] << tmp2; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] << tmp2; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] << tmp2; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] << tmp2; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# CONSTRUCT x4f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 =var:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll2/2@2 +# AUNIT --inst x4f10a400/mask=xfff0fc00 --status pass --comment "ext" + +:sshll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + local tmp3:4 = Imm_uimm4; + # simd infix Rd_VPR128.4S = TMPQ2 << tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] << tmp3; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] << tmp3; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] << tmp3; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x5f000400/mask=xff80fc00 +# CONSTRUCT x5f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2 +# AUNIT --inst x5f400400/mask=xffc0fc00 --status nopcodeop + +:sshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_sshr(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x4f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED 
OPCODES +# SMACRO ARG1 ARG2 ARG3:1 =$s>>@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@1 +# AUNIT --inst x4f080400/mask=xfff8fc00 --status pass + +:sshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x4f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 =$s>>@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@8 +# AUNIT --inst x4f400400/mask=xffc0fc00 --status pass + +:sshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D s>> tmp1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x0f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 =$s>>@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@4 +# AUNIT --inst x0f200400/mask=xffe0fc00 --status pass + +:sshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S s>> tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] s>> tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] s>> tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x0f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 =$s>>@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@2 +# AUNIT --inst x0f100400/mask=xfff0fc00 --status pass + +:sshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] s>> 
Imm_shr_imm16:2; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x4f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 =$s>>@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@4 +# AUNIT --inst x4f200400/mask=xffe0fc00 --status pass + +:sshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S s>> tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] s>> tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] s>> tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] s>> tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] s>> tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x0f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 =$s>>@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@1 +# AUNIT --inst x0f080400/mask=xfff8fc00 --status pass + +:sshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix Rd_VPR64.8B = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 +# CONSTRUCT x4f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 =$s>>@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@2 +# AUNIT --inst x4f100400/mask=xfff0fc00 --status pass + +:sshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x4f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@1 +# AUNIT --inst x4f081400/mask=xfff8fc00 --status pass + +:ssra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & 
b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 + TMPQ1[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; + # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x4f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 $s>>@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@8 +# AUNIT --inst x4f401400/mask=xffc0fc00 --status pass + +:ssra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + # simd infix TMPQ1 = Rn_VPR128.2D s>> tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x0f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 $s>>@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@4 +# AUNIT --inst x0f201400/mask=xffe0fc00 --status pass + +:ssra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix TMPD1 = Rn_VPR64.2S s>> 
tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] s>> tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] s>> tmp1; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x0f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@2 +# AUNIT --inst x0f101400/mask=xfff0fc00 --status pass + +:ssra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix TMPD1 = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x4f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 $s>>@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@4 +# AUNIT --inst x4f201400/mask=xffe0fc00 --status pass + +:ssra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix TMPQ1 = Rn_VPR128.4S s>> tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] s>> tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] s>> tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] s>> tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] s>> tmp1; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x0f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@1 +# AUNIT --inst x0f081400/mask=xfff8fc00 --status pass + +:ssra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix TMPD1 = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 + TMPD1[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; + # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 
1 + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 +# CONSTRUCT x4f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@2 +# AUNIT --inst x4f101400/mask=xfff0fc00 --status pass + +:ssra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.318 SSRA page C7-2109 line 118340 MATCH x5f001400/mask=xff80fc00 +# CONSTRUCT x5f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 s>> &=+ +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3 +# AUNIT --inst x5f401400/mask=xffc0fc00 --status pass + +:ssra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b010111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + local tmp2:8 = Rn_FPR64 s>> tmp1; + Rd_FPR64 = Rd_FPR64 + tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 +# CONSTRUCT x4ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 =$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl2/2@4 +# AUNIT --inst x4ea02000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x2 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); + # simd infix Rd_VPR128.2D = TMPQ2 - TMPQ4 on lane 
size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 +# CONSTRUCT x4e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 =$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl2/2@2 +# AUNIT --inst x4e602000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x2 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); + # simd infix Rd_VPR128.4S = TMPQ2 - TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 +# CONSTRUCT x4e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 =$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl2/2@1 +# AUNIT --inst x4e202000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x2 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); + # simd infix Rd_VPR128.8H = TMPQ2 - TMPQ4 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 +# CONSTRUCT x0ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 =$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl/2@4 +# AUNIT --inst 
x0ea02000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x2 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = TMPQ1 - TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 +# CONSTRUCT x0e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 =$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl/2@2 +# AUNIT --inst x0e602000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x2 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = TMPQ1 - TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 +# CONSTRUCT x0e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 =$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl/2@1 +# AUNIT --inst x0e202000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x2 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = TMPQ1 - TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; 
+ Rd_VPR128.8H[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 +# CONSTRUCT x4ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $sext@4:16 =$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw2/2@4 +# AUNIT --inst x4ea03000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Rm_VPR128 & Zd +{ + TMPD1 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 +# CONSTRUCT x4e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $sext@2:16 =$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw2/2@2 +# AUNIT --inst x4e603000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPD1 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 +# CONSTRUCT x4e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $sext@1:16 =$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw2/2@1 +# AUNIT --inst x4e203000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPD1 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = 
Rn_VPR128.8H[64,16] - TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 +# CONSTRUCT x0ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $sext@4:16 =$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw/2@4 +# AUNIT --inst x0ea03000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = sext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 +# CONSTRUCT x0e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $sext@2:16 =$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw/2@2 +# AUNIT --inst x0e603000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = sext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 +# CONSTRUCT x0e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $sext@1:16 =$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw/2@1 +# AUNIT --inst x0e203000/mask=xffe0fc00 --status pass --comment "ext" + +:ssubw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = sext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ1[80,16]; 
+ Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 +# CONSTRUCT x2c000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 4 +:8 =store pop +# SMACRO(pseudo) null ARG1 ARG3 =NEON_stnp1/2 null ARG2 ARG3 =NEON_stnp2/2 +# AUNIT --inst x2c000000/mask=xffc00000 --status nomem + +:stnp Rt_FPR32, Rt2_FPR32, addrPairIndexed +is b_3031=0b00 & b_2229=0b10110000 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 +{ + * addrPairIndexed = Rt_FPR32; + local tmp1:8 = addrPairIndexed + 4; + * tmp1 = Rt2_FPR32; +} + +# C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 +# CONSTRUCT x6c000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 8 +:8 =store pop +# SMACRO(pseudo) null ARG1 ARG3 =NEON_stnp1/2 null ARG2 ARG3 =NEON_stnp2/2 +# AUNIT --inst x6c000000/mask=xffc00000 --status nomem + +:stnp Rt_FPR64, Rt2_FPR64, addrPairIndexed +is b_3031=0b01 & b_2229=0b10110000 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 +{ + * addrPairIndexed = Rt_FPR64; + local tmp1:8 = addrPairIndexed + 8; + * tmp1 = Rt2_FPR64; +} + +# C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 +# CONSTRUCT xac000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 16 +:8 =store pop +# SMACRO(pseudo) null ARG1 ARG3 =NEON_stnp1/2 null ARG2 ARG3 =NEON_stnp2/2 +# AUNIT --inst xac000000/mask=xffc00000 --status nomem + +:stnp Rt_FPR128, Rt2_FPR128, addrPairIndexed +is b_3031=0b10 & b_2229=0b10110000 & Rt2_FPR64 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 +{ + * addrPairIndexed = Rt_FPR128; + local tmp1:8 = addrPairIndexed + 16; + * tmp1 = Rt2_FPR128; +} + +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2c800000/mask=x3fc00000 +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2d800000/mask=x3fc00000 +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2d000000/mask=x3fc00000 +# C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 +# CONSTRUCT xac000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 16 +:8 =store pop +# SMACRO(pseudo) null ARG1 ARG3 =NEON_stp1/2 null ARG2 ARG3 =NEON_stp2/2 +# AUNIT --inst xac000000/mask=xfe400000 --status nomem +# 128-bit variant (post-index, pre-index, and signed offset) + +:stp Rt_FPR128, Rt2_FPR128, addrPairIndexed +is b_3031=0b10 & b_2529=0b10110 & b_22=0 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 +{ + * addrPairIndexed = Rt_FPR128; + local tmp1:8 = addrPairIndexed + 16; + * tmp1 = Rt2_FPR128; +} + +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2c800000/mask=x3fc00000 +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2d800000/mask=x3fc00000 +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2d000000/mask=x3fc00000 +# C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 +# CONSTRUCT x2c000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 4 +:8 =store pop +# SMACRO(pseudo) null ARG1 ARG3 =NEON_stp1/2 null ARG2 ARG3 =NEON_stp2/2 +# AUNIT --inst x2c000000/mask=xfe400000 --status nomem +# 32-bit variant (post-index, pre-index, and signed offset) + +:stp Rt_FPR32, Rt2_FPR32, addrPairIndexed +is b_3031=0b00 & b_2529=0b10110 & b_22=0 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 +{ + * 
addrPairIndexed = Rt_FPR32; + local tmp1:8 = addrPairIndexed + 4; + * tmp1 = Rt2_FPR32; +} + +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2c800000/mask=x3fc00000 +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2d800000/mask=x3fc00000 +# C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2d000000/mask=x3fc00000 +# C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 +# CONSTRUCT x6c000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES +# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 8 +:8 =store pop +# SMACRO(pseudo) null ARG1 ARG3 =NEON_stp1/2 null ARG2 ARG3 =NEON_stp2/2 +# AUNIT --inst x6c000000/mask=xfe400000 --status nomem +# 64-bit variant (post-index, pre-index, and signed offset) + +:stp Rt_FPR64, Rt2_FPR64, addrPairIndexed +is b_3031=0b01 & b_2529=0b10110 & b_22=0 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 +{ + * addrPairIndexed = Rt_FPR64; + local tmp1:8 = addrPairIndexed + 8; + * tmp1 = Rt2_FPR64; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000c00/mask=x3f600c00 +# CONSTRUCT x3c000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst x3c000400/mask=xffe00400 --status nomem +# Post- and Pre-offset 8-bit variant when size == 00 && opc == 00 F=FPR8 + +:str Rt_FPR8, addrIndexed +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR8 & addrIndexed & Zt +{ + * addrIndexed = Rt_FPR8; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000c00/mask=x3f600c00 +# CONSTRUCT x7c000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst x7c000400/mask=xffe00400 --status nomem +# Post- and Pre-offset 16-bit variant when size == 01 && opc == 00 F=FPR16 + +:str Rt_FPR16, addrIndexed +is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR16 & addrIndexed & Zt +{ + * addrIndexed = Rt_FPR16; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000c00/mask=x3f600c00 +# CONSTRUCT xbc000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst xbc000400/mask=xffe00400 --status nomem +# Post- and Pre-offset 32-bit variant when size == 10 && opc == 00 F=FPR32 + +:str Rt_FPR32, addrIndexed +is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR32 & addrIndexed & Zt +{ + * addrIndexed = Rt_FPR32; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000c00/mask=x3f600c00 +# CONSTRUCT xfc000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst xfc000400/mask=xffe00400 --status nomem +# Post- and Pre-offset 64-bit variant when size == 11 && opc == 00 F=FPR64 + +:str Rt_FPR64, addrIndexed +is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR64 & addrIndexed & Zt +{ + * addrIndexed = Rt_FPR64; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 +# 
C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000c00/mask=x3f600c00 +# CONSTRUCT x3c800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst x3c800400/mask=xffe00400 --status nomem +# Post- and Pre-offset 128-bit variant when size == 00 && opc == 10 F=FPR128 + +:str Rt_FPR128, addrIndexed +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=0 & b_10=1 & Rt_FPR128 & addrIndexed & Zt +{ + * addrIndexed = Rt_FPR128; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 +# CONSTRUCT x3d000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst x3d000000/mask=xffc00000 --status nomem +# Unsigned offset 8-bit variant when size == 00 && opc == 00 F=FPR8 + +:str Rt_FPR8, addrUIMM +is b_3031=0b00 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR8 & addrUIMM & Zt +{ + * addrUIMM = Rt_FPR8; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 +# CONSTRUCT x7d000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst x7d000000/mask=xffc00000 --status nomem +# Unsigned offset 16-bit variant when size == 01 && opc == 00 F=FPR16 + +:str Rt_FPR16, addrUIMM +is b_3031=0b01 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR16 & addrUIMM & Zt +{ + * addrUIMM = Rt_FPR16; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 +# CONSTRUCT xbd000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst xbd000000/mask=xffc00000 --status nomem +# Unsigned offset 32-bit variant when size == 10 && opc == 00 F=FPR32 + +:str Rt_FPR32, addrUIMM +is b_3031=0b10 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR32 & addrUIMM & Zt +{ + * addrUIMM = Rt_FPR32; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 +# CONSTRUCT xfd000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst xfd000000/mask=xffc00000 --status nomem +# Unsigned offset 64-bit variant when size == 11 && opc == 00 F=FPR64 + +:str Rt_FPR64, addrUIMM +is b_3031=0b11 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR64 & addrUIMM & Zt +{ + * addrUIMM = Rt_FPR64; +} + +# C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 +# CONSTRUCT x3d800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2 +# AUNIT --inst x3d800000/mask=xffc00000 --status nomem +# Unsigned offset 128-bit variant when size == 00 && opc == 10 F=FPR128 + +:str Rt_FPR128, addrUIMM +is b_3031=0b00 & b_2429=0b111101 & b_2223=0b10 & Rt_FPR128 & addrUIMM & Zt +{ + * addrUIMM = Rt_FPR128; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x3c200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x3c200800/mask=xffe02c00 --status nomem +# 8-fsreg,STR-8-fsreg variant when size == 00 && opc == 00 && option is not 011 bb=b_13 option=0 F=FPR8 G=GPR32 + +:str Rt_FPR8, [Rn_GPR64xsp, 
Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR8; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x3c202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x3c202800/mask=xffe02c00 --status nomem +# 8-fsreg,STR-8-fsreg variant when size == 00 && opc == 00 && option is not 011 bb=b_13 option=1 F=FPR8 G=GPR64 + +:str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR8; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x3c206800/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x3c206800/mask=xffe0ec00 --status nomem +# 8-fsreg,STR-8-fsreg variant when size == 00 && opc == 00 && option is 011 bb=b_1315 option=0b011 F=FPR8 G=GPR64 + +:str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_1315=0b011 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR8; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x7c200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x7c200800/mask=xffe02c00 --status nomem +# 16-fsreg,STR-16-fsreg variant when size == 01 && opc == 00 bb=b_13 option=0 F=FPR16 G=GPR32 + +:str Rt_FPR16, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR16; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x7c202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x7c202800/mask=xffe02c00 --status nomem +# 16-fsreg,STR-16-fsreg variant when size == 01 && opc == 00 bb=b_13 option=1 F=FPR16 G=GPR64 + +:str Rt_FPR16, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR16; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 
121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT xbc200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst xbc200800/mask=xffe02c00 --status nomem +# 32-fsreg,STR-32-fsreg variant when size == 10 && opc == 00 bb=b_13 option=0 F=FPR32 G=GPR32 + +:str Rt_FPR32, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR32; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT xbc202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst xbc202800/mask=xffe02c00 --status nomem +# 32-fsreg,STR-32-fsreg variant when size == 10 && opc == 00 bb=b_13 option=1 F=FPR32 G=GPR64 + +:str Rt_FPR32, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR32; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT xfc200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst xfc200800/mask=xffe02c00 --status nomem +# 64-fsreg,STR-64-fsreg variant when size == 11 && opc == 00 bb=b_13 option=0 F=FPR64 G=GPR32 + +:str Rt_FPR64, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR64; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT xfc202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst xfc202800/mask=xffe02c00 --status nomem +# 64-fsreg,STR-64-fsreg variant when size == 11 && opc == 00 bb=b_13 option=1 F=FPR64 G=GPR64 + +:str Rt_FPR64, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR64; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x3ca00800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x3ca00800/mask=xffe02c00 --status nomem +# 128-fsreg,STR-128-fsreg variant when size == 00 && opc == 10 bb=b_13 option=0 
F=FPR128 G=GPR32 + +:str Rt_FPR128, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR128; +} + +# C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 +# CONSTRUCT x3ca02800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop +# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 +# AUNIT --inst x3ca02800/mask=xffe02c00 --status nomem +# 128-fsreg,STR-128-fsreg variant when size == 00 && opc == 10 bb=b_13 option=1 F=FPR128 G=GPR64 + +:str Rt_FPR128, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] +is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd +{ + local tmp1:8 = extend_spec << extend_amount; + local tmp2:8 = Rn_GPR64xsp + tmp1; + * tmp2 = Rt_FPR128; +} + +# C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 +# CONSTRUCT x3c800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 +# AUNIT --inst x3c800000/mask=xffe00c00 --status nomem + +:stur Rt_FPR128, addrIndexed +is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR128 +{ + * addrIndexed = Rt_FPR128; +} + +# C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 +# CONSTRUCT x7c000000/mask=xffc00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 +# AUNIT --inst x7c000000/mask=xffc00c00 --status nomem + +:stur Rt_FPR16, addrIndexed +is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_1011=0 & addrIndexed & Rt_FPR16 +{ + * addrIndexed = Rt_FPR16; +} + +# C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 +# CONSTRUCT xbc000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 +# AUNIT --inst xbc000000/mask=xffe00c00 --status nomem + +:stur Rt_FPR32, addrIndexed +is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR32 +{ + * addrIndexed = Rt_FPR32; +} + +# C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 +# CONSTRUCT xfc000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 +# AUNIT --inst xfc000000/mask=xffe00c00 --status nomem + +:stur Rt_FPR64, addrIndexed +is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR64 +{ + * addrIndexed = Rt_FPR64; +} + +# C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 +# CONSTRUCT x3c000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =store pop +# SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 +# AUNIT --inst x3c000000/mask=xffe00c00 --status nomem + +:stur Rt_FPR8, addrIndexed +is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR8 +{ + * addrIndexed = Rt_FPR8; +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x7e208400/mask=xff20fc00 +# CONSTRUCT 
x7ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =- +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2 +# AUNIT --inst x7ee08400/mask=xffe0fc00 --status pass + +:sub Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x10 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = Rn_FPR64 - Rm_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x6e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@1 +# AUNIT --inst x6e208400/mask=xffe0fc00 --status pass + +:sub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x10 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rn_VPR128.16B - Rm_VPR128.16B on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] - Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] - Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] - Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] - Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] - Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] - Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] - Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] - Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] - Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] - Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] - Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] - Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] - Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] - Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] - Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] - Rm_VPR128.16B[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x6ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@8 +# AUNIT --inst x6ee08400/mask=xffe0fc00 --status pass + +:sub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + # simd infix Rd_VPR128.2D = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x2ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@4 +# AUNIT --inst x2ea08400/mask=xffe0fc00 --status pass + +:sub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x10 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# 
C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x2e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@2 +# AUNIT --inst x2e608400/mask=xffe0fc00 --status pass + +:sub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x10 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H - Rm_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] - Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] - Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] - Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] - Rm_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x6ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@4 +# AUNIT --inst x6ea08400/mask=xffe0fc00 --status pass + +:sub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x2e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@1 +# AUNIT --inst x2e208400/mask=xffe0fc00 --status pass + +:sub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x10 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix Rd_VPR64.8B = Rn_VPR64.8B - Rm_VPR64.8B on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] - Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] - Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] - Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] - Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] - Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] - Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] - Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] - Rm_VPR64.8B[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 +# CONSTRUCT x6e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@2 +# AUNIT --inst x6e608400/mask=xffe0fc00 --status pass + +:sub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = 
Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 +# CONSTRUCT x4e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@2 &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@2 +# AUNIT --inst x4e206000/mask=xffe0fc00 --status pass + +:subhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; + # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 + Rd_VPR128.16B[64,8] = TMPQ1[8,8]; + Rd_VPR128.16B[72,8] = TMPQ1[24,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[88,8] = TMPQ1[56,8]; + Rd_VPR128.16B[96,8] = TMPQ1[72,8]; + Rd_VPR128.16B[104,8] = TMPQ1[88,8]; + Rd_VPR128.16B[112,8] = TMPQ1[104,8]; + Rd_VPR128.16B[120,8] = TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 +# CONSTRUCT x4ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@8 &=$shuffle@1-2@3-3:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@8 +# AUNIT --inst x4ea06000/mask=xffe0fc00 --status pass + +:subhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 +# CONSTRUCT x4e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@4 &=$shuffle@1-4@3-5@5-6@7-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn2/3@4 +# AUNIT --inst x4e606000/mask=xffe0fc00 --status pass + +:subhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] - 
Rm_VPR128.4S[96,32]; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 + Rd_VPR128.8H[64,16] = TMPQ1[16,16]; + Rd_VPR128.8H[80,16] = TMPQ1[48,16]; + Rd_VPR128.8H[96,16] = TMPQ1[80,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 +# CONSTRUCT x0ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@8 &=$shuffle@1-0@3-1:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn/3@8 +# AUNIT --inst x0ea06000/mask=xffe0fc00 --status pass + +:subhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; + # simd shuffle Rd_VPR64.2S = TMPQ1 (@1-0@3-1) lane size 4 + Rd_VPR64.2S[0,32] = TMPQ1[32,32]; + Rd_VPR64.2S[32,32] = TMPQ1[96,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 +# CONSTRUCT x0e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@4 &=$shuffle@1-0@3-1@5-2@7-3:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn/3@4 +# AUNIT --inst x0e606000/mask=xffe0fc00 --status pass + +:subhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; + # simd shuffle Rd_VPR64.4H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 + Rd_VPR64.4H[0,16] = TMPQ1[16,16]; + Rd_VPR64.4H[16,16] = TMPQ1[48,16]; + Rd_VPR64.4H[32,16] = TMPQ1[80,16]; + Rd_VPR64.4H[48,16] = TMPQ1[112,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 +# CONSTRUCT x0e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@2 &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn/3@2 +# AUNIT --inst x0e206000/mask=xffe0fc00 --status pass + +:subhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; + # simd shuffle Rd_VPR64.8B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 + Rd_VPR64.8B[0,8] = TMPQ1[8,8]; + Rd_VPR64.8B[8,8] = TMPQ1[24,8]; + Rd_VPR64.8B[16,8] = TMPQ1[40,8]; + Rd_VPR64.8B[24,8] = TMPQ1[56,8]; + Rd_VPR64.8B[32,8] = 
TMPQ1[72,8]; + Rd_VPR64.8B[40,8] = TMPQ1[88,8]; + Rd_VPR64.8B[48,8] = TMPQ1[104,8]; + Rd_VPR64.8B[56,8] = TMPQ1[120,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 +# CONSTRUCT x5e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=+ +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 +# AUNIT --inst x5e203800/mask=xfffffc00 --status fail --comment "nointsat" +# Scalar variant when size=00 Q=1 bb=1 V=FPR8 s2=2 + +:suqadd Rd_FPR8, Rn_FPR8 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_FPR8 & Rn_FPR8 & Zd +{ + Rd_FPR8 = Rd_FPR8 + Rn_FPR8; + zext_zb(Zd); # zero upper 31 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 +# CONSTRUCT x5e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=+ +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 +# AUNIT --inst x5e603800/mask=xfffffc00 --status fail --comment "nointsat" +# Scalar variant when size=01 Q=1 bb=1 V=FPR16 s2=4 + +:suqadd Rd_FPR16, Rn_FPR16 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = Rd_FPR16 + Rn_FPR16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 +# CONSTRUCT x5ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=+ +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 +# AUNIT --inst x5ea03800/mask=xfffffc00 --status fail --comment "nointsat" +# Scalar variant when size=10 Q=1 bb=1 V=FPR32 s2=8 + +:suqadd Rd_FPR32, Rn_FPR32 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_FPR32 & Rn_FPR32 & Zd +{ + Rd_FPR32 = Rd_FPR32 + Rn_FPR32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 +# CONSTRUCT x5ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=+ +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 +# AUNIT --inst x5ee03800/mask=xfffffc00 --status fail --comment "nointsat" +# Scalar variant when size=11 Q=1 bb=1 V=FPR64 s2=16 + +:suqadd Rd_FPR64, Rn_FPR64 +is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_FPR64 & Rn_FPR64 & Zd +{ + Rd_FPR64 = Rd_FPR64 + Rn_FPR64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 +# CONSTRUCT x0e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@1 +# AUNIT --inst x0e203800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=00 Q=0 bb=0 V=VPR64.8B e1=1 s2=16 + +:suqadd Rd_VPR64.8B, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd +{ + # simd infix Rd_VPR64.8B = Rd_VPR64.8B + Rn_VPR64.8B on lane size 1 + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + Rn_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + Rn_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + Rn_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + Rn_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + Rn_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + Rn_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + Rn_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + Rn_VPR64.8B[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH 
x0e203800/mask=xbf3ffc00 +# CONSTRUCT x4e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@1 +# AUNIT --inst x4e203800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=00 Q=1 bb=0 V=VPR128.16B e1=1 s2=32 + +:suqadd Rd_VPR128.16B, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rd_VPR128.16B + Rn_VPR128.16B on lane size 1 + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + Rn_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + Rn_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + Rn_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + Rn_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + Rn_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + Rn_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + Rn_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + Rn_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + Rn_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + Rn_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + Rn_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + Rn_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + Rn_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + Rn_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + Rn_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + Rn_VPR128.16B[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 +# CONSTRUCT x0e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@2 +# AUNIT --inst x0e603800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=01 Q=0 bb=0 V=VPR64.4H e1=2 s2=16 + +:suqadd Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + Rn_VPR64.4H on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + Rn_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + Rn_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + Rn_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + Rn_VPR64.4H[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 +# CONSTRUCT x4e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@2 +# AUNIT --inst x4e603800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=01 Q=1 bb=0 V=VPR128.8H e1=2 s2=32 + +:suqadd Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + Rn_VPR128.8H on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + Rn_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + Rn_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + Rn_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + Rn_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + Rn_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + Rn_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] 
+ Rn_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + Rn_VPR128.8H[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 +# CONSTRUCT x0ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@4 +# AUNIT --inst x0ea03800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=10 Q=0 bb=0 V=VPR64.2S e1=4 s2=16 + +:suqadd Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + Rn_VPR64.2S on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + Rn_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + Rn_VPR64.2S[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 +# CONSTRUCT x4ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@4 +# AUNIT --inst x4ea03800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=10 Q=1 bb=0 V=VPR128.4S e1=4 s2=32 + +:suqadd Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + Rn_VPR128.4S on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + Rn_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + Rn_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + Rn_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + Rn_VPR128.4S[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 +# CONSTRUCT x4ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@8 +# AUNIT --inst x4ee03800/mask=xfffffc00 --status fail --comment "nointsat" +# Vector variant when size=11 Q=1 bb=0 V=VPR128.2D e1=8 s2=32 + +:suqadd Rd_VPR128.2D, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + Rn_VPR128.2D on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + Rn_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + Rn_VPR128.2D[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# CONSTRUCT x4f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 =$sext@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl2/1@1 +# AUNIT --inst x4f08a400/mask=xfffffc00 --status pass --comment "ext" + +:sxtl2 Rd_VPR128.8H, Rn_VPR128.16B +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize Rd_VPR128.8H = sext(TMPD1) (lane size 1 to 2) + Rd_VPR128.8H[0,16] = sext(TMPD1[0,8]); + Rd_VPR128.8H[16,16] = sext(TMPD1[8,8]); + Rd_VPR128.8H[32,16] = sext(TMPD1[16,8]); + Rd_VPR128.8H[48,16] = sext(TMPD1[24,8]); + Rd_VPR128.8H[64,16] = sext(TMPD1[32,8]); + Rd_VPR128.8H[80,16] = sext(TMPD1[40,8]); + Rd_VPR128.8H[96,16] = sext(TMPD1[48,8]); + Rd_VPR128.8H[112,16] = sext(TMPD1[56,8]); + zext_zq(Zd); # zero upper 16 bytes 
of Zd +} + +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# CONSTRUCT x0f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =$sext@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl/1@4 +# AUNIT --inst x0f20a400/mask=xfffffc00 --status pass --comment "ext" + +:sxtl Rd_VPR128.2D, Rn_VPR64.2S +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR64.2S; + # simd resize Rd_VPR128.2D = sext(TMPD1) (lane size 4 to 8) + Rd_VPR128.2D[0,64] = sext(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = sext(TMPD1[32,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# CONSTRUCT x0f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =$sext@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl/1@2 +# AUNIT --inst x0f10a400/mask=xfffffc00 --status pass --comment "ext" + +:sxtl Rd_VPR128.4S, Rn_VPR64.4H +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR64.4H; + # simd resize Rd_VPR128.4S = sext(TMPD1) (lane size 2 to 4) + Rd_VPR128.4S[0,32] = sext(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = sext(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = sext(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = sext(TMPD1[48,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# CONSTRUCT x4f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 =$sext@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl2/1@4 +# AUNIT --inst x4f20a400/mask=xfffffc00 --status pass --comment "ext" + +:sxtl2 Rd_VPR128.2D, Rn_VPR128.4S +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize Rd_VPR128.2D = sext(TMPD1) (lane size 4 to 8) + Rd_VPR128.2D[0,64] = sext(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = sext(TMPD1[32,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 +# CONSTRUCT x0f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =$sext@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl/1@1 +# AUNIT --inst x0f08a400/mask=xfffffc00 --status pass --comment "ext" + +:sxtl Rd_VPR128.8H, Rn_VPR64.8B +is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR64.8B; + # simd resize Rd_VPR128.8H = sext(TMPD1) (lane size 1 to 2) + Rd_VPR128.8H[0,16] = sext(TMPD1[0,8]); + Rd_VPR128.8H[16,16] = sext(TMPD1[8,8]); + Rd_VPR128.8H[32,16] = sext(TMPD1[16,8]); + Rd_VPR128.8H[48,16] = sext(TMPD1[24,8]); + Rd_VPR128.8H[64,16] = sext(TMPD1[32,8]); + Rd_VPR128.8H[80,16] = sext(TMPD1[40,8]); + Rd_VPR128.8H[96,16] = sext(TMPD1[48,8]); + Rd_VPR128.8H[112,16] = sext(TMPD1[56,8]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 +# C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH 
x0f00a400/mask=xbf80fc00 +# CONSTRUCT x4f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 =$sext@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl2/1@2 +# AUNIT --inst x4f10a400/mask=xfffffc00 --status pass --comment "ext" + +:sxtl2 Rd_VPR128.4S, Rn_VPR128.8H +is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize Rd_VPR128.4S = sext(TMPD1) (lane size 2 to 4) + Rd_VPR128.4S[0,32] = sext(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = sext(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = sext(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = sext(TMPD1[48,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.322 TBL page C7-1717 line 99409 KEEPWITH + +tblx: "tbl" is b_12=0 { local tmp:16 = zext(0:8); export tmp; } +tblx: "tbx" is b_12=1 & Rd_VPR128 { export Rd_VPR128; } + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x0e000000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B ARG3 =a64_TBL/3 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B ARG3 =NEON_tblx/3@1 +# AUNIT --inst x0e000000/mask=xffe0ec00 --status pass +# Q == 0 && len == 00 8B, Single register table variant + +:^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^"}", Rm_VPR64.8B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b00 & Rm_VPR64.8B & Rn_VPR128.16B & Rd_VPR64.8B & tblx & Zd +{ + Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR64.8B); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x4e000000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B ARG3 =a64_TBL/3 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B ARG3 =NEON_tblx/3@1 +# AUNIT --inst x4e000000/mask=xffe0ec00 --status pass +# Q == 1 && len == 00 16B, Single register table variant + +:^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^"}", Rm_VPR128.16B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b00 & Rm_VPR128.16B & Rn_VPR128.16B & Rd_VPR128.16B & tblx & Zd +{ + Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR128.16B); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x0e002000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =a64_TBL/4 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =NEON_tblx/4 +# AUNIT --inst x0e002000/mask=xffe0ec00 --status pass +# Q == 0 && len == 01 8B, Two register table variant + +:^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^"}", Rm_VPR64.8B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b01 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd +{ + Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR64.8B); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x4e002000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =a64_TBL/4 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =NEON_tblx/4 +# 
AUNIT --inst x4e002000/mask=xffe0ec00 --status pass +# Q == 1 && len == 01 16B, Two register table variant + +:^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^"}", Rm_VPR128.16B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b01 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd +{ + Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR128.16B); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x0e004000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =a64_TBL/5 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =NEON_tblx/5 +# AUNIT --inst x0e004000/mask=xffe0ec00 --status pass +# Q == 0 && len == 10 8B, Three register table variant + +:^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^"}", Rm_VPR64.8B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b10 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd +{ + Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR64.8B); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x4e004000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =a64_TBL/5 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =NEON_tblx/5 +# AUNIT --inst x4e004000/mask=xffe0ec00 --status pass +# Q == 1 && len == 10 16B, Three register table variant + +:^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^"}", Rm_VPR128.16B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b10 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd +{ + Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR128.16B); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x0e006000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =a64_TBL/6 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =NEON_tblx/6 +# AUNIT --inst x0e006000/mask=xffe0ec00 --status pass +# Q == 0 && len == 11 8B, Four register table variant + +:^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^", "^Rnnnn_VPR128.16B^"}", Rm_VPR64.8B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b11 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rnnnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd +{ + Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR64.8B); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 +# C7.2.340 TBX page C7-2171 line 122128 MATCH x0e001000/mask=xbfe09c00 +# CONSTRUCT x4e006000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B 
Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =a64_TBL/6 +# SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =NEON_tblx/6 +# AUNIT --inst x4e006000/mask=xffe0ec00 --status pass +# Q == 1 && len == 11 16B, Four register table variant + +:^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^", "^Rnnnn_VPR128.16B^"}", Rm_VPR128.16B +is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b11 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rnnnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd +{ + Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR128.16B); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x4e002800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2@4-4@6-6@8-8@10-10@12-12@14-14:1 swap &=$shuffle@0-1@2-3@4-5@6-7@8-9@10-11@12-13@14-15:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@1 +# AUNIT --inst x4e002800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + TMPQ2 = Rm_VPR128.16B; + TMPQ1 = Rn_VPR128.16B; + # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@2-2@4-4@6-6@8-8@10-10@12-12@14-14) lane size 1 + Rd_VPR128.16B[0,8] = TMPQ1[0,8]; + Rd_VPR128.16B[16,8] = TMPQ1[16,8]; + Rd_VPR128.16B[32,8] = TMPQ1[32,8]; + Rd_VPR128.16B[48,8] = TMPQ1[48,8]; + Rd_VPR128.16B[64,8] = TMPQ1[64,8]; + Rd_VPR128.16B[80,8] = TMPQ1[80,8]; + Rd_VPR128.16B[96,8] = TMPQ1[96,8]; + Rd_VPR128.16B[112,8] = TMPQ1[112,8]; + # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-1@2-3@4-5@6-7@8-9@10-11@12-13@14-15) lane size 1 + Rd_VPR128.16B[8,8] = TMPQ2[0,8]; + Rd_VPR128.16B[24,8] = TMPQ2[16,8]; + Rd_VPR128.16B[40,8] = TMPQ2[32,8]; + Rd_VPR128.16B[56,8] = TMPQ2[48,8]; + Rd_VPR128.16B[72,8] = TMPQ2[64,8]; + Rd_VPR128.16B[88,8] = TMPQ2[80,8]; + Rd_VPR128.16B[104,8] = TMPQ2[96,8]; + Rd_VPR128.16B[120,8] = TMPQ2[112,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x4ec02800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:8 swap &=$shuffle@0-1:8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@8 +# AUNIT --inst x4ec02800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + TMPQ2 = Rm_VPR128.2D; + TMPQ1 = Rn_VPR128.2D; + # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64]; + # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 + Rd_VPR128.2D[64,64] = TMPQ2[0,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x0e802800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:4 swap &=$shuffle@0-1:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@4 +# AUNIT --inst x0e802800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + TMPD2 = Rm_VPR64.2S; + TMPD1 = 
Rn_VPR64.2S; + # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 + Rd_VPR64.2S[0,32] = TMPD1[0,32]; + # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 + Rd_VPR64.2S[32,32] = TMPD2[0,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x0e402800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2:2 swap &=$shuffle@0-1@2-3:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@2 +# AUNIT --inst x0e402800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + TMPD2 = Rm_VPR64.4H; + TMPD1 = Rn_VPR64.4H; + # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@2-2) lane size 2 + Rd_VPR64.4H[0,16] = TMPD1[0,16]; + Rd_VPR64.4H[32,16] = TMPD1[32,16]; + # simd shuffle Rd_VPR64.4H = TMPD2 (@0-1@2-3) lane size 2 + Rd_VPR64.4H[16,16] = TMPD2[0,16]; + Rd_VPR64.4H[48,16] = TMPD2[32,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x4e802800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2:4 swap &=$shuffle@0-1@2-3:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@4 +# AUNIT --inst x4e802800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPQ2 = Rm_VPR128.4S; + TMPQ1 = Rn_VPR128.4S; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@2-2) lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32]; + # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-1@2-3) lane size 4 + Rd_VPR128.4S[32,32] = TMPQ2[0,32]; + Rd_VPR128.4S[96,32] = TMPQ2[64,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x0e002800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2@4-4@6-6:1 swap &=$shuffle@0-1@2-3@4-5@6-7:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@1 +# AUNIT --inst x0e002800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + TMPD2 = Rm_VPR64.8B; + TMPD1 = Rn_VPR64.8B; + # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@2-2@4-4@6-6) lane size 1 + Rd_VPR64.8B[0,8] = TMPD1[0,8]; + Rd_VPR64.8B[16,8] = TMPD1[16,8]; + Rd_VPR64.8B[32,8] = TMPD1[32,8]; + Rd_VPR64.8B[48,8] = TMPD1[48,8]; + # simd shuffle Rd_VPR64.8B = TMPD2 (@0-1@2-3@4-5@6-7) lane size 1 + Rd_VPR64.8B[8,8] = TMPD2[0,8]; + Rd_VPR64.8B[24,8] = TMPD2[16,8]; + Rd_VPR64.8B[40,8] = TMPD2[32,8]; + Rd_VPR64.8B[56,8] = TMPD2[48,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 +# CONSTRUCT x4e402800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2@4-4@6-6:2 swap &=$shuffle@0-1@2-3@4-5@6-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@2 +# AUNIT --inst x4e402800/mask=xffe0fc00 --status pass + +:trn1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=2 & b_1011=2 & 
Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPQ2 = Rm_VPR128.8H; + TMPQ1 = Rn_VPR128.8H; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@2-2@4-4@6-6) lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16]; + # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-1@2-3@4-5@6-7) lane size 2 + Rd_VPR128.8H[16,16] = TMPQ2[0,16]; + Rd_VPR128.8H[48,16] = TMPQ2[32,16]; + Rd_VPR128.8H[80,16] = TMPQ2[64,16]; + Rd_VPR128.8H[112,16] = TMPQ2[96,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x4e006800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2@5-4@7-6@9-8@11-10@13-12@15-14:1 swap &=$shuffle@1-1@3-3@5-5@7-7@9-9@11-11@13-13@15-15:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@1 +# AUNIT --inst x4e006800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + TMPQ2 = Rm_VPR128.16B; + TMPQ1 = Rn_VPR128.16B; + # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-0@3-2@5-4@7-6@9-8@11-10@13-12@15-14) lane size 1 + Rd_VPR128.16B[0,8] = TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = TMPQ1[120,8]; + # simd shuffle Rd_VPR128.16B = TMPQ2 (@1-1@3-3@5-5@7-7@9-9@11-11@13-13@15-15) lane size 1 + Rd_VPR128.16B[8,8] = TMPQ2[8,8]; + Rd_VPR128.16B[24,8] = TMPQ2[24,8]; + Rd_VPR128.16B[40,8] = TMPQ2[40,8]; + Rd_VPR128.16B[56,8] = TMPQ2[56,8]; + Rd_VPR128.16B[72,8] = TMPQ2[72,8]; + Rd_VPR128.16B[88,8] = TMPQ2[88,8]; + Rd_VPR128.16B[104,8] = TMPQ2[104,8]; + Rd_VPR128.16B[120,8] = TMPQ2[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x4ec06800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:8 swap &=$shuffle@1-1:8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@8 +# AUNIT --inst x4ec06800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + TMPQ2 = Rm_VPR128.2D; + TMPQ1 = Rn_VPR128.2D; + # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[64,64]; + # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 + Rd_VPR128.2D[64,64] = TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x0e806800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:4 swap &=$shuffle@1-1:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@4 +# AUNIT --inst x0e806800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + TMPD2 = Rm_VPR64.2S; + TMPD1 = Rn_VPR64.2S; + # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 + Rd_VPR64.2S[0,32] = TMPD1[32,32]; + # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 + 
Rd_VPR64.2S[32,32] = TMPD2[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x0e406800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2:2 swap &=$shuffle@1-1@3-3:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@2 +# AUNIT --inst x0e406800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + TMPD2 = Rm_VPR64.4H; + TMPD1 = Rn_VPR64.4H; + # simd shuffle Rd_VPR64.4H = TMPD1 (@1-0@3-2) lane size 2 + Rd_VPR64.4H[0,16] = TMPD1[16,16]; + Rd_VPR64.4H[32,16] = TMPD1[48,16]; + # simd shuffle Rd_VPR64.4H = TMPD2 (@1-1@3-3) lane size 2 + Rd_VPR64.4H[16,16] = TMPD2[16,16]; + Rd_VPR64.4H[48,16] = TMPD2[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x4e806800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2:4 swap &=$shuffle@1-1@3-3:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@4 +# AUNIT --inst x4e806800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPQ2 = Rm_VPR128.4S; + TMPQ1 = Rn_VPR128.4S; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-0@3-2) lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[96,32]; + # simd shuffle Rd_VPR128.4S = TMPQ2 (@1-1@3-3) lane size 4 + Rd_VPR128.4S[32,32] = TMPQ2[32,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x0e006800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2@5-4@7-6:1 swap &=$shuffle@1-1@3-3@5-5@7-7:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@1 +# AUNIT --inst x0e006800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + TMPD2 = Rm_VPR64.8B; + TMPD1 = Rn_VPR64.8B; + # simd shuffle Rd_VPR64.8B = TMPD1 (@1-0@3-2@5-4@7-6) lane size 1 + Rd_VPR64.8B[0,8] = TMPD1[8,8]; + Rd_VPR64.8B[16,8] = TMPD1[24,8]; + Rd_VPR64.8B[32,8] = TMPD1[40,8]; + Rd_VPR64.8B[48,8] = TMPD1[56,8]; + # simd shuffle Rd_VPR64.8B = TMPD2 (@1-1@3-3@5-5@7-7) lane size 1 + Rd_VPR64.8B[8,8] = TMPD2[8,8]; + Rd_VPR64.8B[24,8] = TMPD2[24,8]; + Rd_VPR64.8B[40,8] = TMPD2[40,8]; + Rd_VPR64.8B[56,8] = TMPD2[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 +# CONSTRUCT x4e406800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2@5-4@7-6:2 swap &=$shuffle@1-1@3-3@5-5@7-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@2 +# AUNIT --inst x4e406800/mask=xffe0fc00 --status pass + +:trn2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPQ2 = Rm_VPR128.8H; + TMPQ1 = Rn_VPR128.8H; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-0@3-2@5-4@7-6) lane size 2 + 
Rd_VPR128.8H[0,16] = TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[112,16]; + # simd shuffle Rd_VPR128.8H = TMPQ2 (@1-1@3-3@5-5@7-7) lane size 2 + Rd_VPR128.8H[16,16] = TMPQ2[16,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 +# CONSTRUCT x6e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@1 +# AUNIT --inst x6e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uaba Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xf & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uaba(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 +# CONSTRUCT x2ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@4 +# AUNIT --inst x2ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uaba Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xf & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uaba(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 +# CONSTRUCT x2e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@2 +# AUNIT --inst x2e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uaba Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xf & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uaba(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 +# CONSTRUCT x6ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@4 +# AUNIT --inst x6ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uaba Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xf & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uaba(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 +# CONSTRUCT x2e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@1 +# AUNIT --inst x2e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uaba Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xf & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uaba(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 +# CONSTRUCT x6e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@2 +# AUNIT --inst x6e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uaba Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xf & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ 
+ Rd_VPR128.8H = NEON_uaba(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 +# CONSTRUCT x6ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $-@8 $abs@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal2/3@4 +# AUNIT --inst x6ea05000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x5 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; + # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 8 + TMPQ6[0,64] = MP_INT_ABS(TMPQ5[0,64]); + TMPQ6[64,64] = MP_INT_ABS(TMPQ5[64,64]); + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 +# CONSTRUCT x6e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $-@4 $abs@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal2/3@2 +# AUNIT --inst x6e605000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x5 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; + # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 4 + TMPQ6[0,32] = MP_INT_ABS(TMPQ5[0,32]); + TMPQ6[32,32] = MP_INT_ABS(TMPQ5[32,32]); + TMPQ6[64,32] = MP_INT_ABS(TMPQ5[64,32]); + TMPQ6[96,32] = MP_INT_ABS(TMPQ5[96,32]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 +# CONSTRUCT x6e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $-@2 $abs@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal2/3@1 +# 
AUNIT --inst x6e205000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x5 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; + # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 2 + TMPQ6[0,16] = MP_INT_ABS(TMPQ5[0,16]); + TMPQ6[16,16] = MP_INT_ABS(TMPQ5[16,16]); + TMPQ6[32,16] = MP_INT_ABS(TMPQ5[32,16]); + TMPQ6[48,16] = MP_INT_ABS(TMPQ5[48,16]); + TMPQ6[64,16] = MP_INT_ABS(TMPQ5[64,16]); + TMPQ6[80,16] = MP_INT_ABS(TMPQ5[80,16]); + TMPQ6[96,16] = MP_INT_ABS(TMPQ5[96,16]); + TMPQ6[112,16] = MP_INT_ABS(TMPQ5[112,16]); + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ6 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ6[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ6[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ6[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ6[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ6[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ6[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ6[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ6[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 +# CONSTRUCT x2ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $-@8 $abs@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal/3@4 +# AUNIT --inst x2ea05000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x5 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; + # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 8 + TMPQ4[0,64] = MP_INT_ABS(TMPQ3[0,64]); + TMPQ4[64,64] = MP_INT_ABS(TMPQ3[64,64]); + # simd infix 
Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 +# CONSTRUCT x2e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $-@4 $abs@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal/3@2 +# AUNIT --inst x2e605000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x5 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; + # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 4 + TMPQ4[0,32] = MP_INT_ABS(TMPQ3[0,32]); + TMPQ4[32,32] = MP_INT_ABS(TMPQ3[32,32]); + TMPQ4[64,32] = MP_INT_ABS(TMPQ3[64,32]); + TMPQ4[96,32] = MP_INT_ABS(TMPQ3[96,32]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 +# CONSTRUCT x2e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $-@2 $abs@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal/3@1 +# AUNIT --inst x2e205000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x5 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = 
TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; + # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 2 + TMPQ4[0,16] = MP_INT_ABS(TMPQ3[0,16]); + TMPQ4[16,16] = MP_INT_ABS(TMPQ3[16,16]); + TMPQ4[32,16] = MP_INT_ABS(TMPQ3[32,16]); + TMPQ4[48,16] = MP_INT_ABS(TMPQ3[48,16]); + TMPQ4[64,16] = MP_INT_ABS(TMPQ3[64,16]); + TMPQ4[80,16] = MP_INT_ABS(TMPQ3[80,16]); + TMPQ4[96,16] = MP_INT_ABS(TMPQ3[96,16]); + TMPQ4[112,16] = MP_INT_ABS(TMPQ3[112,16]); + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ4 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ4[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 +# CONSTRUCT x6e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@1 +# AUNIT --inst x6e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uabd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uabd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 +# CONSTRUCT x2ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $-@4 ARG3 ARG2 $-@4 2:4 &=$* ARG2 ARG3 $less@4 &=$*@4 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@4 +# AUNIT --inst x2ea07400/mask=xffe0fc00 --status pass --comment "abd" +# This abd variant is implemented with explicit p-code (instead of the NEON_uabd pseudo-op) +# to document a correct, branchless way to implement the unsigned absolute difference semantic.
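Concretely, the .2S constructor below computes |a - b| for unsigned lanes as (a - b) + 2*(b - a)*(a < b), relying on wrap-around modulo the lane width. A minimal C++ sketch of that identity on a single 32-bit lane (uabd_lane is our illustrative name, not part of the patch):

#include <cassert>
#include <cstdint>

// Mirrors the p-code of the .2S uabd constructor on one 32-bit lane:
// TMPD1 = a - b; TMPD2 = (b - a) * 2; TMPD3 = zext(a < b); result = TMPD1 + TMPD2 * TMPD3.
static uint32_t uabd_lane(uint32_t a, uint32_t b)
{
    uint32_t diff = a - b;              // wraps modulo 2^32 when a < b
    uint32_t corr = (b - a) * 2u;       // correction term, also modulo 2^32
    uint32_t lt   = (a < b) ? 1u : 0u;  // the zext'ed lane comparison
    return diff + corr * lt;            // a >= b: a - b; a < b: (a - b) + 2(b - a) == b - a
}

int main()
{
    assert(uabd_lane(7u, 3u) == 4u);
    assert(uabd_lane(3u, 7u) == 4u);
    assert(uabd_lane(0u, 0xFFFFFFFFu) == 0xFFFFFFFFu); // extreme wrap-around case
    return 0;
}

The same sequence works at any lane width, which is why the constructor can stay branch-free without widening temporaries.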
+ +:uabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + # simd infix TMPD1 = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; + # simd infix TMPD2 = Rm_VPR64.2S - Rn_VPR64.2S on lane size 4 + TMPD2[0,32] = Rm_VPR64.2S[0,32] - Rn_VPR64.2S[0,32]; + TMPD2[32,32] = Rm_VPR64.2S[32,32] - Rn_VPR64.2S[32,32]; + # simd infix TMPD2 = TMPD2 * 2:4 on lane size 4 + TMPD2[0,32] = TMPD2[0,32] * 2:4; + TMPD2[32,32] = TMPD2[32,32] * 2:4; + # simd infix TMPD3 = Rn_VPR64.2S < Rm_VPR64.2S on lane size 4 + TMPD3[0,32] = zext(Rn_VPR64.2S[0,32] < Rm_VPR64.2S[0,32]); + TMPD3[32,32] = zext(Rn_VPR64.2S[32,32] < Rm_VPR64.2S[32,32]); + # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 4 + TMPD2[0,32] = TMPD2[0,32] * TMPD3[0,32]; + TMPD2[32,32] = TMPD2[32,32] * TMPD3[32,32]; + # simd infix Rd_VPR64.2S = TMPD1 + TMPD2 on lane size 4 + Rd_VPR64.2S[0,32] = TMPD1[0,32] + TMPD2[0,32]; + Rd_VPR64.2S[32,32] = TMPD1[32,32] + TMPD2[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 +# CONSTRUCT x2e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@2 +# AUNIT --inst x2e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uabd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 +# CONSTRUCT x6ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@4 +# AUNIT --inst x6ea07400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uabd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 +# CONSTRUCT x2e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@1 +# AUNIT --inst x2e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uabd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uabd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 +# CONSTRUCT x6e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@2 +# AUNIT --inst x6e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" + +:uabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uabd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 +# CONSTRUCT x6ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $-@8 =$abs@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl2/2@4 +# 
AUNIT --inst x6ea07000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabdl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x7 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; + # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ5) on lane size 8 + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ5[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ5[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 +# CONSTRUCT x6e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $-@4 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl2/2@2 +# AUNIT --inst x6e607000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabdl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x7 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; + # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ5) on lane size 4 + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ5[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ5[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ5[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ5[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 +# CONSTRUCT x6e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $-@2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl2/2@1 +# AUNIT --inst x6e207000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabdl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x7 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + 
TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; + # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ5) on lane size 2 + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ5[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ5[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ5[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ5[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ5[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ5[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ5[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ5[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 +# CONSTRUCT x2ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $-@8 =$abs@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl/2@4 +# AUNIT --inst x2ea07000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabdl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x7 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; + # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ3) on lane size 8 + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ3[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ3[64,64]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 +# CONSTRUCT x2e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $-@4 =$abs@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl/2@2 +# AUNIT --inst x2e607000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabdl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x7 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Rn_VPR128 & Rm_VPR128 & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; + 
# simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ3) on lane size 4 + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ3[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ3[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ3[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ3[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 +# CONSTRUCT x2e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $-@2 =$abs@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl/2@1 +# AUNIT --inst x2e207000/mask=xffe0fc00 --status pass --comment "ext abd" + +:uabdl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x7 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Rn_VPR128 & Rm_VPR128 & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; + # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ3) on lane size 2 + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ3[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ3[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ3[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ3[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ3[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ3[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ3[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ3[112,16]); + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 +# CONSTRUCT x6e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 =#u+ &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@1 +# AUNIT --inst x6e206800/mask=xfffffc00 --status pass --comment "ext" + +:uadalp Rd_VPR128.8H, Rn_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPQ1 = 0; + # simd infix TMPQ1 = +(Rn_VPR128.16B) on pairs lane size (1 to 2) + local tmp2 = Rn_VPR128.16B[0,8]; + local tmp4 = zext(tmp2); + local tmp3 = Rn_VPR128.16B[8,8]; + local tmp5 = zext(tmp3); + TMPQ1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[16,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[24,8]; + tmp5 = zext(tmp3); + TMPQ1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[32,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[40,8]; + tmp5 = zext(tmp3); + TMPQ1[32,16] = tmp4 + tmp5; + tmp2 = 
Rn_VPR128.16B[48,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[56,8]; + tmp5 = zext(tmp3); + TMPQ1[48,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[64,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[72,8]; + tmp5 = zext(tmp3); + TMPQ1[64,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[80,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[88,8]; + tmp5 = zext(tmp3); + TMPQ1[80,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[96,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[104,8]; + tmp5 = zext(tmp3); + TMPQ1[96,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[112,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[120,8]; + tmp5 = zext(tmp3); + TMPQ1[112,16] = tmp4 + tmp5; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 +# CONSTRUCT x2ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 =#u+ &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@4 +# AUNIT --inst x2ea06800/mask=xfffffc00 --status pass --comment "ext" + +:uadalp Rd_VPR64.1D, Rn_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.1D & Zd +{ + TMPD1 = 0; + # simd infix TMPD1 = +(Rn_VPR64.2S) on pairs lane size (4 to 8) + local tmp2 = Rn_VPR64.2S[0,32]; + local tmp4 = zext(tmp2); + local tmp3 = Rn_VPR64.2S[32,32]; + local tmp5 = zext(tmp3); + TMPD1 = tmp4 + tmp5; + # simd infix Rd_VPR64.1D = Rd_VPR64.1D + TMPD1 on lane size 8 + Rd_VPR64.1D = Rd_VPR64.1D + TMPD1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 +# CONSTRUCT x2e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 =#u+ &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@2 +# AUNIT --inst x2e606800/mask=xfffffc00 --status pass --comment "ext" + +:uadalp Rd_VPR64.2S, Rn_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.2S & Zd +{ + TMPD1 = 0; + # simd infix TMPD1 = +(Rn_VPR64.4H) on pairs lane size (2 to 4) + local tmp2 = Rn_VPR64.4H[0,16]; + local tmp4 = zext(tmp2); + local tmp3 = Rn_VPR64.4H[16,16]; + local tmp5 = zext(tmp3); + TMPD1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR64.4H[32,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.4H[48,16]; + tmp5 = zext(tmp3); + TMPD1[32,32] = tmp4 + tmp5; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 +# CONSTRUCT x6ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 =#u+ &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@4 +# AUNIT --inst x6ea06800/mask=xfffffc00 --status pass --comment "ext" + +:uadalp Rd_VPR128.2D, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x6 
& b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPQ1 = 0; + # simd infix TMPQ1 = +(Rn_VPR128.4S) on pairs lane size (4 to 8) + local tmp2 = Rn_VPR128.4S[0,32]; + local tmp4 = zext(tmp2); + local tmp3 = Rn_VPR128.4S[32,32]; + local tmp5 = zext(tmp3); + TMPQ1[0,64] = tmp4 + tmp5; + tmp2 = Rn_VPR128.4S[64,32]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.4S[96,32]; + tmp5 = zext(tmp3); + TMPQ1[64,64] = tmp4 + tmp5; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 +# CONSTRUCT x2e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:8 ARG2 =#u+ &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@1 +# AUNIT --inst x2e206800/mask=xfffffc00 --status pass --comment "ext" + +:uadalp Rd_VPR64.4H, Rn_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.4H & Zd +{ + TMPD1 = 0; + # simd infix TMPD1 = +(Rn_VPR64.8B) on pairs lane size (1 to 2) + local tmp2 = Rn_VPR64.8B[0,8]; + local tmp4 = zext(tmp2); + local tmp3 = Rn_VPR64.8B[8,8]; + local tmp5 = zext(tmp3); + TMPD1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[16,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[24,8]; + tmp5 = zext(tmp3); + TMPD1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[32,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[40,8]; + tmp5 = zext(tmp3); + TMPD1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[48,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[56,8]; + tmp5 = zext(tmp3); + TMPD1[48,16] = tmp4 + tmp5; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 +# CONSTRUCT x6e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 var:16 ARG2 =#u+ &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@2 +# AUNIT --inst x6e606800/mask=xfffffc00 --status pass --comment "ext" + +:uadalp Rd_VPR128.4S, Rn_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPQ1 = 0; + # simd infix TMPQ1 = +(Rn_VPR128.8H) on pairs lane size (2 to 4) + local tmp2 = Rn_VPR128.8H[0,16]; + local tmp4 = zext(tmp2); + local tmp3 = Rn_VPR128.8H[16,16]; + local tmp5 = zext(tmp3); + TMPQ1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[32,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[48,16]; + tmp5 = zext(tmp3); + TMPQ1[32,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[64,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[80,16]; + tmp5 = zext(tmp3); + TMPQ1[64,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[96,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[112,16]; + tmp5 = zext(tmp3); + TMPQ1[96,32] = tmp4 + tmp5; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + 
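The uadalp constructors above all instantiate one pattern: zero-extend each pair of adjacent source lanes, add the pair, and accumulate the widened sum into the corresponding destination lane. A compact C++ restatement of the .8H, .16B variant (uadalp_8h_16b is a hypothetical helper used only for illustration, not part of the patch):

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Each 16-bit destination lane accumulates the zero-extended sum of one pair of
// adjacent source bytes, matching the tmp4/tmp5 zext-and-add pattern in the p-code.
static std::array<uint16_t, 8> uadalp_8h_16b(std::array<uint16_t, 8> acc,
                                             const std::array<uint8_t, 16>& src)
{
    for (std::size_t i = 0; i < 8; ++i)
        acc[i] = static_cast<uint16_t>(acc[i] + src[2 * i] + src[2 * i + 1]);
    return acc;
}

int main()
{
    std::array<uint16_t, 8> acc{};
    std::array<uint8_t, 16> src{};
    src[0] = 200; src[1] = 100;        // this pair would overflow an 8-bit sum
    acc = uadalp_8h_16b(acc, src);
    assert(acc[0] == 300);             // widened before adding, so no byte overflow
    return 0;
}

+# C7.2.348 UADDL, UADDL2 page C7-2187 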
line 123035 MATCH x2e200000/mask=xbf20fc00 +# CONSTRUCT x6ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl2/2@4 +# AUNIT --inst x6ea00000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x0 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); + # simd infix Rd_VPR128.2D = TMPQ2 + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 +# CONSTRUCT x6e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl2/2@2 +# AUNIT --inst x6e600000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x0 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); + # simd infix Rd_VPR128.4S = TMPQ2 + TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] + TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 +# CONSTRUCT x6e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl2/2@1 +# AUNIT --inst x6e200000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x0 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = 
zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); + # simd infix Rd_VPR128.8H = TMPQ2 + TMPQ4 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] + TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] + TMPQ4[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 +# CONSTRUCT x2ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl/2@4 +# AUNIT --inst x2ea00000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x0 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = TMPQ1 + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 +# CONSTRUCT x2e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl/2@2 +# AUNIT --inst x2e600000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x0 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 +# CONSTRUCT x2e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl/2@1 +# AUNIT --inst x2e200000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x0 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + 
TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 +# CONSTRUCT x2e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =#u+@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@1 +# AUNIT --inst x2e202800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when size = 00 , Q = 0 s=16 e1=1 e2=2 Ta=VPR64.4H Tb=VPR64.8B + +:uaddlp Rd_VPR64.4H, Rn_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_VPR64.4H & Rn_VPR64.8B & Zd +{ + TMPD1 = Rn_VPR64.8B; + # simd infix Rd_VPR64.4H = +(TMPD1) on pairs lane size (1 to 2) + local tmp2 = TMPD1[0,8]; + local tmp4 = zext(tmp2); + local tmp3 = TMPD1[8,8]; + local tmp5 = zext(tmp3); + Rd_VPR64.4H[0,16] = tmp4 + tmp5; + tmp2 = TMPD1[16,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[24,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[16,16] = tmp4 + tmp5; + tmp2 = TMPD1[32,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[40,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[32,16] = tmp4 + tmp5; + tmp2 = TMPD1[48,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[56,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[48,16] = tmp4 + tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 +# CONSTRUCT x6e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =#u+@1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@1 +# AUNIT --inst x6e202800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when size = 00 , Q = 1 s=32 e1=1 e2=2 Ta=VPR128.8H Tb=VPR128.16B + +:uaddlp Rd_VPR128.8H, Rn_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_VPR128.8H & Rn_VPR128.16B & Zd +{ + TMPQ1 = Rn_VPR128.16B; + # simd infix Rd_VPR128.8H = +(TMPQ1) on pairs lane size (1 to 2) + local tmp2 = TMPQ1[0,8]; + local tmp4 = zext(tmp2); + local tmp3 = TMPQ1[8,8]; + local tmp5 = zext(tmp3); + Rd_VPR128.8H[0,16] = tmp4 + tmp5; + tmp2 = TMPQ1[16,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[24,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[16,16] = tmp4 + tmp5; + tmp2 = TMPQ1[32,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[40,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[32,16] = tmp4 + tmp5; + tmp2 = TMPQ1[48,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[56,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[48,16] = tmp4 + tmp5; + tmp2 = TMPQ1[64,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[72,8]; + tmp5 = 
zext(tmp3); + Rd_VPR128.8H[64,16] = tmp4 + tmp5; + tmp2 = TMPQ1[80,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[88,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[80,16] = tmp4 + tmp5; + tmp2 = TMPQ1[96,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[104,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[96,16] = tmp4 + tmp5; + tmp2 = TMPQ1[112,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[120,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[112,16] = tmp4 + tmp5; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 +# CONSTRUCT x2e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =#u+@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@2 +# AUNIT --inst x2e602800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when size = 01 , Q = 0 s=16 e1=2 e2=4 Ta=VPR64.2S Tb=VPR64.4H + +:uaddlp Rd_VPR64.2S, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_VPR64.2S & Rn_VPR64.4H & Zd +{ + TMPD1 = Rn_VPR64.4H; + # simd infix Rd_VPR64.2S = +(TMPD1) on pairs lane size (2 to 4) + local tmp2 = TMPD1[0,16]; + local tmp4 = zext(tmp2); + local tmp3 = TMPD1[16,16]; + local tmp5 = zext(tmp3); + Rd_VPR64.2S[0,32] = tmp4 + tmp5; + tmp2 = TMPD1[32,16]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[48,16]; + tmp5 = zext(tmp3); + Rd_VPR64.2S[32,32] = tmp4 + tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 +# CONSTRUCT x6e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =#u+@2 +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@2 +# AUNIT --inst x6e602800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when size = 01 , Q = 1 s=32 e1=2 e2=4 Ta=VPR128.4S Tb=VPR128.8H + +:uaddlp Rd_VPR128.4S, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_VPR128.4S & Rn_VPR128.8H & Zd +{ + TMPQ1 = Rn_VPR128.8H; + # simd infix Rd_VPR128.4S = +(TMPQ1) on pairs lane size (2 to 4) + local tmp2 = TMPQ1[0,16]; + local tmp4 = zext(tmp2); + local tmp3 = TMPQ1[16,16]; + local tmp5 = zext(tmp3); + Rd_VPR128.4S[0,32] = tmp4 + tmp5; + tmp2 = TMPQ1[32,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[48,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[32,32] = tmp4 + tmp5; + tmp2 = TMPQ1[64,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[80,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[64,32] = tmp4 + tmp5; + tmp2 = TMPQ1[96,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[112,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[96,32] = tmp4 + tmp5; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 +# CONSTRUCT x2ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =#u+@4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@4 +# AUNIT --inst x2ea02800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when size = 10 , Q = 0 s=16 e1=4 e2=8 Ta=VPR64.1D Tb=VPR64.2S + +:uaddlp Rd_VPR64.1D, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_VPR64.1D & Rn_VPR64.2S & Zd +{ + TMPD1 = Rn_VPR64.2S; + # simd infix Rd_VPR64.1D = +(TMPD1) on pairs lane size (4 to 8) + local tmp2 = TMPD1[0,32]; + local tmp4 = zext(tmp2); + local tmp3 = TMPD1[32,32]; + local tmp5 = zext(tmp3); + Rd_VPR64.1D = tmp4 + tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 +# CONSTRUCT x6ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =var =#u+@4 +# 
SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@4 +# AUNIT --inst x6ea02800/mask=xfffffc00 --status pass --comment "ext" +# Vector variant when size = 10 , Q = 1 s=32 e1=4 e2=8 Ta=VPR128.2D Tb=VPR128.4S + +:uaddlp Rd_VPR128.2D, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_VPR128.2D & Rn_VPR128.4S & Zd +{ + TMPQ1 = Rn_VPR128.4S; + # simd infix Rd_VPR128.2D = +(TMPQ1) on pairs lane size (4 to 8) + local tmp2 = TMPQ1[0,32]; + local tmp4 = zext(tmp2); + local tmp3 = TMPQ1[32,32]; + local tmp5 = zext(tmp3); + Rd_VPR128.2D[0,64] = tmp4 + tmp5; + tmp2 = TMPQ1[64,32]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[96,32]; + tmp5 = zext(tmp3); + Rd_VPR128.2D[64,64] = tmp4 + tmp5; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 +# CONSTRUCT x6eb03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@4 +# AUNIT --inst x6eb03800/mask=xfffffc00 --status nopcodeop --comment "ext" + +:uaddlv Rd_FPR64, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.4S & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_uaddlv(Rn_VPR128.4S, 4:1); +} + +# C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 +# CONSTRUCT x6e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@1 +# AUNIT --inst x6e303800/mask=xfffffc00 --status nopcodeop --comment "ext" + +:uaddlv Rd_FPR16, Rn_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.16B & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uaddlv(Rn_VPR128.16B, 1:1); +} + +# C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 +# CONSTRUCT x2e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@1 +# AUNIT --inst x2e303800/mask=xfffffc00 --status nopcodeop --comment "ext" + +:uaddlv Rd_FPR16, Rn_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.8B & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uaddlv(Rn_VPR64.8B, 1:1); +} + +# C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 +# CONSTRUCT x2e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@2 +# AUNIT --inst x2e703800/mask=xfffffc00 --status nopcodeop --comment "ext" + +:uaddlv Rd_FPR32, Rn_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.4H & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uaddlv(Rn_VPR64.4H, 2:1); +} + +# C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 +# CONSTRUCT x6e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@2 +# AUNIT --inst x6e703800/mask=xfffffc00 --status nopcodeop --comment "ext" + +:uaddlv Rd_FPR32, Rn_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.8H & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uaddlv(Rn_VPR128.8H, 2:1); +} + 
& b_2121=1 & Rm_VPR128.4S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + TMPD1 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 +# CONSTRUCT x6e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $zext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw2/2@2 +# AUNIT --inst x6e601000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPD1 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 +# CONSTRUCT x6e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3[1]:8 $zext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw2/2@1 +# AUNIT --inst x6e201000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPD1 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ2 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 +# CONSTRUCT x2ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $zext@4:16 =$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw/2@4 +# AUNIT --inst x2ea01000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & 
Zd +{ + # simd resize TMPQ1 = zext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rm_VPR64.2S[32,32]); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 +# CONSTRUCT x2e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $zext@2:16 =$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw/2@2 +# AUNIT --inst x2e601000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rm_VPR64.4H[48,16]); + # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 +# CONSTRUCT x2e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 $zext@1:16 =$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw/2@1 +# AUNIT --inst x2e201000/mask=xffe0fc00 --status pass --comment "ext" + +:uaddw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = zext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rm_VPR64.8B[56,8]); + # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x7f00e400/mask=xff80fc00 +# CONSTRUCT x7f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2 +# AUNIT --inst x7f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_3031=1 & u=1 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_ucvtf(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH 
x7f00e400/mask=xff80fc00 +# CONSTRUCT x7f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2 +# AUNIT --inst x7f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_FPR32, Rn_FPR32, Imm_shr_imm32 +is b_3031=1 & u=1 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_ucvtf(Rn_FPR32, Imm_shr_imm32:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x7f00e400/mask=xff80fc00 +# CONSTRUCT x7f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2 +# AUNIT --inst x7f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" + +:ucvtf Rd_FPR16, Rn_FPR16, Imm_shr_imm16 +is b_3031=1 & u=1 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_ucvtf(Rn_FPR16, Imm_shr_imm16:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x6f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@8 +# AUNIT --inst x6f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_ucvtf(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x2f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@4 +# AUNIT --inst x2f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_ucvtf(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x6f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@4 +# AUNIT --inst x6f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_ucvtf(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x2f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@2 +# AUNIT --inst x2f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" + +:ucvtf Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_ucvtf(Rn_VPR64.4H, Imm_shr_imm32:1, 2:1); +} + +# C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 +# CONSTRUCT x6f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@2 +# AUNIT --inst x6f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" + +:ucvtf Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & 
Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_ucvtf(Rn_VPR128.8H, Imm_shr_imm32:1, 2:1); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x7e21d800/mask=xffbffc00 +# CONSTRUCT x7e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x7e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_FPR32, Rn_FPR32 +is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_ucvtf(Rn_FPR32); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x7e21d800/mask=xffbffc00 +# CONSTRUCT x7e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x7e61d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_FPR64, Rn_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_ucvtf(Rn_FPR64); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e21d800/mask=xbfbffc00 +# CONSTRUCT x2e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@4 +# AUNIT --inst x2e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_VPR64.2S, Rn_VPR64.2S +is sf=0 & q=0 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_ucvtf(Rn_VPR64.2S, 4:1); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e21d800/mask=xbfbffc00 +# CONSTRUCT x6e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@4 +# AUNIT --inst x6e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_VPR128.4S, Rn_VPR128.4S +is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_ucvtf(Rn_VPR128.4S, 4:1); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e21d800/mask=xbfbffc00 +# CONSTRUCT x6e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@8 +# AUNIT --inst x6e61d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" + +:ucvtf Rd_VPR128.2D, Rn_VPR128.2D +is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR128.2D & Rn_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_ucvtf(Rn_VPR128.2D, 8:1); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x7e79d800/mask=xfffffc00 +# CONSTRUCT x7e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x7e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Scalar half precision variant + +:ucvtf Rd_FPR16, Rn_FPR16 +is b_1031=0b0111111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_ucvtf(Rn_FPR16); +} + +# C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e79d800/mask=xbffffc00 +# CONSTRUCT x2e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@2 +# AUNIT --inst x2e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Vector half precision variant when Q=0 T=VPR64.4H + +:ucvtf Rd_VPR64.4H, Rn_VPR64.4H +is b_31=0 & b_30=0 & b_1029=0b10111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_ucvtf(Rn_VPR64.4H, 2:1); +} + +# C7.2.353 UCVTF (vector, 
integer) page C7-2198 line 123634 MATCH x2e79d800/mask=xbffffc00 +# CONSTRUCT x6e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@2 +# AUNIT --inst x6e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" +# Vector half precision variant when Q=1 T=VPR128.8H + +:ucvtf Rd_VPR128.8H, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_1029=0b10111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_ucvtf(Rn_VPR128.8H, 2:1); +} + +# C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 +# CONSTRUCT x1ec38000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:8 int2float:2 FBits16 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_ucvtf/2 +# AUNIT --inst x1ec38000/mask=xffff8000 --status noqemu --comment "nofpround" +# if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); + +:ucvtf Rd_FPR16, Rn_GPR32, FBitsOp +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits16 & Rn_GPR32 & Rd_FPR16 & Zd +{ + local tmp1:8 = zext(Rn_GPR32); + local tmp2:2 = int2float(tmp1); + Rd_FPR16 = tmp2 f/ FBits16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 +# CONSTRUCT x9ec30000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:9 int2float:2 FBits16 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_ucvtf/2 +# AUNIT --inst x9ec30000/mask=xffff0000 --status noqemu --comment "nofpround" + +:ucvtf Rd_FPR16, Rn_GPR64, FBitsOp +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits16 & Rn_GPR64 & Rd_FPR16 & Zd +{ + local tmp1:9 = zext(Rn_GPR64); + local tmp2:2 = int2float(tmp1); + Rd_FPR16 = tmp2 f/ FBits16; + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 +# CONSTRUCT x1e438000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:8 int2float:8 FBits64 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_ucvtf/2 +# AUNIT --inst x1e438000/mask=xffff8000 --status pass --comment "nofpround" +# if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); + +:ucvtf Rd_FPR64, Rn_GPR32, FBitsOp +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits64 & Rn_GPR32 & Rd_FPR64 & Zd +{ + local tmp1:8 = zext(Rn_GPR32); + local tmp2:8 = int2float(tmp1); + Rd_FPR64 = tmp2 f/ FBits64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 +# CONSTRUCT x9e430000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:9 int2float:8 FBits64 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_ucvtf/2 +# AUNIT --inst x9e430000/mask=xffff0000 --status fail --comment "nofpround" +# The zext:9 naively force unsigned int before conversion + +:ucvtf Rd_FPR64, Rn_GPR64, FBitsOp +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits64 & Rn_GPR64 & Rd_FPR64 & Zd +{ + local tmp1:9 = zext(Rn_GPR64); + local tmp2:8 = int2float(tmp1); + Rd_FPR64 = tmp2 f/ FBits64; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 +# CONSTRUCT x1e038000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:8 int2float:4 FBits32 =f/ +# 
SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_ucvtf/2 +# AUNIT --inst x1e038000/mask=xffff8000 --status fail --comment "nofpround" +# if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); + +:ucvtf Rd_FPR32, Rn_GPR32, FBitsOp +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits32 & Rn_GPR32 & Rd_FPR32 & Zd +{ + local tmp1:8 = zext(Rn_GPR32); + local tmp2:4 = int2float(tmp1); + Rd_FPR32 = tmp2 f/ FBits32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 +# CONSTRUCT x9e030000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:9 int2float:4 FBits32 =f/ +# SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_ucvtf/2 +# AUNIT --inst x9e030000/mask=xffff0000 --status fail --comment "nofpround" + +:ucvtf Rd_FPR32, Rn_GPR64, FBitsOp +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits32 & Rn_GPR64 & Rd_FPR32 & Zd +{ + local tmp1:9 = zext(Rn_GPR64); + local tmp2:4 = int2float(tmp1); + Rd_FPR32 = tmp2 f/ FBits32; + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 +# CONSTRUCT x1ee30000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:8 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x1ee30000/mask=xfffffc00 --status noqemu --comment "nofpround" + +:ucvtf Rd_FPR16, Rn_GPR32 +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR16 & Zd +{ + local tmp1:8 = zext(Rn_GPR32); + Rd_FPR16 = int2float(tmp1); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 +# CONSTRUCT x9ee30000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:9 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x9ee30000/mask=xfffffc00 --status noqemu --comment "nofpround" + +:ucvtf Rd_FPR16, Rn_GPR64 +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR16 & Zd +{ + local tmp1:9 = zext(Rn_GPR64); + Rd_FPR16 = int2float(tmp1); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 +# CONSTRUCT x1e630000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:8 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x1e630000/mask=xfffffc00 --status pass --comment "nofpround" + +:ucvtf Rd_FPR64, Rn_GPR32 +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR64 & Zd +{ + local tmp1:8 = zext(Rn_GPR32); + Rd_FPR64 = int2float(tmp1); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 +# CONSTRUCT x9e630000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:9 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x9e630000/mask=xfffffc00 --status fail --comment "nofpround" + +:ucvtf Rd_FPR64, Rn_GPR64 +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR64 & Zd +{ + local tmp1:9 = zext(Rn_GPR64); + Rd_FPR64 = int2float(tmp1); + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.355 UCVTF (scalar, integer) page C7-2203 
line 123942 MATCH x1e230000/mask=x7f3ffc00 +# CONSTRUCT x1e230000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:8 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x1e230000/mask=xfffffc00 --status fail --comment "nofpround" + +:ucvtf Rd_FPR32, Rn_GPR32 +is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR32 & Zd +{ + local tmp1:8 = zext(Rn_GPR32); + Rd_FPR32 = int2float(tmp1); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 +# CONSTRUCT x9e230000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 zext:9 =int2float +# SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 +# AUNIT --inst x9e230000/mask=xfffffc00 --status fail --comment "nofpround" + +:ucvtf Rd_FPR32, Rn_GPR64 +is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR32 & Zd +{ + local tmp1:9 = zext(Rn_GPR64); + Rd_FPR32 = int2float(tmp1); + zext_zs(Zd); # zero upper 28 bytes of Zd +} + +# C7.2.356 UDOT (by element) page C7-2205 line 124065 MATCH x2f00e000/mask=xbf00f400 +# CONSTRUCT x2f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_udot/2@1 +# AUNIT --inst x2f80e000/mask=xffc0f400 --status noqemu +# Vector variant when Q=0 Ta=VPR64.2S Tb=VPR64.8B + +:udot Rd_VPR64.2S, Rn_VPR64.8B, Re_VPR128.B.vIndex +is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR64.2S & Rn_VPR64.8B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + Rd_VPR64.2S = NEON_udot(Rn_VPR64.8B, tmp1, 1:1); +} + +# C7.2.356 UDOT (by element) page C7-2205 line 124065 MATCH x2f00e000/mask=xbf00f400 +# CONSTRUCT x6f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_udot/2@1 +# AUNIT --inst x6f80e000/mask=xffc0f400 --status noqemu +# Vector variant when Q=1 Ta=VPR128.4S Tb=VPR128.16B + +:udot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.B.vIndex +is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR128.4S & Rn_VPR128.16B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd +{ + local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + Rd_VPR128.4S = NEON_udot(Rn_VPR128.16B, tmp1, 1:1); +} + +# C7.2.357 UDOT (vector) page C7-2207 line 124164 MATCH x2e009400/mask=xbf20fc00 +# CONSTRUCT x2e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_udot/2@1 +# AUNIT --inst x2e809400/mask=xffe0fc00 --status noqemu +# Three registers of the same type variant when Q=0 Ta=VPR64.2S Tb=VPR64.8B + +:udot Rd_VPR64.2S, Rn_VPR64.8B, Rm_VPR64.8B +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR64.2S & Rn_VPR64.8B & Rm_VPR64.8B & Zd +{ + Rd_VPR64.2S = NEON_udot(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.357 UDOT (vector) page C7-2207 line 124164 MATCH x2e009400/mask=xbf20fc00 +# CONSTRUCT x6e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_udot/2@1 +# AUNIT --inst x6e809400/mask=xffe0fc00 --status noqemu +# Three registers of the same type variant when Q=1 Ta=VPR128.4S Tb=VPR128.16B + +:udot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR128.4S & Rn_VPR128.16B & Rm_VPR128.16B & Zd +{ + Rd_VPR128.4S = NEON_udot(Rn_VPR128.16B, 
Rm_VPR128.16B, 1:1); +} + +# C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 +# CONSTRUCT x6e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@1 +# AUNIT --inst x6e200400/mask=xffe0fc00 --status nopcodeop + +:uhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 +# CONSTRUCT x2ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@4 +# AUNIT --inst x2ea00400/mask=xffe0fc00 --status nopcodeop + +:uhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 +# CONSTRUCT x2e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@2 +# AUNIT --inst x2e600400/mask=xffe0fc00 --status nopcodeop + +:uhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 +# CONSTRUCT x6ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@4 +# AUNIT --inst x6ea00400/mask=xffe0fc00 --status nopcodeop + +:uhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 +# CONSTRUCT x2e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@1 +# AUNIT --inst x2e200400/mask=xffe0fc00 --status nopcodeop + +:uhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 +# CONSTRUCT x6e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@2 +# AUNIT --inst x6e600400/mask=xffe0fc00 --status nopcodeop + +:uhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 +# CONSTRUCT x6e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@1 +# AUNIT --inst x6e202400/mask=xffe0fc00 --status nopcodeop + +:uhsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = 
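+ # Note (editorial illustration, not generator output): NEON_uhsub is left as an opaque pcodeop here (see the nopcodeop status above); architecturally UHSUB is an unsigned halving subtract, per lane (a - b) >> 1 with the borrow kept, e.g. byte lanes 10 and 4 give 3, while 0 and 1 give 0xff. + # The trailing 1:1 argument follows the convention used throughout this file: a one-byte constant giving the lane width in bytes.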
NEON_uhsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 +# CONSTRUCT x2ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@4 +# AUNIT --inst x2ea02400/mask=xffe0fc00 --status nopcodeop + +:uhsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uhsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 +# CONSTRUCT x2e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@2 +# AUNIT --inst x2e602400/mask=xffe0fc00 --status nopcodeop + +:uhsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uhsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 +# CONSTRUCT x6ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@4 +# AUNIT --inst x6ea02400/mask=xffe0fc00 --status nopcodeop + +:uhsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uhsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 +# CONSTRUCT x2e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@1 +# AUNIT --inst x2e202400/mask=xffe0fc00 --status nopcodeop + +:uhsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uhsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 +# CONSTRUCT x6e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@2 +# AUNIT --inst x6e602400/mask=xffe0fc00 --status nopcodeop + +:uhsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uhsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 +# CONSTRUCT x6e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@1 +# AUNIT --inst x6e206400/mask=xffe0fc00 --status nopcodeop + +:umax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_umax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 +# CONSTRUCT x2ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@4 +# AUNIT --inst x2ea06400/mask=xffe0fc00 --status nopcodeop + +:umax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = 
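+ # Note (editorial illustration, not generator output): UMAX takes the lane-wise unsigned maximum, so max(1, 0xffffffff) is 0xffffffff here, where the signed SMAX would instead pick 1.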
NEON_umax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 +# CONSTRUCT x2e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@2 +# AUNIT --inst x2e606400/mask=xffe0fc00 --status nopcodeop + +:umax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_umax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 +# CONSTRUCT x6ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@4 +# AUNIT --inst x6ea06400/mask=xffe0fc00 --status nopcodeop + +:umax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_umax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 +# CONSTRUCT x2e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@1 +# AUNIT --inst x2e206400/mask=xffe0fc00 --status nopcodeop + +:umax Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_umax(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 +# CONSTRUCT x6e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@2 +# AUNIT --inst x6e606400/mask=xffe0fc00 --status nopcodeop + +:umax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_umax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 +# CONSTRUCT x6e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@1 +# AUNIT --inst x6e20a400/mask=xffe0fc00 --status nopcodeop + +:umaxp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_umaxp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 +# CONSTRUCT x2ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@4 +# AUNIT --inst x2ea0a400/mask=xffe0fc00 --status nopcodeop + +:umaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_umaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 +# CONSTRUCT x2e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@2 +# AUNIT --inst x2e60a400/mask=xffe0fc00 --status nopcodeop + +:umaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = 
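+ # Note (editorial illustration, not generator output): the pairwise form reduces the concatenation Rm:Rn, taking the unsigned max of each adjacent element pair; pairs drawn from Rn fill the low half of Rd and pairs drawn from Rm fill the high half.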
NEON_umaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 +# CONSTRUCT x6ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@4 +# AUNIT --inst x6ea0a400/mask=xffe0fc00 --status nopcodeop + +:umaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_umaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 +# CONSTRUCT x2e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@1 +# AUNIT --inst x2e20a400/mask=xffe0fc00 --status nopcodeop + +:umaxp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_umaxp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 +# CONSTRUCT x6e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@2 +# AUNIT --inst x6e60a400/mask=xffe0fc00 --status nopcodeop + +:umaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_umaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 +# CONSTRUCT x6e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@1 +# AUNIT --inst x6e30a800/mask=xfffffc00 --status nopcodeop + +:umaxv Rd_FPR8, Rn_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_umaxv(Rn_VPR128.16B, 1:1); +} + +# C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 +# CONSTRUCT x2e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@1 +# AUNIT --inst x2e30a800/mask=xfffffc00 --status nopcodeop + +:umaxv Rd_FPR8, Rn_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_umaxv(Rn_VPR64.8B, 1:1); +} + +# C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 +# CONSTRUCT x2e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@2 +# AUNIT --inst x2e70a800/mask=xfffffc00 --status nopcodeop + +:umaxv Rd_FPR16, Rn_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_umaxv(Rn_VPR64.4H, 2:1); +} + +# C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 +# CONSTRUCT x6e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@2 +# AUNIT --inst x6e70a800/mask=xfffffc00 --status nopcodeop + +:umaxv Rd_FPR16, Rn_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_umaxv(Rn_VPR128.8H, 2:1); +} + +# C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 +# CONSTRUCT x6eb0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 
ARG2 =NEON_umaxv/1@4 +# AUNIT --inst x6eb0a800/mask=xfffffc00 --status nopcodeop + +:umaxv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_umaxv(Rn_VPR128.4S, 4:1); +} + +# C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 +# CONSTRUCT x6e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@1 +# AUNIT --inst x6e206c00/mask=xffe0fc00 --status nopcodeop + +:umin Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xd & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_umin(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 +# CONSTRUCT x2ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@4 +# AUNIT --inst x2ea06c00/mask=xffe0fc00 --status nopcodeop + +:umin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xd & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_umin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 +# CONSTRUCT x2e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@2 +# AUNIT --inst x2e606c00/mask=xffe0fc00 --status nopcodeop + +:umin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xd & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_umin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 +# CONSTRUCT x6ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@4 +# AUNIT --inst x6ea06c00/mask=xffe0fc00 --status nopcodeop + +:umin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xd & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_umin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 +# CONSTRUCT x2e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@1 +# AUNIT --inst x2e206c00/mask=xffe0fc00 --status nopcodeop + +:umin Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xd & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_umin(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 +# CONSTRUCT x6e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@2 +# AUNIT --inst x6e606c00/mask=xffe0fc00 --status nopcodeop + +:umin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xd & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_umin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 +# CONSTRUCT x6e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@1 +# AUNIT --inst x6e20ac00/mask=xffe0fc00 
--status nopcodeop + +:uminp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x15 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uminp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 +# CONSTRUCT x2ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@4 +# AUNIT --inst x2ea0ac00/mask=xffe0fc00 --status nopcodeop + +:uminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x15 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 +# CONSTRUCT x2e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@2 +# AUNIT --inst x2e60ac00/mask=xffe0fc00 --status nopcodeop + +:uminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x15 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 +# CONSTRUCT x6ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@4 +# AUNIT --inst x6ea0ac00/mask=xffe0fc00 --status nopcodeop + +:uminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x15 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 +# CONSTRUCT x2e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@1 +# AUNIT --inst x2e20ac00/mask=xffe0fc00 --status nopcodeop + +:uminp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x15 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uminp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 +# CONSTRUCT x6e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@2 +# AUNIT --inst x6e60ac00/mask=xffe0fc00 --status nopcodeop + +:uminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x15 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 +# CONSTRUCT x6e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@1 +# AUNIT --inst x6e31a800/mask=xfffffc00 --status nopcodeop + +:uminv Rd_FPR8, Rn_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_uminv(Rn_VPR128.16B, 1:1); +} + +# C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 +# CONSTRUCT x2e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@1 +# AUNIT --inst x2e31a800/mask=xfffffc00 --status nopcodeop + +:uminv Rd_FPR8, 
Rn_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_uminv(Rn_VPR64.8B, 1:1); +} + +# C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 +# CONSTRUCT x2e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@2 +# AUNIT --inst x2e71a800/mask=xfffffc00 --status nopcodeop + +:uminv Rd_FPR16, Rn_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uminv(Rn_VPR64.4H, 2:1); +} + +# C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 +# CONSTRUCT x6e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@2 +# AUNIT --inst x6e71a800/mask=xfffffc00 --status nopcodeop + +:uminv Rd_FPR16, Rn_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uminv(Rn_VPR128.8H, 2:1); +} + +# C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 +# CONSTRUCT x6eb1a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@4 +# AUNIT --inst x6eb1a800/mask=xfffffc00 --status nopcodeop + +:uminv Rd_FPR32, Rn_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uminv(Rn_VPR128.4S, 4:1); +} + +# C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 +# CONSTRUCT x2f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 zext:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@4 +# AUNIT --inst x2f802000/mask=xffc0f400 --status pass --comment "ext" + +:umlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = zext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 +# CONSTRUCT x6f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 zext:8 $* &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@4 +# AUNIT --inst x6f802000/mask=xffc0f400 --status pass --comment "ext" + +:umlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = zext(tmp3); 
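+ # Note (editorial illustration, not generator output): tmp4 is the selected Re element zero-extended to 64 bits; because both factors are widened before the lane multiplies below, each 32x32-bit product is exact (e.g. 0xffffffff * 0xffffffff = 0xfffffffe00000001), and the final adds accumulate into the existing Rd lanes, giving UMLAL2's multiply-add-long behaviour on the upper half of Rn.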
+ # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 +# CONSTRUCT x2f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 zext:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@2 +# AUNIT --inst x2f402000/mask=xffc0f400 --status pass --comment "ext" + +:umlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = zext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 +# CONSTRUCT x6f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 zext:4 $* &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@2 +# AUNIT --inst x6f402000/mask=xffc0f400 --status pass --comment "ext" + +:umlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = zext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 +# CONSTRUCT x6ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $*@8 &=$+@8 +# 
SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@4 +# AUNIT --inst x6ea08000/mask=xffe0fc00 --status pass --comment "ext" + +:umlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x8 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ5 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ5[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 +# CONSTRUCT x6e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $*@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@2 +# AUNIT --inst x6e608000/mask=xffe0fc00 --status pass --comment "ext" + +:umlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x8 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ5 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ5[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 +# CONSTRUCT x6e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $*@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@1 +# AUNIT --inst x6e208000/mask=xffe0fc00 --status pass --comment "ext" + +:umlal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x8 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # 
simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ5 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ5[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 +# CONSTRUCT x2ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $*@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@4 +# AUNIT --inst x2ea08000/mask=xffe0fc00 --status pass --comment "ext" + +:umlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x8 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 +# CONSTRUCT x2e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $*@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@2 +# AUNIT --inst x2e608000/mask=xffe0fc00 --status pass --comment "ext" + +:umlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x8 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 
= TMPQ1 * TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 +# CONSTRUCT x2e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $*@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@1 +# AUNIT --inst x2e208000/mask=xffe0fc00 --status pass --comment "ext" + +:umlal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x8 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ3 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ3[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 +# CONSTRUCT x2f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 zext:8 $*@8 &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@4 +# AUNIT --inst x2f806000/mask=xffc0f400 --status pass --comment "ext" + +:umlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + 
TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = zext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 +# CONSTRUCT x6f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 zext:8 $*@8 &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@4 +# AUNIT --inst x6f806000/mask=xffc0f400 --status pass --comment "ext" + +:umlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = zext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 +# CONSTRUCT x2f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 zext:4 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@2 +# AUNIT --inst x2f406000/mask=xffc0f400 --status pass --comment "ext" + +:umlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = zext(tmp2); + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 +# CONSTRUCT x6f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 zext:4 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@2 +# AUNIT --inst x6f406000/mask=xffc0f400 --status pass --comment "ext" + +:umlsl2 Rd_VPR128.4S, 
Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = zext(tmp3); + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 +# CONSTRUCT x6ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $*@8 &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@4 +# AUNIT --inst x6ea0a000/mask=xffe0fc00 --status pass --comment "ext" + +:umlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xa & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ5 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ5[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 +# CONSTRUCT x6e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@2 +# AUNIT --inst x6e60a000/mask=xffe0fc00 --status pass --comment "ext" + +:umlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xa & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + 
TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ5 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ5[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 +# CONSTRUCT x6e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $*@2 &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@1 +# AUNIT --inst x6e20a000/mask=xffe0fc00 --status pass --comment "ext" + +:umlsl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xa & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); + # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ5 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ5[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 +# CONSTRUCT x2ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $*@8 &=$-@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@4 +# AUNIT --inst x2ea0a000/mask=xffe0fc00 --status pass --comment "ext" + +:umlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xa & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); + # simd infix TMPQ3 = TMPQ1 * 
TMPQ2 on lane size 8 + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 +# CONSTRUCT x2e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $*@4 &=$-@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@2 +# AUNIT --inst x2e60a000/mask=xffe0fc00 --status pass --comment "ext" + +:umlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xa & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Rd_VPR128 & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 +# CONSTRUCT x2e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $*@2 &=$-@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@1 +# AUNIT --inst x2e20a000/mask=xffe0fc00 --status pass --comment "ext" + +:umlsl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xa & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Rd_VPR128 & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); + # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); + # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = 
TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ3 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ3[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.371 UMOV page C7-2236 line 125692 MATCH x0e003c00/mask=xbfe0fc00 +# CONSTRUCT x0e013c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =zext:4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_umov/1 +# AUNIT --inst x0e013c00/mask=xffe1fc00 --status pass + +:umov Rd_GPR32, Rn_VPR128.B.imm_neon_uimm4 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 +{ + # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + Rd_GPR32 = zext(tmp1); + zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64 +} + +# C7.2.371 UMOV page C7-2236 line 125692 MATCH x0e003c00/mask=xbfe0fc00 +# CONSTRUCT x0e023c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 =zext:4 +# SMACRO(pseudo) ARG1 ARG2 =NEON_umov/1 +# AUNIT --inst x0e023c00/mask=xffe3fc00 --status pass + +:umov Rd_GPR32, Rn_VPR128.H.imm_neon_uimm3 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 +{ + # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + Rd_GPR32 = zext(tmp1); + zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64 +} + +# C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 +# CONSTRUCT x6f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 zext:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@4 +# AUNIT --inst x6f80a000/mask=xffc0f400 --status pass --comment "ext" + +:umull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp3:4 = Re_VPR128.S.vIndex; + local tmp4:8 = zext(tmp3); + # simd infix Rd_VPR128.2D = TMPQ2 * tmp4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * tmp4; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 +# CONSTRUCT x6f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 zext:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@2 +# AUNIT --inst x6f40a000/mask=xffc0f400 --status pass --comment "ext" + +:umull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & 
Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp4:4 = zext(tmp3); + # simd infix Rd_VPR128.4S = TMPQ2 * tmp4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * tmp4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * tmp4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * tmp4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 +# CONSTRUCT x2f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 zext:8 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@4 +# AUNIT --inst x2f80a000/mask=xffc0f400 --status pass --comment "ext" + +:umull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + # simd element Re_VPR128.S[vIndex] lane size 4 + local tmp2:4 = Re_VPR128.S.vIndex; + local tmp3:8 = zext(tmp2); + # simd infix Rd_VPR128.2D = TMPQ1 * tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] * tmp3; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 +# CONSTRUCT x2f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 zext:4 =$* +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@2 +# AUNIT --inst x2f40a000/mask=xffc0f400 --status pass --comment "ext" + +:umull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + local tmp3:4 = zext(tmp2); + # simd infix Rd_VPR128.4S = TMPQ1 * tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] * tmp3; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] * tmp3; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] * tmp3; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 +# CONSTRUCT x6ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 =$*@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@4 +# AUNIT --inst x6ea0c000/mask=xffe0fc00 --status pass --comment "ext" + +:umull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xc & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) 
+ TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); + # simd infix Rd_VPR128.2D = TMPQ2 * TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 +# CONSTRUCT x6e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 =$*@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@2 +# AUNIT --inst x6e60c000/mask=xffe0fc00 --status pass --comment "ext" + +:umull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xc & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); + # simd infix Rd_VPR128.4S = TMPQ2 * TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 +# CONSTRUCT x6e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 =$*@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@1 +# AUNIT --inst x6e20c000/mask=xffe0fc00 --status pass --comment "ext" + +:umull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xc & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; + # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); + # simd infix Rd_VPR128.8H = TMPQ2 * TMPQ4 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; + zext_zq(Zd); # zero 
upper 16 bytes of Zd +} + +# C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 +# CONSTRUCT x2ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@4 +# AUNIT --inst x2ea0c000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:umull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xc & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_umull(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 +# CONSTRUCT x2e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@2 +# AUNIT --inst x2e60c000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:umull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xc & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_umull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 +# CONSTRUCT x2e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@1 +# AUNIT --inst x2e20c000/mask=xffe0fc00 --status nopcodeop --comment "ext" + +:umull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xc & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_umull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00 +# CONSTRUCT x7e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 +# AUNIT --inst x7e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x1 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_uqadd(Rn_FPR8, Rm_FPR8); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00 +# CONSTRUCT x7ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 +# AUNIT --inst x7ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x1 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_uqadd(Rn_FPR64, Rm_FPR64); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00 +# CONSTRUCT x7e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 +# AUNIT --inst x7e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x1 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uqadd(Rn_FPR16, Rm_FPR16); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00 +# CONSTRUCT x7ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 +# AUNIT --inst x7ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x1 & b_1010=1 & Rn_FPR32 & 
Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uqadd(Rn_FPR32, Rm_FPR32); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x6e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@1 +# AUNIT --inst x6e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x1 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x6ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@8 +# AUNIT --inst x6ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x1 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_uqadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x2ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@4 +# AUNIT --inst x2ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x1 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x2e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@2 +# AUNIT --inst x2e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x1 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x6ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@4 +# AUNIT --inst x6ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x1 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x2e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@1 +# AUNIT --inst x2e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x1 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00 +# CONSTRUCT x6e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@2 +# AUNIT --inst x6e600c00/mask=xffe0fc00 --status 
nopcodeop --comment "nointround nointsat" + +:uqadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x1 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 +# CONSTRUCT x7e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 +# AUNIT --inst x7e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0xb & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_uqrshl(Rn_FPR8, Rm_FPR8); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 +# CONSTRUCT x7ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 +# AUNIT --inst x7ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xb & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_uqrshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 +# CONSTRUCT x7e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 +# AUNIT --inst x7e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0xb & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uqrshl(Rn_FPR16, Rm_FPR16); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 +# CONSTRUCT x7ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 +# AUNIT --inst x7ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0xb & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uqrshl(Rn_FPR32, Rm_FPR32); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x6e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@1 +# AUNIT --inst x6e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xb & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqrshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x6ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@8 +# AUNIT --inst x6ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xb & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_uqrshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x2ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@4 +# AUNIT --inst 
x2ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xb & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqrshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x2e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@2 +# AUNIT --inst x2e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xb & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqrshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x6ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@4 +# AUNIT --inst x6ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xb & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqrshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x2e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@1 +# AUNIT --inst x2e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xb & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqrshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 +# CONSTRUCT x6e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@2 +# AUNIT --inst x6e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqrshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xb & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqrshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 +# CONSTRUCT x6f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn2/2@2 +# AUNIT --inst x6f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" + +:uqrshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqrshrn2(Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 +# CONSTRUCT x2f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@8 +# AUNIT --inst x2f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqrshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqrshrn(Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} 
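+ +# Informative sketch (reviewer note, not generator output): constructors marked +# "nopcodeop" in this file lower to opaque NEON_* pseudo-ops instead of inline +# p-code, so the engine executing the lifted IR must supply their semantics. +# For UQRSHRN / UQRSHRN2, each wide source lane x is rounded, shifted right by +# the immediate n, then saturated to the narrow lane width; e.g. for a 64-bit +# lane narrowing to 32 bits: +#   t = (x + (1 << (n - 1))) >> n;           # unsigned rounding right shift +#   r = (t > 0xffffffff) ? 0xffffffff : t;   # saturate to narrow lane width +# UQRSHRN writes the narrow results to the lower half of Vd and zeroes the +# upper half; UQRSHRN2 writes them to the upper half, preserving the lower.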
+ +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 +# CONSTRUCT x2f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@4 +# AUNIT --inst x2f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqrshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqrshrn(Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 +# CONSTRUCT x6f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@8 +# AUNIT --inst x6f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqrshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqrshrn(Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 +# CONSTRUCT x2f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@2 +# AUNIT --inst x2f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" + +:uqrshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqrshrn(Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 +# CONSTRUCT x6f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn2/2@4 +# AUNIT --inst x6f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" + +:uqrshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqrshrn2(Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x7f009c00/mask=xff80fc00 +# CONSTRUCT x7f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2 +# AUNIT --inst x7f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:uqrshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 +is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_uqrshrn(Rn_FPR16, Imm_shr_imm8:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x7f009c00/mask=xff80fc00 +# CONSTRUCT x7f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2 +# AUNIT --inst x7f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:uqrshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 +is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_uqrshrn(Rn_FPR32, Imm_shr_imm16:1); +} + +# C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x7f009c00/mask=xff80fc00 +# CONSTRUCT x7f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# 
SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2 +# AUNIT --inst x7f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" +# Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:uqrshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 +is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_uqrshrn(Rn_FPR64, Imm_shr_imm32:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x6f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@1 +# AUNIT --inst x6f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqshl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x6f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@8 +# AUNIT --inst x6f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_uqshl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x2f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@4 +# AUNIT --inst x2f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqshl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x2f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@2 +# AUNIT --inst x2f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqshl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x6f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@4 +# AUNIT --inst x6f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqshl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x2f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@1 +# AUNIT --inst x2f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqshl(Rn_VPR64.8B, 
Imm_uimm3:1, 1:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 +# CONSTRUCT x6f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@2 +# AUNIT --inst x6f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqshl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 +# CONSTRUCT x7f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 +# AUNIT --inst x7f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=0001 V=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:uqshl Rd_FPR8, Rn_FPR8, Imm_shr_imm8 +is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_uqshl(Rn_FPR8, Imm_shr_imm8:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 +# CONSTRUCT x7f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 +# AUNIT --inst x7f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=001x V=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:uqshl Rd_FPR16, Rn_FPR16, Imm_shr_imm16 +is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_uqshl(Rn_FPR16, Imm_shr_imm16:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 +# CONSTRUCT x7f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 +# AUNIT --inst x7f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=01xx V=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:uqshl Rd_FPR32, Rn_FPR32, Imm_shr_imm32 +is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_uqshl(Rn_FPR32, Imm_shr_imm32:1); +} + +# C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 +# CONSTRUCT x7f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 +# AUNIT --inst x7f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=1xxx V=FPR64 imm=Imm_shr_imm64 bb=b_22 aa=1 + +:uqshl Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b011111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + Rd_FPR64 = NEON_uqshl(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 +# CONSTRUCT x7e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 +# AUNIT --inst x7e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x9 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_uqshl(Rn_FPR8, Rm_FPR8); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 +# CONSTRUCT x7ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 +# AUNIT --inst x7ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + 
+:uqshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x9 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_uqshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 +# CONSTRUCT x7e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 +# AUNIT --inst x7e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x9 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uqshl(Rn_FPR16, Rm_FPR16); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 +# CONSTRUCT x7ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 +# AUNIT --inst x7ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x9 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uqshl(Rn_FPR32, Rm_FPR32); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x6e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@1 +# AUNIT --inst x6e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x9 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x6ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@8 +# AUNIT --inst x6ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x9 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_uqshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x2ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@4 +# AUNIT --inst x2ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x9 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x2e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@2 +# AUNIT --inst x2e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x9 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x6ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 
ARG3 =NEON_uqshl/2@4 +# AUNIT --inst x6ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x9 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x2e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@1 +# AUNIT --inst x2e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x9 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 +# CONSTRUCT x6e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@2 +# AUNIT --inst x6e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x9 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x6f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn2/3@2 +# AUNIT --inst x6f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:uqshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x2f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3@8 +# AUNIT --inst x2f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x2f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3@4 +# AUNIT --inst x2f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:uqshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x6f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn2/3@8 +# AUNIT --inst x6f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & 
Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x2f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3@2 +# AUNIT --inst x2f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" + +:uqshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 +# CONSTRUCT x6f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn2/3@4 +# AUNIT --inst x6f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" + +:uqshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x7f009400/mask=xff80fc00 +# CONSTRUCT x7f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3 +# AUNIT --inst x7f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 + +:uqshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 +is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd +{ + Rd_FPR8 = NEON_uqshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x7f009400/mask=xff80fc00 +# CONSTRUCT x7f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3 +# AUNIT --inst x7f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 + +:uqshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 +is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd +{ + Rd_FPR16 = NEON_uqshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); +} + +# C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x7f009400/mask=xff80fc00 +# CONSTRUCT x7f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3 +# AUNIT --inst x7f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" +# Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 + +:uqshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 +is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd +{ + Rd_FPR32 = NEON_uqshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 +# CONSTRUCT x7e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 +# AUNIT --inst x7e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_FPR8, Rn_FPR8, Rm_FPR8 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x5 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd +{ + Rd_FPR8 = NEON_uqsub(Rn_FPR8, Rm_FPR8); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 +# CONSTRUCT x7ee02c00/mask=xffe0fc00 MATCHED 
1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 +# AUNIT --inst x7ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x5 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_uqsub(Rn_FPR64, Rm_FPR64); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 +# CONSTRUCT x7e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 +# AUNIT --inst x7e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_FPR16, Rn_FPR16, Rm_FPR16 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x5 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd +{ + Rd_FPR16 = NEON_uqsub(Rn_FPR16, Rm_FPR16); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 +# CONSTRUCT x7ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 +# AUNIT --inst x7ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_FPR32, Rn_FPR32, Rm_FPR32 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x5 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd +{ + Rd_FPR32 = NEON_uqsub(Rn_FPR32, Rm_FPR32); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT x6e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@1 +# AUNIT --inst x6e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x5 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_uqsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT x6ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@8 +# AUNIT --inst x6ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x5 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_uqsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT x2ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@4 +# AUNIT --inst x2ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x5 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_uqsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT x2e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@2 +# AUNIT --inst x2e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x5 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_uqsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT 
x6ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@4 +# AUNIT --inst x6ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x5 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_uqsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT x2e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@1 +# AUNIT --inst x2e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x5 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_uqsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 +# CONSTRUCT x6e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@2 +# AUNIT --inst x6e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" + +:uqsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x5 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_uqsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x7e214800/mask=xff3ffc00 +# CONSTRUCT x7e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2 +# AUNIT --inst x7e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Scalar variant when size=00 Q=1 bb=1 mnemonic=uqxtn Ta=FPR16 Tb=FPR8 + +:uqxtn Rd_FPR8, Rn_FPR16 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_FPR8 & Rn_FPR16 & Zd +{ + Rd_FPR8 = NEON_uqxtn(Rd_FPR8, Rn_FPR16); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x7e214800/mask=xff3ffc00 +# CONSTRUCT x7e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2 +# AUNIT --inst x7e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Scalar variant when size=01 Q=1 bb=1 mnemonic=uqxtn Ta=FPR32 Tb=FPR16 + +:uqxtn Rd_FPR16, Rn_FPR32 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_FPR16 & Rn_FPR32 & Zd +{ + Rd_FPR16 = NEON_uqxtn(Rd_FPR16, Rn_FPR32); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x7e214800/mask=xff3ffc00 +# CONSTRUCT x7ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2 +# AUNIT --inst x7ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Scalar variant when size=10 Q=1 bb=1 mnemonic=uqxtn Ta=FPR64 Tb=FPR32 + +:uqxtn Rd_FPR32, Rn_FPR64 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_FPR32 & Rn_FPR64 & Zd +{ + Rd_FPR32 = NEON_uqxtn(Rd_FPR32, Rn_FPR64); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 +# CONSTRUCT x2e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2@2 +# AUNIT --inst x2e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=00 Q=0 bb=0 mnemonic=uqxtn e=2 Ta=VPR128.8H Tb=VPR64.8B + +:uqxtn Rd_VPR64.8B, Rn_VPR128.8H +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & 
b_1021=0b100001010010 & Rd_VPR64.8B & Rn_VPR128.8H & Zd +{ + Rd_VPR64.8B = NEON_uqxtn(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 +# CONSTRUCT x6e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn2/2@2 +# AUNIT --inst x6e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=00 Q=1 bb=0 mnemonic=uqxtn2 e=2 Ta=VPR128.8H Tb=VPR128.16B + +:uqxtn2 Rd_VPR128.16B, Rn_VPR128.8H +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_VPR128.16B & Rn_VPR128.8H & Zd +{ + Rd_VPR128.16B = NEON_uqxtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 +# CONSTRUCT x2e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2@4 +# AUNIT --inst x2e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=01 Q=0 bb=0 mnemonic=uqxtn e=4 Ta=VPR128.4S Tb=VPR64.4H + +:uqxtn Rd_VPR64.4H, Rn_VPR128.4S +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_VPR64.4H & Rn_VPR128.4S & Zd +{ + Rd_VPR64.4H = NEON_uqxtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 +# CONSTRUCT x6e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn2/2@4 +# AUNIT --inst x6e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=01 Q=1 bb=0 mnemonic=uqxtn2 e=4 Ta=VPR128.4S Tb=VPR128.8H + +:uqxtn2 Rd_VPR128.8H, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_VPR128.8H & Rn_VPR128.4S & Zd +{ + Rd_VPR128.8H = NEON_uqxtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 +# CONSTRUCT x2ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2@8 +# AUNIT --inst x2ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=10 Q=0 bb=0 mnemonic=uqxtn e=8 Ta=VPR128.2D Tb=VPR64.2S + +:uqxtn Rd_VPR64.2S, Rn_VPR128.2D +is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_VPR64.2S & Rn_VPR128.2D & Zd +{ + Rd_VPR64.2S = NEON_uqxtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); +} + +# C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 +# CONSTRUCT x6ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn2/2@8 +# AUNIT --inst x6ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Vector variant when size=10 Q=1 bb=0 mnemonic=uqxtn2 e=8 Ta=VPR128.2D Tb=VPR128.4S + +:uqxtn2 Rd_VPR128.4S, Rn_VPR128.2D +is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_VPR128.4S & Rn_VPR128.2D & Zd +{ + Rd_VPR128.4S = NEON_uqxtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); +} + +# C7.2.382 URECPE page C7-2263 line 127300 MATCH x0ea1c800/mask=xbfbffc00 +# CONSTRUCT x0ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_urecpe/1@4 +# AUNIT --inst x0ea1c800/mask=xfffffc00 --status nopcodeop +# Vector variant when Q=0 T=VPR64.2S + +:urecpe Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_urecpe(Rn_VPR64.2S, 4:1); +} + +# C7.2.382 URECPE page C7-2263 line 127300 MATCH 
x0ea1c800/mask=xbfbffc00 +# CONSTRUCT x4ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_urecpe/1@4 +# AUNIT --inst x4ea1c800/mask=xfffffc00 --status nopcodeop +# Vector variant when Q=1 T=VPR128.4S + +:urecpe Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_urecpe(Rn_VPR128.4S, 4:1); +} + +# C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 +# CONSTRUCT x6e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@1 +# AUNIT --inst x6e201400/mask=xffe0fc00 --status nopcodeop + +:urhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_urhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 +# CONSTRUCT x2ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@4 +# AUNIT --inst x2ea01400/mask=xffe0fc00 --status nopcodeop + +:urhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_urhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 +# CONSTRUCT x2e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@2 +# AUNIT --inst x2e601400/mask=xffe0fc00 --status nopcodeop + +:urhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_urhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 +# CONSTRUCT x6ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@4 +# AUNIT --inst x6ea01400/mask=xffe0fc00 --status nopcodeop + +:urhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_urhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 +# CONSTRUCT x2e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@1 +# AUNIT --inst x2e201400/mask=xffe0fc00 --status nopcodeop + +:urhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_urhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 +# CONSTRUCT x6e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@2 +# AUNIT --inst x6e601400/mask=xffe0fc00 --status nopcodeop + +:urhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_urhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.384 URSHL page C7-2266 line 
127452 MATCH x7e205400/mask=xff20fc00 +# CONSTRUCT x7ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2 +# AUNIT --inst x7ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_urshl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x6e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@1 +# AUNIT --inst x6e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_urshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x6ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@8 +# AUNIT --inst x6ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_urshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x2ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@4 +# AUNIT --inst x2ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_urshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x2e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@2 +# AUNIT --inst x2e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_urshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x6ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@4 +# AUNIT --inst x6ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_urshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x2e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@1 +# AUNIT --inst x2e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xa & b_1010=1 & 
Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_urshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 +# CONSTRUCT x6e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@2 +# AUNIT --inst x6e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_urshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x6f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@1 +# AUNIT --inst x6f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_urshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x6f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@8 +# AUNIT --inst x6f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_urshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x2f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@4 +# AUNIT --inst x2f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_urshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x2f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@2 +# AUNIT --inst x2f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_urshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x6f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@4 +# AUNIT --inst x6f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_urshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x2f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@1 +# AUNIT --inst x2f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR64.8B, 
Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_urshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 +# CONSTRUCT x6f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@2 +# AUNIT --inst x6f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" + +:urshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_urshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); +} + +# C7.2.385 URSHR page C7-2268 line 127587 MATCH x7f002400/mask=xff80fc00 +# CONSTRUCT x7f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2 +# AUNIT --inst x7f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" +# Scalar variant when immh=1xxx + +:urshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b011111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + Rd_FPR64 = NEON_urshr(Rn_FPR64, Imm_shr_imm64:1); +} + +# C7.2.386 URSQRTE page C7-2270 line 127723 MATCH x2ea1c800/mask=xbfbffc00 +# CONSTRUCT x2ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ursqrte/1@4 +# AUNIT --inst x2ea1c800/mask=xfffffc00 --status nopcodeop +# Vector variant when Q=0 T=VPR64.2S + +:ursqrte Rd_VPR64.2S, Rn_VPR64.2S +is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_ursqrte(Rn_VPR64.2S, 4:1); +} + +# C7.2.386 URSQRTE page C7-2270 line 127723 MATCH x2ea1c800/mask=xbfbffc00 +# CONSTRUCT x6ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 =NEON_ursqrte/1@4 +# AUNIT --inst x6ea1c800/mask=xfffffc00 --status nopcodeop +# Vector variant when Q=1 T=VPR128.4S + +:ursqrte Rd_VPR128.4S, Rn_VPR128.4S +is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_ursqrte(Rn_VPR128.4S, 4:1); +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x6f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@1 +# AUNIT --inst x6f083400/mask=xfff8fc00 --status fail --comment "nointround" + +:ursra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 + TMPQ1[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; + 
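# NB: per the 'nointround' / '--status fail' annotations above, this expansion omits URSRA's rounding step; a faithful model would add the rounding constant (1 << (shift-1)) to each lane before shifting. E.g. with shift 3 and lane value 21, rounded URSRA accumulates (21+4)>>3 = 3 where the plain >> used here gives 21>>3 = 2. + 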
TMPQ1[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; + # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x6f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 &=$+@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@8 +# AUNIT --inst x6f403400/mask=xffc0fc00 --status fail --comment "nointround" + +:ursra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x2f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@4 +# AUNIT --inst x2f203400/mask=xffe0fc00 --status fail --comment "nointround" + +:ursra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix TMPD1 = Rn_VPR64.2S >> tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] >> tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] >> tmp1; + # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x2f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@2 +# AUNIT --inst x2f103400/mask=xfff0fc00 --status fail --comment "nointround" + +:ursra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix TMPD1 = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 + 
TMPD1[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; + # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x6f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@4 +# AUNIT --inst x6f203400/mask=xffe0fc00 --status fail --comment "nointround" + +:ursra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix TMPQ1 = Rn_VPR128.4S >> tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> tmp1; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x2f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@1 +# AUNIT --inst x2f083400/mask=xfff8fc00 --status fail --comment "nointround" + +:ursra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix TMPD1 = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 + TMPD1[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; + # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 +# CONSTRUCT x6f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@2 +# AUNIT --inst x6f103400/mask=xfff0fc00 --status fail --comment "nointround" + +:ursra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & 
Rd_VPR128.8H & Zd +{ + # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; + # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.387 URSRA page C7-2271 line 127788 MATCH x7f003400/mask=xff80fc00 +# CONSTRUCT x7f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 >> &=+ +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3 +# AUNIT --inst x7f403400/mask=xffc0fc00 --status fail --comment "nointround" +# Scalar variant when immh=1xxx + +:ursra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b011111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + local tmp2:8 = Rn_FPR64 >> tmp1; + Rd_FPR64 = Rd_FPR64 + tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x7e204400/mask=xff20fc00 +# CONSTRUCT x7ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2 +# AUNIT --inst x7ee04400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_FPR64, Rn_FPR64, Rm_FPR64 +is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x8 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd +{ + Rd_FPR64 = NEON_ushl(Rn_FPR64, Rm_FPR64); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x6e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@1 +# AUNIT --inst x6e204400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + Rd_VPR128.16B = NEON_ushl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x6ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@8 +# AUNIT --inst x6ee04400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + Rd_VPR128.2D = NEON_ushl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x2ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@4 +# AUNIT --inst x2ea04400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S 
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + Rd_VPR64.2S = NEON_ushl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x2e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@2 +# AUNIT --inst x2e604400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + Rd_VPR64.4H = NEON_ushl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x6ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@4 +# AUNIT --inst x6ea04400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + Rd_VPR128.4S = NEON_ushl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x2e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@1 +# AUNIT --inst x2e204400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + Rd_VPR64.8B = NEON_ushl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); +} + +# C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 +# CONSTRUCT x6e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@2 +# AUNIT --inst x6e604400/mask=xffe0fc00 --status nopcodeop + +:ushl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + Rd_VPR128.8H = NEON_ushl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); +} + +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x6f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3 =var:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll2/2@1 +# AUNIT --inst x6f08a400/mask=xfff8fc00 --status pass --comment "ext" + +:ushll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm3 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd +{ + TMPD1 = Rn_VPR128.16B[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + local tmp3:2 = Imm_uimm3; + # simd infix Rd_VPR128.8H = TMPQ2 << tmp3 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] << tmp3; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] << tmp3; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] << tmp3; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] << tmp3; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] << tmp3; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] << tmp3; + 
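# (With a shift amount of 0 this encoding is the UXTL2 alias also cited in the header: the zext widening into TMPQ2 is then the entire effect and the << becomes a no-op.) + 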
Rd_VPR128.8H[96,16] = TMPQ2[96,16] << tmp3; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x2f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@4:16 ARG3 =var:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll/2@4 +# AUNIT --inst x2f20a400/mask=xffe0fc00 --status pass --comment "ext" + +:ushll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm5 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); + local tmp2:8 = Imm_uimm5; + # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x2f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@2:16 ARG3 =var:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll/2@2 +# AUNIT --inst x2f10a400/mask=xfff0fc00 --status pass --comment "ext" + +:ushll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm4 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); + local tmp2:4 = Imm_uimm4; + # simd infix Rd_VPR128.4S = TMPQ1 << tmp2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] << tmp2; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] << tmp2; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] << tmp2; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x6f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 =var:8 =$<<@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll2/2@4 +# AUNIT --inst x6f20a400/mask=xffe0fc00 --status pass --comment "ext" + +:ushll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm5 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd +{ + TMPD1 = Rn_VPR128.4S[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + local tmp3:8 = Imm_uimm5; + # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x2f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 $zext@1:16 ARG3 =var:2 =$<<@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll/2@1 +# AUNIT --inst x2f08a400/mask=xfff8fc00 --status pass 
--comment "ext" + +:ushll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm3 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd +{ + # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); + local tmp2:2 = Imm_uimm3; + # simd infix Rd_VPR128.8H = TMPQ1 << tmp2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] << tmp2; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] << tmp2; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] << tmp2; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] << tmp2; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] << tmp2; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] << tmp2; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] << tmp2; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 +# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 +# CONSTRUCT x6f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 =var:4 =$<<@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll2/2@2 +# AUNIT --inst x6f10a400/mask=xfff0fc00 --status pass --comment "ext" + +:ushll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm4 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd +{ + TMPD1 = Rn_VPR128.8H[64,64]; + # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + local tmp3:4 = Imm_uimm4; + # simd infix Rd_VPR128.4S = TMPQ2 << tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] << tmp3; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] << tmp3; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] << tmp3; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x6f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 =$>>@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@1 +# AUNIT --inst x6f080400/mask=xfff8fc00 --status pass + +:ushr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + # simd infix Rd_VPR128.16B = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] >> 
Imm_shr_imm8:1; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x6f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 =$>>@8 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@8 +# AUNIT --inst x6f400400/mask=xffc0fc00 --status pass + +:ushr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + # simd infix Rd_VPR128.2D = Rn_VPR128.2D >> tmp1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] >> tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x2f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 =$>>@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@4 +# AUNIT --inst x2f200400/mask=xffe0fc00 --status pass + +:ushr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S >> tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] >> tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] >> tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x2f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 =$>>@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@2 +# AUNIT --inst x2f100400/mask=xfff0fc00 --status pass + +:ushr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + # simd infix Rd_VPR64.4H = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x6f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 =var:4 =$>>@4 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@4 +# AUNIT --inst x6f200400/mask=xffe0fc00 --status pass + +:ushr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + local tmp1:4 = Imm_shr_imm32; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S >> tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] >> tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] >> tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] >> tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] >> tmp1; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x2f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:1 =$>>@1 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@1 +# 
AUNIT --inst x2f080400/mask=xfff8fc00 --status pass + +:ushr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 +is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + # simd infix Rd_VPR64.8B = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 +# CONSTRUCT x6f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3:2 =$>>@2 +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@2 +# AUNIT --inst x6f100400/mask=xfff0fc00 --status pass + +:ushr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 +is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + # simd infix Rd_VPR128.8H = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.392 USHR page C7-2282 line 128386 MATCH x7f000400/mask=xff80fc00 +# CONSTRUCT x7f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG1 ARG2 ARG3 zext:8 =>> +# SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2 +# AUNIT --inst x7f400400/mask=xffc0fc00 --status pass +# Scalar variant when immh=1xxx + +:ushr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 +is b_2331=0b011111110 & b_22=1 & b_1015=0b000001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd +{ + local tmp1:8 = zext(Imm_shr_imm64); + Rd_FPR64 = Rn_FPR64 >> tmp1; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 +# CONSTRUCT x7e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2 +# AUNIT --inst x7e203800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Scalar variant when size=00 Q=1 bb=1 T=FPR8 + +:usqadd Rd_FPR8, Rn_FPR8 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_FPR8 & Rn_FPR8 & Zd +{ + Rd_FPR8 = NEON_usqadd(Rd_FPR8, Rn_FPR8); +} + +# C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 +# CONSTRUCT x7e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2 +# AUNIT --inst x7e603800/mask=xfffffc00 --status nopcodeop --comment "nointsat" +# Scalar variant when size=01 Q=1 bb=1 T=FPR16 + +:usqadd Rd_FPR16, Rn_FPR16 +is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_FPR16 & Rn_FPR16 & Zd +{ + Rd_FPR16 = NEON_usqadd(Rd_FPR16, Rn_FPR16); +} + +# C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 +# 
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00
+# CONSTRUCT x7ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2
+# AUNIT --inst x7ea03800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Scalar variant when size=10 Q=1 bb=1 T=FPR32
+
+:usqadd Rd_FPR32, Rn_FPR32
+is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_FPR32 & Rn_FPR32 & Zd
+{
+  Rd_FPR32 = NEON_usqadd(Rd_FPR32, Rn_FPR32);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00
+# CONSTRUCT x7ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2
+# AUNIT --inst x7ee03800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Scalar variant when size=11 Q=1 bb=1 T=FPR64
+
+:usqadd Rd_FPR64, Rn_FPR64
+is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_FPR64 & Rn_FPR64 & Zd
+{
+  Rd_FPR64 = NEON_usqadd(Rd_FPR64, Rn_FPR64);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x2e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@1
+# AUNIT --inst x2e203800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=00 Q=0 bb=0 e=1 T=VPR64.8B
+
+:usqadd Rd_VPR64.8B, Rn_VPR64.8B
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd
+{
+  Rd_VPR64.8B = NEON_usqadd(Rd_VPR64.8B, Rn_VPR64.8B, 1:1);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x6e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@1
+# AUNIT --inst x6e203800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=00 Q=1 bb=0 e=1 T=VPR128.16B
+
+:usqadd Rd_VPR128.16B, Rn_VPR128.16B
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd
+{
+  Rd_VPR128.16B = NEON_usqadd(Rd_VPR128.16B, Rn_VPR128.16B, 1:1);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x2e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@2
+# AUNIT --inst x2e603800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=01 Q=0 bb=0 e=2 T=VPR64.4H
+
+:usqadd Rd_VPR64.4H, Rn_VPR64.4H
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd
+{
+  Rd_VPR64.4H = NEON_usqadd(Rd_VPR64.4H, Rn_VPR64.4H, 2:1);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x6e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@2
+# AUNIT --inst x6e603800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=01 Q=1 bb=0 e=2 T=VPR128.8H
+
+:usqadd Rd_VPR128.8H, Rn_VPR128.8H
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd
+{
+  Rd_VPR128.8H = NEON_usqadd(Rd_VPR128.8H, Rn_VPR128.8H, 2:1);
+}
+
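+# Note: saturating accumulate has no direct p-code equivalent (hence the
+# "nopcodeop" status above), so every USQADD form defers to the NEON_usqadd
+# pseudo-op; on the vector forms the trailing N:1 constant is the lane size in bytes.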
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x2ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@4
+# AUNIT --inst x2ea03800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=10 Q=0 bb=0 e=4 T=VPR64.2S
+
+:usqadd Rd_VPR64.2S, Rn_VPR64.2S
+is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd
+{
+  Rd_VPR64.2S = NEON_usqadd(Rd_VPR64.2S, Rn_VPR64.2S, 4:1);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x6ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@4
+# AUNIT --inst x6ea03800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=10 Q=1 bb=0 e=4 T=VPR128.4S
+
+:usqadd Rd_VPR128.4S, Rn_VPR128.4S
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd
+{
+  Rd_VPR128.4S = NEON_usqadd(Rd_VPR128.4S, Rn_VPR128.4S, 4:1);
+}
+
+# C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00
+# CONSTRUCT x6ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@8
+# AUNIT --inst x6ee03800/mask=xfffffc00 --status nopcodeop --comment "nointsat"
+# Vector variant when size=11 Q=1 bb=0 e=8 T=VPR128.2D
+
+:usqadd Rd_VPR128.2D, Rn_VPR128.2D
+is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd
+{
+  Rd_VPR128.2D = NEON_usqadd(Rd_VPR128.2D, Rn_VPR128.2D, 8:1);
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x6f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@1
+# AUNIT --inst x6f081400/mask=xfff8fc00 --status pass
+
+:usra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+  # simd infix TMPQ1 = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1
+  TMPQ1[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1;
+  TMPQ1[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1;
+  TMPQ1[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1;
+  TMPQ1[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1;
+  TMPQ1[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1;
+  TMPQ1[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1;
+  TMPQ1[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1;
+  TMPQ1[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1;
+  TMPQ1[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1;
+  TMPQ1[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1;
+  TMPQ1[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1;
+  TMPQ1[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1;
+  TMPQ1[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1;
+  TMPQ1[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1;
+  TMPQ1[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1;
+  TMPQ1[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1;
+  # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1
+  Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8];
+  Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8];
+  Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8];
+  Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8];
+  Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8];
+  Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8];
+  Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8];
+  Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8];
+  Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8];
+  Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8];
+  Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8];
+  Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8];
+  Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8];
+  Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8];
+  Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8];
+  Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x6f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 &=$+@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@8
+# AUNIT --inst x6f401400/mask=xffc0fc00 --status pass
+
+:usra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+  local tmp1:8 = zext(Imm_shr_imm64);
+  # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8
+  TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1;
+  TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1;
+  # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8
+  Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64];
+  Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x2f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@4
+# AUNIT --inst x2f201400/mask=xffe0fc00 --status pass
+
+:usra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+  local tmp1:4 = Imm_shr_imm32;
+  # simd infix TMPD1 = Rn_VPR64.2S >> tmp1 on lane size 4
+  TMPD1[0,32] = Rn_VPR64.2S[0,32] >> tmp1;
+  TMPD1[32,32] = Rn_VPR64.2S[32,32] >> tmp1;
+  # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4
+  Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32];
+  Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x2f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@2
+# AUNIT --inst x2f101400/mask=xfff0fc00 --status pass
+
+:usra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+  # simd infix TMPD1 = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2
+  TMPD1[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2;
+  TMPD1[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2;
+  TMPD1[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2;
+  TMPD1[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2;
+  # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2
+  Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16];
+  Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16];
+  Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16];
+  Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
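+# Note: USRA is an accumulating shift; each lane of Rn is shifted right and the
+# result is added into the existing lane of Rd, so Rd is both input and output here.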
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x6f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@4
+# AUNIT --inst x6f201400/mask=xffe0fc00 --status pass
+
+:usra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+  local tmp1:4 = Imm_shr_imm32;
+  # simd infix TMPQ1 = Rn_VPR128.4S >> tmp1 on lane size 4
+  TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> tmp1;
+  TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> tmp1;
+  TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> tmp1;
+  TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> tmp1;
+  # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4
+  Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32];
+  Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32];
+  Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32];
+  Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x2f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@1
+# AUNIT --inst x2f081400/mask=xfff8fc00 --status pass
+
+:usra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+  # simd infix TMPD1 = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1
+  TMPD1[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1;
+  TMPD1[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1;
+  TMPD1[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1;
+  TMPD1[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1;
+  TMPD1[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1;
+  TMPD1[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1;
+  TMPD1[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1;
+  TMPD1[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1;
+  # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1
+  Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8];
+  Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8];
+  Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8];
+  Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8];
+  Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8];
+  Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8];
+  Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8];
+  Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00
+# CONSTRUCT x6f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@2
+# AUNIT --inst x6f101400/mask=xfff0fc00 --status pass
+
+:usra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+  # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2
+  TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2;
+  TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2;
+  TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2;
+  TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2;
+  TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2;
+  TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2;
+  TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2;
+  TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2;
+  # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2
+  Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16];
+  Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16];
+  Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16];
+  Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16];
+  Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16];
+  Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16];
+  Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16];
+  Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.395 USRA page C7-2288 line 128723 MATCH x7f001400/mask=xff80fc00
+# CONSTRUCT x7f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 zext:8 >> &=+
+# SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3
+# AUNIT --inst x7f401400/mask=xffc0fc00 --status pass
+# Scalar variant when immh=1xxx
+
+:usra Rd_FPR64, Rn_FPR64, Imm_shr_imm64
+is b_2331=0b011111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd
+{
+  local tmp1:8 = zext(Imm_shr_imm64);
+  local tmp2:8 = Rn_FPR64 >> tmp1;
+  Rd_FPR64 = Rd_FPR64 + tmp2;
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00
+# CONSTRUCT x6ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 =$-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl2/2@4
+# AUNIT --inst x6ea02000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x2 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd
+{
+  TMPD1 = Rn_VPR128.4S[64,64];
+  # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8)
+  TMPQ2[0,64] = zext(TMPD1[0,32]);
+  TMPQ2[64,64] = zext(TMPD1[32,32]);
+  TMPD3 = Rm_VPR128.4S[64,64];
+  # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8)
+  TMPQ4[0,64] = zext(TMPD3[0,32]);
+  TMPQ4[64,64] = zext(TMPD3[32,32]);
+  # simd infix Rd_VPR128.2D = TMPQ2 - TMPQ4 on lane size 8
+  Rd_VPR128.2D[0,64] = TMPQ2[0,64] - TMPQ4[0,64];
+  Rd_VPR128.2D[64,64] = TMPQ2[64,64] - TMPQ4[64,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00
+# CONSTRUCT x6e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 =$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl2/2@2
+# AUNIT --inst x6e602000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x2 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd
+{
+  TMPD1 = Rn_VPR128.8H[64,64];
+  # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4)
+  TMPQ2[0,32] = zext(TMPD1[0,16]);
+  TMPQ2[32,32] = zext(TMPD1[16,16]);
+  TMPQ2[64,32] = zext(TMPD1[32,16]);
+  TMPQ2[96,32] = zext(TMPD1[48,16]);
+  TMPD3 = Rm_VPR128.8H[64,64];
+  # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4)
+  TMPQ4[0,32] = zext(TMPD3[0,16]);
+  TMPQ4[32,32] = zext(TMPD3[16,16]);
+  TMPQ4[64,32] = zext(TMPD3[32,16]);
+  TMPQ4[96,32] = zext(TMPD3[48,16]);
+  # simd infix Rd_VPR128.4S = TMPQ2 - TMPQ4 on lane size 4
+  Rd_VPR128.4S[0,32] = TMPQ2[0,32] - TMPQ4[0,32];
+  Rd_VPR128.4S[32,32] = TMPQ2[32,32] - TMPQ4[32,32];
+  Rd_VPR128.4S[64,32] = TMPQ2[64,32] - TMPQ4[64,32];
+  Rd_VPR128.4S[96,32] = TMPQ2[96,32] - TMPQ4[96,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00
+# CONSTRUCT x6e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 =$-@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl2/2@1
+# AUNIT --inst x6e202000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x2 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd
+{
+  TMPD1 = Rn_VPR128.16B[64,64];
+  # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2)
+  TMPQ2[0,16] = zext(TMPD1[0,8]);
+  TMPQ2[16,16] = zext(TMPD1[8,8]);
+  TMPQ2[32,16] = zext(TMPD1[16,8]);
+  TMPQ2[48,16] = zext(TMPD1[24,8]);
+  TMPQ2[64,16] = zext(TMPD1[32,8]);
+  TMPQ2[80,16] = zext(TMPD1[40,8]);
+  TMPQ2[96,16] = zext(TMPD1[48,8]);
+  TMPQ2[112,16] = zext(TMPD1[56,8]);
+  TMPD3 = Rm_VPR128.16B[64,64];
+  # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2)
+  TMPQ4[0,16] = zext(TMPD3[0,8]);
+  TMPQ4[16,16] = zext(TMPD3[8,8]);
+  TMPQ4[32,16] = zext(TMPD3[16,8]);
+  TMPQ4[48,16] = zext(TMPD3[24,8]);
+  TMPQ4[64,16] = zext(TMPD3[32,8]);
+  TMPQ4[80,16] = zext(TMPD3[40,8]);
+  TMPQ4[96,16] = zext(TMPD3[48,8]);
+  TMPQ4[112,16] = zext(TMPD3[56,8]);
+  # simd infix Rd_VPR128.8H = TMPQ2 - TMPQ4 on lane size 2
+  Rd_VPR128.8H[0,16] = TMPQ2[0,16] - TMPQ4[0,16];
+  Rd_VPR128.8H[16,16] = TMPQ2[16,16] - TMPQ4[16,16];
+  Rd_VPR128.8H[32,16] = TMPQ2[32,16] - TMPQ4[32,16];
+  Rd_VPR128.8H[48,16] = TMPQ2[48,16] - TMPQ4[48,16];
+  Rd_VPR128.8H[64,16] = TMPQ2[64,16] - TMPQ4[64,16];
+  Rd_VPR128.8H[80,16] = TMPQ2[80,16] - TMPQ4[80,16];
+  Rd_VPR128.8H[96,16] = TMPQ2[96,16] - TMPQ4[96,16];
+  Rd_VPR128.8H[112,16] = TMPQ2[112,16] - TMPQ4[112,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00
+# CONSTRUCT x2ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 =$-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl/2@4
+# AUNIT --inst x2ea02000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x2 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd
+{
+  # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8)
+  TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]);
+  TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]);
+  # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8)
+  TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]);
+  TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]);
+  # simd infix Rd_VPR128.2D = TMPQ1 - TMPQ2 on lane size 8
+  Rd_VPR128.2D[0,64] = TMPQ1[0,64] - TMPQ2[0,64];
+  Rd_VPR128.2D[64,64] = TMPQ1[64,64] - TMPQ2[64,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00
+# CONSTRUCT x2e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 =$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl/2@2
+# AUNIT --inst x2e602000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x2 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd
+{
+  # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4)
+  TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]);
+  TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]);
+  TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]);
+  TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]);
+  # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4)
+  TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]);
+  TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]);
+  TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]);
+  TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]);
+  # simd infix Rd_VPR128.4S = TMPQ1 - TMPQ2 on lane size 4
+  Rd_VPR128.4S[0,32] = TMPQ1[0,32] - TMPQ2[0,32];
+  Rd_VPR128.4S[32,32] = TMPQ1[32,32] - TMPQ2[32,32];
+  Rd_VPR128.4S[64,32] = TMPQ1[64,32] - TMPQ2[64,32];
+  Rd_VPR128.4S[96,32] = TMPQ1[96,32] - TMPQ2[96,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
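+# Note: the non-suffixed USUBL forms widen the low 64-bit halves of both sources
+# before subtracting, while the USUBL2 forms above take the upper halves (the
+# [64,64] extracts); USUBW/USUBW2 below widen only the second operand.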
+# C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00
+# CONSTRUCT x2e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 =$-@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl/2@1
+# AUNIT --inst x2e202000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x2 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd
+{
+  # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2)
+  TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]);
+  TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]);
+  TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]);
+  TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]);
+  TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]);
+  TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]);
+  TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]);
+  TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]);
+  # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2)
+  TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]);
+  TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]);
+  TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]);
+  TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]);
+  TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]);
+  TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]);
+  TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]);
+  TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]);
+  # simd infix Rd_VPR128.8H = TMPQ1 - TMPQ2 on lane size 2
+  Rd_VPR128.8H[0,16] = TMPQ1[0,16] - TMPQ2[0,16];
+  Rd_VPR128.8H[16,16] = TMPQ1[16,16] - TMPQ2[16,16];
+  Rd_VPR128.8H[32,16] = TMPQ1[32,16] - TMPQ2[32,16];
+  Rd_VPR128.8H[48,16] = TMPQ1[48,16] - TMPQ2[48,16];
+  Rd_VPR128.8H[64,16] = TMPQ1[64,16] - TMPQ2[64,16];
+  Rd_VPR128.8H[80,16] = TMPQ1[80,16] - TMPQ2[80,16];
+  Rd_VPR128.8H[96,16] = TMPQ1[96,16] - TMPQ2[96,16];
+  Rd_VPR128.8H[112,16] = TMPQ1[112,16] - TMPQ2[112,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00
+# CONSTRUCT x6ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3[1]:8 $zext@4:16 =$-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw2/2@4
+# AUNIT --inst x6ea03000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+  TMPD1 = Rm_VPR128.4S[64,64];
+  # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8)
+  TMPQ2[0,64] = zext(TMPD1[0,32]);
+  TMPQ2[64,64] = zext(TMPD1[32,32]);
+  # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ2 on lane size 8
+  Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ2[0,64];
+  Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ2[64,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00
+# CONSTRUCT x6e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3[1]:8 $zext@2:16 =$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw2/2@2
+# AUNIT --inst x6e603000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+  TMPD1 = Rm_VPR128.8H[64,64];
+  # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4)
+  TMPQ2[0,32] = zext(TMPD1[0,16]);
+  TMPQ2[32,32] = zext(TMPD1[16,16]);
+  TMPQ2[64,32] = zext(TMPD1[32,16]);
+  TMPQ2[96,32] = zext(TMPD1[48,16]);
+  # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ2 on lane size 4
+  Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32];
+  Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ2[32,32];
+  Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ2[64,32];
+  Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ2[96,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00
+# CONSTRUCT x6e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3[1]:8 $zext@1:16 =$-@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw2/2@1
+# AUNIT --inst x6e203000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B
+is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+  TMPD1 = Rm_VPR128.16B[64,64];
+  # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2)
+  TMPQ2[0,16] = zext(TMPD1[0,8]);
+  TMPQ2[16,16] = zext(TMPD1[8,8]);
+  TMPQ2[32,16] = zext(TMPD1[16,8]);
+  TMPQ2[48,16] = zext(TMPD1[24,8]);
+  TMPQ2[64,16] = zext(TMPD1[32,8]);
+  TMPQ2[80,16] = zext(TMPD1[40,8]);
+  TMPQ2[96,16] = zext(TMPD1[48,8]);
+  TMPQ2[112,16] = zext(TMPD1[56,8]);
+  # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ2 on lane size 2
+  Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ2[0,16];
+  Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ2[16,16];
+  Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ2[32,16];
+  Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ2[48,16];
+  Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ2[64,16];
+  Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ2[80,16];
+  Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ2[96,16];
+  Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ2[112,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00
+# CONSTRUCT x2ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $zext@4:16 =$-@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw/2@4
+# AUNIT --inst x2ea03000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+  # simd resize TMPQ1 = zext(Rm_VPR64.2S) (lane size 4 to 8)
+  TMPQ1[0,64] = zext(Rm_VPR64.2S[0,32]);
+  TMPQ1[64,64] = zext(Rm_VPR64.2S[32,32]);
+  # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ1 on lane size 8
+  Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ1[0,64];
+  Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ1[64,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00
+# CONSTRUCT x2e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $zext@2:16 =$-@4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw/2@2
+# AUNIT --inst x2e603000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+  # simd resize TMPQ1 = zext(Rm_VPR64.4H) (lane size 2 to 4)
+  TMPQ1[0,32] = zext(Rm_VPR64.4H[0,16]);
+  TMPQ1[32,32] = zext(Rm_VPR64.4H[16,16]);
+  TMPQ1[64,32] = zext(Rm_VPR64.4H[32,16]);
+  TMPQ1[96,32] = zext(Rm_VPR64.4H[48,16]);
+  # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ1 on lane size 4
+  Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ1[0,32];
+  Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ1[32,32];
+  Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ1[64,32];
+  Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ1[96,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00
+# CONSTRUCT x2e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $zext@1:16 =$-@2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw/2@1
+# AUNIT --inst x2e203000/mask=xffe0fc00 --status pass --comment "ext"
+
+:usubw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B
+is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+  # simd resize TMPQ1 = zext(Rm_VPR64.8B) (lane size 1 to 2)
+  TMPQ1[0,16] = zext(Rm_VPR64.8B[0,8]);
+  TMPQ1[16,16] = zext(Rm_VPR64.8B[8,8]);
+  TMPQ1[32,16] = zext(Rm_VPR64.8B[16,8]);
+  TMPQ1[48,16] = zext(Rm_VPR64.8B[24,8]);
+  TMPQ1[64,16] = zext(Rm_VPR64.8B[32,8]);
+  TMPQ1[80,16] = zext(Rm_VPR64.8B[40,8]);
+  TMPQ1[96,16] = zext(Rm_VPR64.8B[48,8]);
+  TMPQ1[112,16] = zext(Rm_VPR64.8B[56,8]);
+  # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ1 on lane size 2
+  Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ1[0,16];
+  Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ1[16,16];
+  Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ1[32,16];
+  Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ1[48,16];
+  Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ1[64,16];
+  Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ1[80,16];
+  Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ1[96,16];
+  Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ1[112,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00
+# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00
+# CONSTRUCT x6f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 =$zext@1:16
+# SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl2/1@1
+# AUNIT --inst x6f08a400/mask=xfffffc00 --status pass --comment "ext"
+
+:uxtl2 Rd_VPR128.8H, Rn_VPR128.16B
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd
+{
+  TMPD1 = Rn_VPR128.16B[64,64];
+  # simd resize Rd_VPR128.8H = zext(TMPD1) (lane size 1 to 2)
+  Rd_VPR128.8H[0,16] = zext(TMPD1[0,8]);
+  Rd_VPR128.8H[16,16] = zext(TMPD1[8,8]);
+  Rd_VPR128.8H[32,16] = zext(TMPD1[16,8]);
+  Rd_VPR128.8H[48,16] = zext(TMPD1[24,8]);
+  Rd_VPR128.8H[64,16] = zext(TMPD1[32,8]);
+  Rd_VPR128.8H[80,16] = zext(TMPD1[40,8]);
+  Rd_VPR128.8H[96,16] = zext(TMPD1[48,8]);
+  Rd_VPR128.8H[112,16] = zext(TMPD1[56,8]);
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00
+# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00
+# CONSTRUCT x2f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =$zext@4:16
+# SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl/1@4
+# AUNIT --inst x2f20a400/mask=xfffffc00 --status pass --comment "ext"
+
+:uxtl Rd_VPR128.2D, Rn_VPR64.2S
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd
+{
+  TMPD1 = Rn_VPR64.2S;
+  # simd resize Rd_VPR128.2D = zext(TMPD1) (lane size 4 to 8)
+  Rd_VPR128.2D[0,64] = zext(TMPD1[0,32]);
+  Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]);
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
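+# Note: UXTL/UXTL2 are the preferred aliases of USHLL/USHLL2 with a zero shift,
+# which is why these constructors carry both MATCH lines and an Imm_uimm*=0
+# constraint.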
+# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00
+# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00
+# CONSTRUCT x2f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =$zext@2:16
+# SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl/1@2
+# AUNIT --inst x2f10a400/mask=xfffffc00 --status pass --comment "ext"
+
+:uxtl Rd_VPR128.4S, Rn_VPR64.4H
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd
+{
+  TMPD1 = Rn_VPR64.4H;
+  # simd resize Rd_VPR128.4S = zext(TMPD1) (lane size 2 to 4)
+  Rd_VPR128.4S[0,32] = zext(TMPD1[0,16]);
+  Rd_VPR128.4S[32,32] = zext(TMPD1[16,16]);
+  Rd_VPR128.4S[64,32] = zext(TMPD1[32,16]);
+  Rd_VPR128.4S[96,32] = zext(TMPD1[48,16]);
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00
+# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00
+# CONSTRUCT x6f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 =$zext@4:16
+# SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl2/1@4
+# AUNIT --inst x6f20a400/mask=xfffffc00 --status pass --comment "ext"
+
+:uxtl2 Rd_VPR128.2D, Rn_VPR128.4S
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd
+{
+  TMPD1 = Rn_VPR128.4S[64,64];
+  # simd resize Rd_VPR128.2D = zext(TMPD1) (lane size 4 to 8)
+  Rd_VPR128.2D[0,64] = zext(TMPD1[0,32]);
+  Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]);
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00
+# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00
+# CONSTRUCT x2f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 =var =$zext@1:16
+# SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl/1@1
+# AUNIT --inst x2f08a400/mask=xfffffc00 --status pass --comment "ext"
+
+:uxtl Rd_VPR128.8H, Rn_VPR64.8B
+is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd
+{
+  TMPD1 = Rn_VPR64.8B;
+  # simd resize Rd_VPR128.8H = zext(TMPD1) (lane size 1 to 2)
+  Rd_VPR128.8H[0,16] = zext(TMPD1[0,8]);
+  Rd_VPR128.8H[16,16] = zext(TMPD1[8,8]);
+  Rd_VPR128.8H[32,16] = zext(TMPD1[16,8]);
+  Rd_VPR128.8H[48,16] = zext(TMPD1[24,8]);
+  Rd_VPR128.8H[64,16] = zext(TMPD1[32,8]);
+  Rd_VPR128.8H[80,16] = zext(TMPD1[40,8]);
+  Rd_VPR128.8H[96,16] = zext(TMPD1[48,8]);
+  Rd_VPR128.8H[112,16] = zext(TMPD1[56,8]);
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00
+# C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00
+# CONSTRUCT x6f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2[1]:8 =$zext@2:16
+# SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl2/1@2
+# AUNIT --inst x6f10a400/mask=xfffffc00 --status pass --comment "ext"
+
+:uxtl2 Rd_VPR128.4S, Rn_VPR128.8H
+is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd
+{
+  TMPD1 = Rn_VPR128.8H[64,64];
+  # simd resize Rd_VPR128.4S = zext(TMPD1) (lane size 2 to 4)
+  Rd_VPR128.4S[0,32] = zext(TMPD1[0,16]);
+  Rd_VPR128.4S[32,32] = zext(TMPD1[16,16]);
+  Rd_VPR128.4S[64,32] = zext(TMPD1[32,16]);
+  Rd_VPR128.4S[96,32] = zext(TMPD1[48,16]);
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
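+# Note: in the "simd shuffle" comments below, @s-d copies source lane s to
+# destination lane d at the stated lane size.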
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x4e001800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1@4-2@6-3@8-4@10-5@12-6@14-7:1 swap &=$shuffle@0-8@2-9@4-10@6-11@8-12@10-13@12-14@14-15:1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@1
+# AUNIT --inst x4e001800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+  TMPQ2 = Rm_VPR128.16B;
+  TMPQ1 = Rn_VPR128.16B;
+  # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@2-1@4-2@6-3@8-4@10-5@12-6@14-7) lane size 1
+  Rd_VPR128.16B[0,8] = TMPQ1[0,8];
+  Rd_VPR128.16B[8,8] = TMPQ1[16,8];
+  Rd_VPR128.16B[16,8] = TMPQ1[32,8];
+  Rd_VPR128.16B[24,8] = TMPQ1[48,8];
+  Rd_VPR128.16B[32,8] = TMPQ1[64,8];
+  Rd_VPR128.16B[40,8] = TMPQ1[80,8];
+  Rd_VPR128.16B[48,8] = TMPQ1[96,8];
+  Rd_VPR128.16B[56,8] = TMPQ1[112,8];
+  # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-8@2-9@4-10@6-11@8-12@10-13@12-14@14-15) lane size 1
+  Rd_VPR128.16B[64,8] = TMPQ2[0,8];
+  Rd_VPR128.16B[72,8] = TMPQ2[16,8];
+  Rd_VPR128.16B[80,8] = TMPQ2[32,8];
+  Rd_VPR128.16B[88,8] = TMPQ2[48,8];
+  Rd_VPR128.16B[96,8] = TMPQ2[64,8];
+  Rd_VPR128.16B[104,8] = TMPQ2[80,8];
+  Rd_VPR128.16B[112,8] = TMPQ2[96,8];
+  Rd_VPR128.16B[120,8] = TMPQ2[112,8];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x4ec01800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:8 swap &=$shuffle@0-1:8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@8
+# AUNIT --inst x4ec01800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+  TMPQ2 = Rm_VPR128.2D;
+  TMPQ1 = Rn_VPR128.2D;
+  # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8
+  Rd_VPR128.2D[0,64] = TMPQ1[0,64];
+  # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8
+  Rd_VPR128.2D[64,64] = TMPQ2[0,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x0e801800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:4 swap &=$shuffle@0-1:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@4
+# AUNIT --inst x0e801800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+  TMPD2 = Rm_VPR64.2S;
+  TMPD1 = Rn_VPR64.2S;
+  # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4
+  Rd_VPR64.2S[0,32] = TMPD1[0,32];
+  # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4
+  Rd_VPR64.2S[32,32] = TMPD2[0,32];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x0e401800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1:2 swap &=$shuffle@0-2@2-3:2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@2
+# AUNIT --inst x0e401800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+  TMPD2 = Rm_VPR64.4H;
+  TMPD1 = Rn_VPR64.4H;
+  # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@2-1) lane size 2
+  Rd_VPR64.4H[0,16] = TMPD1[0,16];
+  Rd_VPR64.4H[16,16] = TMPD1[32,16];
+  # simd shuffle Rd_VPR64.4H = TMPD2 (@0-2@2-3) lane size 2
+  Rd_VPR64.4H[32,16] = TMPD2[0,16];
+  Rd_VPR64.4H[48,16] = TMPD2[32,16];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x4e801800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1:4 swap &=$shuffle@0-2@2-3:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@4
+# AUNIT --inst x4e801800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+  TMPQ2 = Rm_VPR128.4S;
+  TMPQ1 = Rn_VPR128.4S;
+  # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@2-1) lane size 4
+  Rd_VPR128.4S[0,32] = TMPQ1[0,32];
+  Rd_VPR128.4S[32,32] = TMPQ1[64,32];
+  # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-2@2-3) lane size 4
+  Rd_VPR128.4S[64,32] = TMPQ2[0,32];
+  Rd_VPR128.4S[96,32] = TMPQ2[64,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x0e001800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1@4-2@6-3:1 swap &=$shuffle@0-4@2-5@4-6@6-7:1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@1
+# AUNIT --inst x0e001800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+  TMPD2 = Rm_VPR64.8B;
+  TMPD1 = Rn_VPR64.8B;
+  # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@2-1@4-2@6-3) lane size 1
+  Rd_VPR64.8B[0,8] = TMPD1[0,8];
+  Rd_VPR64.8B[8,8] = TMPD1[16,8];
+  Rd_VPR64.8B[16,8] = TMPD1[32,8];
+  Rd_VPR64.8B[24,8] = TMPD1[48,8];
+  # simd shuffle Rd_VPR64.8B = TMPD2 (@0-4@2-5@4-6@6-7) lane size 1
+  Rd_VPR64.8B[32,8] = TMPD2[0,8];
+  Rd_VPR64.8B[40,8] = TMPD2[16,8];
+  Rd_VPR64.8B[48,8] = TMPD2[32,8];
+  Rd_VPR64.8B[56,8] = TMPD2[48,8];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00
+# CONSTRUCT x4e401800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1@4-2@6-3:2 swap &=$shuffle@0-4@2-5@4-6@6-7:2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@2
+# AUNIT --inst x4e401800/mask=xffe0fc00 --status pass
+
+:uzp1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+  TMPQ2 = Rm_VPR128.8H;
+  TMPQ1 = Rn_VPR128.8H;
+  # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@2-1@4-2@6-3) lane size 2
+  Rd_VPR128.8H[0,16] = TMPQ1[0,16];
+  Rd_VPR128.8H[16,16] = TMPQ1[32,16];
+  Rd_VPR128.8H[32,16] = TMPQ1[64,16];
+  Rd_VPR128.8H[48,16] = TMPQ1[96,16];
+  # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-4@2-5@4-6@6-7) lane size 2
+  Rd_VPR128.8H[64,16] = TMPQ2[0,16];
+  Rd_VPR128.8H[80,16] = TMPQ2[32,16];
+  Rd_VPR128.8H[96,16] = TMPQ2[64,16];
+  Rd_VPR128.8H[112,16] = TMPQ2[96,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x4e005800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:1 swap &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@1
+# AUNIT --inst x4e005800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+  TMPQ2 = Rm_VPR128.16B;
+  TMPQ1 = Rn_VPR128.16B;
+  # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1
+  Rd_VPR128.16B[0,8] = TMPQ1[8,8];
+  Rd_VPR128.16B[8,8] = TMPQ1[24,8];
+  Rd_VPR128.16B[16,8] = TMPQ1[40,8];
+  Rd_VPR128.16B[24,8] = TMPQ1[56,8];
+  Rd_VPR128.16B[32,8] = TMPQ1[72,8];
+  Rd_VPR128.16B[40,8] = TMPQ1[88,8];
+  Rd_VPR128.16B[48,8] = TMPQ1[104,8];
+  Rd_VPR128.16B[56,8] = TMPQ1[120,8];
+  # simd shuffle Rd_VPR128.16B = TMPQ2 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1
+  Rd_VPR128.16B[64,8] = TMPQ2[8,8];
+  Rd_VPR128.16B[72,8] = TMPQ2[24,8];
+  Rd_VPR128.16B[80,8] = TMPQ2[40,8];
+  Rd_VPR128.16B[88,8] = TMPQ2[56,8];
+  Rd_VPR128.16B[96,8] = TMPQ2[72,8];
+  Rd_VPR128.16B[104,8] = TMPQ2[88,8];
+  Rd_VPR128.16B[112,8] = TMPQ2[104,8];
+  Rd_VPR128.16B[120,8] = TMPQ2[120,8];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x4ec05800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:8 swap &=$shuffle@1-1:8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@8
+# AUNIT --inst x4ec05800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+  TMPQ2 = Rm_VPR128.2D;
+  TMPQ1 = Rn_VPR128.2D;
+  # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8
+  Rd_VPR128.2D[0,64] = TMPQ1[64,64];
+  # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8
+  Rd_VPR128.2D[64,64] = TMPQ2[64,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x0e805800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:4 swap &=$shuffle@1-1:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@4
+# AUNIT --inst x0e805800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+  TMPD2 = Rm_VPR64.2S;
+  TMPD1 = Rn_VPR64.2S;
+  # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4
+  Rd_VPR64.2S[0,32] = TMPD1[32,32];
+  # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4
+  Rd_VPR64.2S[32,32] = TMPD2[32,32];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x0e405800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1:2 swap &=$shuffle@1-2@3-3:2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@2
+# AUNIT --inst x0e405800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+  TMPD2 = Rm_VPR64.4H;
+  TMPD1 = Rn_VPR64.4H;
+  # simd shuffle Rd_VPR64.4H = TMPD1 (@1-0@3-1) lane size 2
+  Rd_VPR64.4H[0,16] = TMPD1[16,16];
+  Rd_VPR64.4H[16,16] = TMPD1[48,16];
+  # simd shuffle Rd_VPR64.4H = TMPD2 (@1-2@3-3) lane size 2
+  Rd_VPR64.4H[32,16] = TMPD2[16,16];
+  Rd_VPR64.4H[48,16] = TMPD2[48,16];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x4e805800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1:4 swap &=$shuffle@1-2@3-3:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@4
+# AUNIT --inst x4e805800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
+{
+  TMPQ2 = Rm_VPR128.4S;
+  TMPQ1 = Rn_VPR128.4S;
+  # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-0@3-1) lane size 4
+  Rd_VPR128.4S[0,32] = TMPQ1[32,32];
+  Rd_VPR128.4S[32,32] = TMPQ1[96,32];
+  # simd shuffle Rd_VPR128.4S = TMPQ2 (@1-2@3-3) lane size 4
+  Rd_VPR128.4S[64,32] = TMPQ2[32,32];
+  Rd_VPR128.4S[96,32] = TMPQ2[96,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x0e005800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1@5-2@7-3:1 swap &=$shuffle@1-4@3-5@5-6@7-7:1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@1
+# AUNIT --inst x0e005800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
+{
+  TMPD2 = Rm_VPR64.8B;
+  TMPD1 = Rn_VPR64.8B;
+  # simd shuffle Rd_VPR64.8B = TMPD1 (@1-0@3-1@5-2@7-3) lane size 1
+  Rd_VPR64.8B[0,8] = TMPD1[8,8];
+  Rd_VPR64.8B[8,8] = TMPD1[24,8];
+  Rd_VPR64.8B[16,8] = TMPD1[40,8];
+  Rd_VPR64.8B[24,8] = TMPD1[56,8];
+  # simd shuffle Rd_VPR64.8B = TMPD2 (@1-4@3-5@5-6@7-7) lane size 1
+  Rd_VPR64.8B[32,8] = TMPD2[8,8];
+  Rd_VPR64.8B[40,8] = TMPD2[24,8];
+  Rd_VPR64.8B[48,8] = TMPD2[40,8];
+  Rd_VPR64.8B[56,8] = TMPD2[56,8];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00
+# CONSTRUCT x4e405800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1@5-2@7-3:2 swap &=$shuffle@1-4@3-5@5-6@7-7:2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@2
+# AUNIT --inst x4e405800/mask=xffe0fc00 --status pass
+
+:uzp2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
+{
+  TMPQ2 = Rm_VPR128.8H;
+  TMPQ1 = Rn_VPR128.8H;
+  # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2
+  Rd_VPR128.8H[0,16] = TMPQ1[16,16];
+  Rd_VPR128.8H[16,16] = TMPQ1[48,16];
+  Rd_VPR128.8H[32,16] = TMPQ1[80,16];
+  Rd_VPR128.8H[48,16] = TMPQ1[112,16];
+  # simd shuffle Rd_VPR128.8H = TMPQ2 (@1-4@3-5@5-6@7-7) lane size 2
+  Rd_VPR128.8H[64,16] = TMPQ2[16,16];
+  Rd_VPR128.8H[80,16] = TMPQ2[48,16];
+  Rd_VPR128.8H[96,16] = TMPQ2[80,16];
+  Rd_VPR128.8H[112,16] = TMPQ2[112,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.401 XAR page C7-2301 line 129443 MATCH xce800000/mask=xffe00000
+# CONSTRUCT xce800000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 ARG3 $|@8 ARG4 =var:8 =$>>@8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_xar/3@8
+# AUNIT --inst xce800000/mask=xffe00000 --status noqemu
+# Advanced SIMD variant
+
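+# Note: C7.2.401 defines XAR as (Vn EOR Vm) ROR #imm6 per 64-bit lane; the p-code
+# below follows the SMACRO expansion above ($| and $>>), i.e. an OR followed by a
+# logical right shift, and carries the untested "noqemu" status.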
+:xar Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, LSB_bitfield64_imm
+is b_2131=0b11001110100 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & LSB_bitfield64_imm & Zd
+{
+  # simd infix TMPQ1 = Rn_VPR128.2D | Rm_VPR128.2D on lane size 8
+  TMPQ1[0,64] = Rn_VPR128.2D[0,64] | Rm_VPR128.2D[0,64];
+  TMPQ1[64,64] = Rn_VPR128.2D[64,64] | Rm_VPR128.2D[64,64];
+  local tmp2:8 = LSB_bitfield64_imm;
+  # simd infix Rd_VPR128.2D = TMPQ1 >> tmp2 on lane size 8
+  Rd_VPR128.2D[0,64] = TMPQ1[0,64] >> tmp2;
+  Rd_VPR128.2D[64,64] = TMPQ1[64,64] >> tmp2;
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00
+# CONSTRUCT x0ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@8:8 &=$shuffle@0-0@1-1:4
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn/2@8
+# AUNIT --inst x0ea12800/mask=xfffffc00 --status pass --comment "ext"
+
+:xtn Rd_VPR64.2S, Rn_VPR128.2D
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Zd
+{
+  # simd resize TMPD1 = zext(Rn_VPR128.2D) (lane size 8 to 4)
+  TMPD1[0,32] = Rn_VPR128.2D[0,32];
+  TMPD1[32,32] = Rn_VPR128.2D[64,32];
+  # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0@1-1) lane size 4
+  Rd_VPR64.2S[0,32] = TMPD1[0,32];
+  Rd_VPR64.2S[32,32] = TMPD1[32,32];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00
+# CONSTRUCT x4ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@8:8 &=$shuffle@0-2@1-3:4
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn2/2@8
+# AUNIT --inst x4ea12800/mask=xfffffc00 --status pass --comment "ext"
+
+:xtn2 Rd_VPR128.4S, Rn_VPR128.2D
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd
+{
+  # simd resize TMPD1 = zext(Rn_VPR128.2D) (lane size 8 to 4)
+  TMPD1[0,32] = Rn_VPR128.2D[0,32];
+  TMPD1[32,32] = Rn_VPR128.2D[64,32];
+  # simd shuffle Rd_VPR128.4S = TMPD1 (@0-2@1-3) lane size 4
+  Rd_VPR128.4S[64,32] = TMPD1[0,32];
+  Rd_VPR128.4S[96,32] = TMPD1[32,32];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00
+# CONSTRUCT x0e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@4:8 &=$shuffle@0-0@1-1@2-2@3-3:2
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn/2@4
+# AUNIT --inst x0e612800/mask=xfffffc00 --status pass --comment "ext"
+
+:xtn Rd_VPR64.4H, Rn_VPR128.4S
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Zd
+{
+  # simd resize TMPD1 = zext(Rn_VPR128.4S) (lane size 4 to 2)
+  TMPD1[0,16] = Rn_VPR128.4S[0,16];
+  TMPD1[16,16] = Rn_VPR128.4S[32,16];
+  TMPD1[32,16] = Rn_VPR128.4S[64,16];
+  TMPD1[48,16] = Rn_VPR128.4S[96,16];
+  # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@1-1@2-2@3-3) lane size 2
+  Rd_VPR64.4H[0,16] = TMPD1[0,16];
+  Rd_VPR64.4H[16,16] = TMPD1[16,16];
+  Rd_VPR64.4H[32,16] = TMPD1[32,16];
+  Rd_VPR64.4H[48,16] = TMPD1[48,16];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
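+# Note: despite the zext wording in the resize comments, XTN/XTN2 narrow by keeping
+# the low half of each source lane; XTN writes the narrowed result to the 64-bit
+# destination, XTN2 to the upper half of the 128-bit destination.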
+# C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00
+# CONSTRUCT x4e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@4:8 &=$shuffle@0-4@1-5@2-6@3-7:2
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn2/2@4
+# AUNIT --inst x4e612800/mask=xfffffc00 --status pass --comment "ext"
+
+:xtn2 Rd_VPR128.8H, Rn_VPR128.4S
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd
+{
+  # simd resize TMPD1 = zext(Rn_VPR128.4S) (lane size 4 to 2)
+  TMPD1[0,16] = Rn_VPR128.4S[0,16];
+  TMPD1[16,16] = Rn_VPR128.4S[32,16];
+  TMPD1[32,16] = Rn_VPR128.4S[64,16];
+  TMPD1[48,16] = Rn_VPR128.4S[96,16];
+  # simd shuffle Rd_VPR128.8H = TMPD1 (@0-4@1-5@2-6@3-7) lane size 2
+  Rd_VPR128.8H[64,16] = TMPD1[0,16];
+  Rd_VPR128.8H[80,16] = TMPD1[16,16];
+  Rd_VPR128.8H[96,16] = TMPD1[32,16];
+  Rd_VPR128.8H[112,16] = TMPD1[48,16];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00
+# CONSTRUCT x0e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@2:8 &=$shuffle@0-0@1-1@2-2@3-3@4-4@5-5@6-6@7-7:1
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn/2@2
+# AUNIT --inst x0e212800/mask=xfffffc00 --status pass --comment "ext"
+
+:xtn Rd_VPR64.8B, Rn_VPR128.8H
+is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.8H & Rd_VPR64.8B & Zd
+{
+  # simd resize TMPD1 = zext(Rn_VPR128.8H) (lane size 2 to 1)
+  TMPD1[0,8] = Rn_VPR128.8H[0,8];
+  TMPD1[8,8] = Rn_VPR128.8H[16,8];
+  TMPD1[16,8] = Rn_VPR128.8H[32,8];
+  TMPD1[24,8] = Rn_VPR128.8H[48,8];
+  TMPD1[32,8] = Rn_VPR128.8H[64,8];
+  TMPD1[40,8] = Rn_VPR128.8H[80,8];
+  TMPD1[48,8] = Rn_VPR128.8H[96,8];
+  TMPD1[56,8] = Rn_VPR128.8H[112,8];
+  # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@1-1@2-2@3-3@4-4@5-5@6-6@7-7) lane size 1
+  Rd_VPR64.8B[0,8] = TMPD1[0,8];
+  Rd_VPR64.8B[8,8] = TMPD1[8,8];
+  Rd_VPR64.8B[16,8] = TMPD1[16,8];
+  Rd_VPR64.8B[24,8] = TMPD1[24,8];
+  Rd_VPR64.8B[32,8] = TMPD1[32,8];
+  Rd_VPR64.8B[40,8] = TMPD1[40,8];
+  Rd_VPR64.8B[48,8] = TMPD1[48,8];
+  Rd_VPR64.8B[56,8] = TMPD1[56,8];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00
+# CONSTRUCT x4e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG1 ARG2 $zext@2:8 &=$shuffle@0-8@1-9@2-10@3-11@4-12@5-13@6-14@7-15:1
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn2/2@2
+# AUNIT --inst x4e212800/mask=xfffffc00 --status pass --comment "ext"
+
+:xtn2 Rd_VPR128.16B, Rn_VPR128.8H
+is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.16B & Zd
+{
+  # simd resize TMPD1 = zext(Rn_VPR128.8H) (lane size 2 to 1)
+  TMPD1[0,8] = Rn_VPR128.8H[0,8];
+  TMPD1[8,8] = Rn_VPR128.8H[16,8];
+  TMPD1[16,8] = Rn_VPR128.8H[32,8];
+  TMPD1[24,8] = Rn_VPR128.8H[48,8];
+  TMPD1[32,8] = Rn_VPR128.8H[64,8];
+  TMPD1[40,8] = Rn_VPR128.8H[80,8];
+  TMPD1[48,8] = Rn_VPR128.8H[96,8];
+  TMPD1[56,8] = Rn_VPR128.8H[112,8];
+  # simd shuffle Rd_VPR128.16B = TMPD1 (@0-8@1-9@2-10@3-11@4-12@5-13@6-14@7-15) lane size 1
+  Rd_VPR128.16B[64,8] = TMPD1[0,8];
+  Rd_VPR128.16B[72,8] = TMPD1[8,8];
+  Rd_VPR128.16B[80,8] = TMPD1[16,8];
+  Rd_VPR128.16B[88,8] = TMPD1[24,8];
+  Rd_VPR128.16B[96,8] = TMPD1[32,8];
+  Rd_VPR128.16B[104,8] = TMPD1[40,8];
+  Rd_VPR128.16B[112,8] = TMPD1[48,8];
+  Rd_VPR128.16B[120,8] = TMPD1[56,8];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00
+# CONSTRUCT x4e003800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2@2-4@3-6@4-8@5-10@6-12@7-14:1 swap &=$shuffle@0-1@1-3@2-5@3-7@4-9@5-11@6-13@7-15:1
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@1
+# AUNIT --inst x4e003800/mask=xffe0fc00 --status pass
+
+:zip1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
+{
+  TMPQ2 = Rm_VPR128.16B;
+  TMPQ1 = Rn_VPR128.16B;
+  # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@1-2@2-4@3-6@4-8@5-10@6-12@7-14) lane size 1
+  Rd_VPR128.16B[0,8] = TMPQ1[0,8];
+  Rd_VPR128.16B[16,8] = TMPQ1[8,8];
+  Rd_VPR128.16B[32,8] = TMPQ1[16,8];
+  Rd_VPR128.16B[48,8] = TMPQ1[24,8];
+  Rd_VPR128.16B[64,8] = TMPQ1[32,8];
+  Rd_VPR128.16B[80,8] = TMPQ1[40,8];
+  Rd_VPR128.16B[96,8] = TMPQ1[48,8];
+  Rd_VPR128.16B[112,8] = TMPQ1[56,8];
+  # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-1@1-3@2-5@3-7@4-9@5-11@6-13@7-15) lane size 1
+  Rd_VPR128.16B[8,8] = TMPQ2[0,8];
+  Rd_VPR128.16B[24,8] = TMPQ2[8,8];
+  Rd_VPR128.16B[40,8] = TMPQ2[16,8];
+  Rd_VPR128.16B[56,8] = TMPQ2[24,8];
+  Rd_VPR128.16B[72,8] = TMPQ2[32,8];
+  Rd_VPR128.16B[88,8] = TMPQ2[40,8];
+  Rd_VPR128.16B[104,8] = TMPQ2[48,8];
+  Rd_VPR128.16B[120,8] = TMPQ2[56,8];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00
+# CONSTRUCT x4ec03800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:8 swap &=$shuffle@0-1:8
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@8
+# AUNIT --inst x4ec03800/mask=xffe0fc00 --status pass
+
+:zip1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
+is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
+{
+  TMPQ2 = Rm_VPR128.2D;
+  TMPQ1 = Rn_VPR128.2D;
+  # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8
+  Rd_VPR128.2D[0,64] = TMPQ1[0,64];
+  # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8
+  Rd_VPR128.2D[64,64] = TMPQ2[0,64];
+  zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00
+# CONSTRUCT x0e803800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:4 swap &=$shuffle@0-1:4
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@4
+# AUNIT --inst x0e803800/mask=xffe0fc00 --status pass
+
+:zip1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
+{
+  TMPD2 = Rm_VPR64.2S;
+  TMPD1 = Rn_VPR64.2S;
+  # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4
+  Rd_VPR64.2S[0,32] = TMPD1[0,32];
+  # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4
+  Rd_VPR64.2S[32,32] = TMPD2[0,32];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
+# C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00
+# CONSTRUCT x0e403800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2:2 swap &=$shuffle@0-1@1-3:2
+# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@2
+# AUNIT --inst x0e403800/mask=xffe0fc00 --status pass
+
+:zip1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
+is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
+{
+  TMPD2 = Rm_VPR64.4H;
+  TMPD1 = Rn_VPR64.4H;
+  # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@1-2) lane size 2
+  Rd_VPR64.4H[0,16] = TMPD1[0,16];
+  Rd_VPR64.4H[32,16] = TMPD1[16,16];
+  # simd shuffle Rd_VPR64.4H = TMPD2 (@0-1@1-3) lane size 2
+  Rd_VPR64.4H[16,16] = TMPD2[0,16];
+  Rd_VPR64.4H[48,16] = TMPD2[16,16];
+  zext_zd(Zd); # zero upper 24 bytes of Zd
+}
+
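+# Note: ZIP1 interleaves the low halves of the two sources (even result lanes from
+# Rn, odd from Rm); ZIP2 below applies the same pattern to the high halves.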
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@4 +# AUNIT --inst x4e803800/mask=xffe0fc00 --status pass + +:zip1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPQ2 = Rm_VPR128.4S; + TMPQ1 = Rn_VPR128.4S; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@1-2) lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32]; + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-1@1-3) lane size 4 + Rd_VPR128.4S[32,32] = TMPQ2[0,32]; + Rd_VPR128.4S[96,32] = TMPQ2[32,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 +# CONSTRUCT x0e003800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2@2-4@3-6:1 swap &=$shuffle@0-1@1-3@2-5@3-7:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@1 +# AUNIT --inst x0e003800/mask=xffe0fc00 --status pass + +:zip1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + TMPD2 = Rm_VPR64.8B; + TMPD1 = Rn_VPR64.8B; + # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@1-2@2-4@3-6) lane size 1 + Rd_VPR64.8B[0,8] = TMPD1[0,8]; + Rd_VPR64.8B[16,8] = TMPD1[8,8]; + Rd_VPR64.8B[32,8] = TMPD1[16,8]; + Rd_VPR64.8B[48,8] = TMPD1[24,8]; + # simd shuffle Rd_VPR64.8B = TMPD2 (@0-1@1-3@2-5@3-7) lane size 1 + Rd_VPR64.8B[8,8] = TMPD2[0,8]; + Rd_VPR64.8B[24,8] = TMPD2[8,8]; + Rd_VPR64.8B[40,8] = TMPD2[16,8]; + Rd_VPR64.8B[56,8] = TMPD2[24,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 +# CONSTRUCT x4e403800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2@2-4@3-6:2 swap &=$shuffle@0-1@1-3@2-5@3-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@2 +# AUNIT --inst x4e403800/mask=xffe0fc00 --status pass + +:zip1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPQ2 = Rm_VPR128.8H; + TMPQ1 = Rn_VPR128.8H; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@1-2@2-4@3-6) lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16]; + Rd_VPR128.8H[32,16] = TMPQ1[16,16]; + Rd_VPR128.8H[64,16] = TMPQ1[32,16]; + Rd_VPR128.8H[96,16] = TMPQ1[48,16]; + # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-1@1-3@2-5@3-7) lane size 2 + Rd_VPR128.8H[16,16] = TMPQ2[0,16]; + Rd_VPR128.8H[48,16] = TMPQ2[16,16]; + Rd_VPR128.8H[80,16] = TMPQ2[32,16]; + Rd_VPR128.8H[112,16] = TMPQ2[48,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x4e007800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@8-0@9-2@10-4@11-6@12-8@13-10@14-12@15-14:1 swap &=$shuffle@8-1@9-3@10-5@11-7@12-9@13-11@14-13@15-15:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@1 +# AUNIT --inst x4e007800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd +{ + TMPQ2 = Rm_VPR128.16B; + TMPQ1 = Rn_VPR128.16B; + # simd shuffle Rd_VPR128.16B = TMPQ1 (@8-0@9-2@10-4@11-6@12-8@13-10@14-12@15-14) lane size 1 + Rd_VPR128.16B[0,8] = TMPQ1[64,8]; + 
Rd_VPR128.16B[16,8] = TMPQ1[72,8]; + Rd_VPR128.16B[32,8] = TMPQ1[80,8]; + Rd_VPR128.16B[48,8] = TMPQ1[88,8]; + Rd_VPR128.16B[64,8] = TMPQ1[96,8]; + Rd_VPR128.16B[80,8] = TMPQ1[104,8]; + Rd_VPR128.16B[96,8] = TMPQ1[112,8]; + Rd_VPR128.16B[112,8] = TMPQ1[120,8]; + # simd shuffle Rd_VPR128.16B = TMPQ2 (@8-1@9-3@10-5@11-7@12-9@13-11@14-13@15-15) lane size 1 + Rd_VPR128.16B[8,8] = TMPQ2[64,8]; + Rd_VPR128.16B[24,8] = TMPQ2[72,8]; + Rd_VPR128.16B[40,8] = TMPQ2[80,8]; + Rd_VPR128.16B[56,8] = TMPQ2[88,8]; + Rd_VPR128.16B[72,8] = TMPQ2[96,8]; + Rd_VPR128.16B[88,8] = TMPQ2[104,8]; + Rd_VPR128.16B[104,8] = TMPQ2[112,8]; + Rd_VPR128.16B[120,8] = TMPQ2[120,8]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x4ec07800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:8 swap &=$shuffle@1-1:8 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@8 +# AUNIT --inst x4ec07800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd +{ + TMPQ2 = Rm_VPR128.2D; + TMPQ1 = Rn_VPR128.2D; + # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[64,64]; + # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 + Rd_VPR128.2D[64,64] = TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x0e807800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:4 swap &=$shuffle@1-1:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@4 +# AUNIT --inst x0e807800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd +{ + TMPD2 = Rm_VPR64.2S; + TMPD1 = Rn_VPR64.2S; + # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 + Rd_VPR64.2S[0,32] = TMPD1[32,32]; + # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 + Rd_VPR64.2S[32,32] = TMPD2[32,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x0e407800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@2-0@3-2:2 swap &=$shuffle@2-1@3-3:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@2 +# AUNIT --inst x0e407800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd +{ + TMPD2 = Rm_VPR64.4H; + TMPD1 = Rn_VPR64.4H; + # simd shuffle Rd_VPR64.4H = TMPD1 (@2-0@3-2) lane size 2 + Rd_VPR64.4H[0,16] = TMPD1[32,16]; + Rd_VPR64.4H[32,16] = TMPD1[48,16]; + # simd shuffle Rd_VPR64.4H = TMPD2 (@2-1@3-3) lane size 2 + Rd_VPR64.4H[16,16] = TMPD2[32,16]; + Rd_VPR64.4H[48,16] = TMPD2[48,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x4e807800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@2-0@3-2:4 swap &=$shuffle@2-1@3-3:4 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@4 +# AUNIT --inst x4e807800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S +is b_3131=0 
& q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd +{ + TMPQ2 = Rm_VPR128.4S; + TMPQ1 = Rn_VPR128.4S; + # simd shuffle Rd_VPR128.4S = TMPQ1 (@2-0@3-2) lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[64,32]; + Rd_VPR128.4S[64,32] = TMPQ1[96,32]; + # simd shuffle Rd_VPR128.4S = TMPQ2 (@2-1@3-3) lane size 4 + Rd_VPR128.4S[32,32] = TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x0e007800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@4-0@5-2@6-4@7-6:1 swap &=$shuffle@4-1@5-3@6-5@7-7:1 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@1 +# AUNIT --inst x0e007800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B +is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd +{ + TMPD2 = Rm_VPR64.8B; + TMPD1 = Rn_VPR64.8B; + # simd shuffle Rd_VPR64.8B = TMPD1 (@4-0@5-2@6-4@7-6) lane size 1 + Rd_VPR64.8B[0,8] = TMPD1[32,8]; + Rd_VPR64.8B[16,8] = TMPD1[40,8]; + Rd_VPR64.8B[32,8] = TMPD1[48,8]; + Rd_VPR64.8B[48,8] = TMPD1[56,8]; + # simd shuffle Rd_VPR64.8B = TMPD2 (@4-1@5-3@6-5@7-7) lane size 1 + Rd_VPR64.8B[8,8] = TMPD2[32,8]; + Rd_VPR64.8B[24,8] = TMPD2[40,8]; + Rd_VPR64.8B[40,8] = TMPD2[48,8]; + Rd_VPR64.8B[56,8] = TMPD2[56,8]; + zext_zd(Zd); # zero upper 24 bytes of Zd +} + +# C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 +# CONSTRUCT x4e407800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@4-0@5-2@6-4@7-6:2 swap &=$shuffle@4-1@5-3@6-5@7-7:2 +# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@2 +# AUNIT --inst x4e407800/mask=xffe0fc00 --status pass + +:zip2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd +{ + TMPQ2 = Rm_VPR128.8H; + TMPQ1 = Rn_VPR128.8H; + # simd shuffle Rd_VPR128.8H = TMPQ1 (@4-0@5-2@6-4@7-6) lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[64,16]; + Rd_VPR128.8H[32,16] = TMPQ1[80,16]; + Rd_VPR128.8H[64,16] = TMPQ1[96,16]; + Rd_VPR128.8H[96,16] = TMPQ1[112,16]; + # simd shuffle Rd_VPR128.8H = TMPQ2 (@4-1@5-3@6-5@7-7) lane size 2 + Rd_VPR128.8H[16,16] = TMPQ2[64,16]; + Rd_VPR128.8H[48,16] = TMPQ2[80,16]; + Rd_VPR128.8H[80,16] = TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd +} + + +# C7.2.13 BFCVT page C7-1417 line 78462 MATCH x1e634000/mask=xfffffc00 +# C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 +# CONSTRUCT x1e634000/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES +# x1e634000/mask=xfffffc00 NOT MATCHED BY ANY CONSTRUCTOR +# SMACRO ARG1 ARG2 =float2float/1 +# SMACRO(pseudo) ARG1 ARG2 =NEON_bfcvt/1 +# b_0031=0001111001100011010000.......... +:bfcvt Rd_FPR16, Rn_FPR32 +is b_1031=0b0001111001100011010000 & Rd_FPR16 & Rn_FPR32 & Zd +{ + Rd_FPR16 = float2float(Rn_FPR32); + zext_zh(Zd); # zero upper 30 bytes of Zd +} + +# C7.2.14 BFCVTN, BFCVTN2 page C7-1418 line 78518 MATCH x0ea16800/mask=xbffffc00 +# CONSTRUCT x0ea16800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES +# x0ea16800/mask=xbffffc00 NOT MATCHED BY ANY CONSTRUCTOR +# SMACRO ARG1 ARG2 =var =$float2float@4:8 +# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4 +# b_0031=0.00111010100001011010.......... 
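+# Note: the BFCVTN/BFCVTN2 constructors below model the conversion as a
+# lane-wise float2float over four 32-bit lanes (mirroring the NEON_fcvtn
+# pseudo-op referenced in the SMACRO comment above) rather than as an exact
+# single-precision to bfloat16 narrowing.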
+
+:bfcvtn Rd_VPR128.4S, Rn_VPR128.4H
+is b_3131=0b0 & Q=0 & b_1029=0b00111010100001011010 & Rn_VPR128.4H & Rd_VPR128.4S & Zd
+{
+ TMPQ1 = Rn_VPR128.4H;
+ # simd resize Rd_VPR128.4S = float2float(TMPQ1) (lane size 4 to 4)
+ Rd_VPR128.4S[0,32] = float2float(TMPQ1[0,32]);
+ Rd_VPR128.4S[32,32] = float2float(TMPQ1[32,32]);
+ Rd_VPR128.4S[64,32] = float2float(TMPQ1[64,32]);
+ Rd_VPR128.4S[96,32] = float2float(TMPQ1[96,32]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.14 BFCVTN, BFCVTN2 page C7-1418 line 78518 MATCH x0ea16800/mask=xbffffc00
+# CONSTRUCT x4ea16800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
+# x0ea16800/mask=xbffffc00 NOT MATCHED BY ANY CONSTRUCTOR
+# SMACRO ARG1 ARG2 =var =$float2float@4:8
+# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4
+
+:bfcvtn2 Rd_VPR128.4S, Rn_VPR128.8H
+is b_3131=0b0 & Q=1 & b_1029=0b00111010100001011010 & Rn_VPR128.8H & Rd_VPR128.4S & Zd
+{
+ TMPQ1 = Rn_VPR128.8H;
+ # simd resize Rd_VPR128.4S = float2float(TMPQ1) (lane size 4 to 4)
+ Rd_VPR128.4S[0,32] = float2float(TMPQ1[0,32]);
+ Rd_VPR128.4S[32,32] = float2float(TMPQ1[32,32]);
+ Rd_VPR128.4S[64,32] = float2float(TMPQ1[64,32]);
+ Rd_VPR128.4S[96,32] = float2float(TMPQ1[96,32]);
+ zext_zq(Zd); # zero upper 16 bytes of Zd
+}
+
+# C7.2.15 BFDOT (by element) page C7-1420 line 78603 MATCH x0f40f000/mask=xbfc0f400
+# CONSTRUCT x0f40f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# x0f40f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR
+# b_0031=0.00111101......1111.0..........
+
+:bfdot Rd_VPR128.2S, Rn_VPR128.4H, Re_VPR128.H.vIndexHL
+is b_3131=0b0 & Q=0 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.4H & Rd_VPR128.2S
+{
+ Rd_VPR128.2S = NEON_bfdot(Rn_VPR128.4H, Re_VPR128.H.vIndexHL);
+}
+
+# C7.2.15 BFDOT (by element) page C7-1420 line 78603 MATCH x0f40f000/mask=xbfc0f400
+# CONSTRUCT x4f40f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+
+:bfdot Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128.H.vIndexHL
+is b_3131=0b0 & Q=1 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S
+{
+ Rd_VPR128.4S = NEON_bfdot(Rn_VPR128.8H, Re_VPR128.H.vIndexHL);
+}
+
+# C7.2.16 BFDOT (vector) page C7-1422 line 78694 MATCH x2e40fc00/mask=xbfe0fc00
+# CONSTRUCT x2e40fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+# x2e40fc00/mask=xbfe0fc00 NOT MATCHED BY ANY CONSTRUCTOR
+# b_0031=0.101110010.....111111..........
+
+:bfdot Rd_VPR128.2S, Rn_VPR128.4H, Rm_VPR128.4H
+is b_3131=0b0 & Q=0 & b_2129=0b101110010 & Rm_VPR128.4H & b_1015=0b111111 & Rn_VPR128.4H & Rd_VPR128.2S
+{
+ Rd_VPR128.2S = NEON_bfdot(Rn_VPR128.4H, Rm_VPR128.4H);
+}
+
+# C7.2.16 BFDOT (vector) page C7-1422 line 78694 MATCH x2e40fc00/mask=xbfe0fc00
+# CONSTRUCT x6e40fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
+
+:bfdot Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H
+is b_3131=0b0 & Q=1 & b_2129=0b101110010 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S
+{
+ Rd_VPR128.4S = NEON_bfdot(Rn_VPR128.8H, Rm_VPR128.8H);
+}
+
+# C7.2.17 BFMLALB, BFMLALT (by element) page C7-1424 line 78780 MATCH x0fc0f000/mask=xbfc0f400
+# CONSTRUCT x0fc0f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
+# x0fc0f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR
+# b_0031=0.00111111......1111.0..........
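+# Note: BFMLALB/BFMLALT widen bfloat16 products into the single-precision
+# destination lanes (B takes the even-numbered source lanes, T the odd-numbered
+# ones); the semantics are kept opaque behind the NEON_bfmlalb/NEON_bfmlalt
+# pcodeops rather than being expanded lane by lane.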
+ +:bfmlalb Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0b0 & Q=0 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_bfmlalb(Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM); +} + +# C7.2.17 BFMLALB, BFMLALT (by element) page C7-1424 line 78780 MATCH x0fc0f000/mask=xbfc0f400 +# CONSTRUCT x4fc0f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES + +:bfmlalt Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM +is b_3131=0b0 & Q=1 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_bfmlalt(Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM); +} + +# C7.2.18 BFMLALB, BFMLALT (vector) page C7-1426 line 78870 MATCH x2ec0fc00/mask=xbfe0fc00 +# CONSTRUCT x2ec0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# x2ec0fc00/mask=xbfe0fc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.101110110.....111111.......... + +:bfmlalb Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0b0 & Q=0 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_bfmlalb(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); +} + +# C7.2.18 BFMLALB, BFMLALT (vector) page C7-1426 line 78870 MATCH x2ec0fc00/mask=xbfe0fc00 +# CONSTRUCT x6ec0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES + +:bfmlalt Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_3131=0b0 & Q=1 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_bfmlalt(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); +} + +# C7.2.19 BFMMLA page C7-1427 line 78943 MATCH x6e40ec00/mask=xffe0fc00 +# CONSTRUCT x6e40ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# x6e40ec00/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=01101110010.....111011.......... + +:bfmmla Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H +is b_2131=0b01101110010 & Rm_VPR128.8H & b_1015=0b111011 & Rn_VPR128.8H & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_bfmmla(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); +} + +# C7.2.147 FRINT32X (vector) page C7-1726 line 96547 MATCH x2e21e800/mask=xbfbffc00 +# CONSTRUCT x2e21e800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES +# x2e21e800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.1011100.100001111010.......... + +:frint32x +is b_3131=0b0 & Q & b_2329=0b1011100 & b_22 & b_1021=0b100001111010 & Rn & Rd +unimpl + +# C7.2.148 FRINT32X (scalar) page C7-1728 line 96636 MATCH x1e28c000/mask=xffbffc00 +# CONSTRUCT x1e28c000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES +# x1e28c000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=000111100.101000110000.......... + +:frint32x +is b_2331=0b000111100 & b_22 & b_1021=0b101000110000 & Rn & Rd +unimpl + +# C7.2.149 FRINT32Z (vector) page C7-1730 line 96730 MATCH x0e21e800/mask=xbfbffc00 +# CONSTRUCT x0e21e800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES +# x0e21e800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.0011100.100001111010.......... + +:frint32z +is b_3131=0b0 & Q & b_2329=0b0011100 & b_22 & b_1021=0b100001111010 & Rn & Rd +unimpl + +# C7.2.150 FRINT32Z (scalar) page C7-1732 line 96819 MATCH x1e284000/mask=xffbffc00 +# CONSTRUCT x1e284000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES +# x1e284000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=000111100.101000010000.......... 
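+# Note: the FRINT32*/FRINT64* constructors in this group only decode; their
+# 'unimpl' bodies carry no p-code, so lifting one of these instructions
+# reports an unimplemented instruction instead of producing wrong semantics.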
+ +:frint32z +is b_2331=0b000111100 & b_22 & b_1021=0b101000010000 & Rn & Rd +unimpl + +# C7.2.151 FRINT64X (vector) page C7-1734 line 96910 MATCH x2e21f800/mask=xbfbffc00 +# CONSTRUCT x2e21f800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES +# x2e21f800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.1011100.100001111110.......... + +:frint64x +is b_3131=0b0 & Q & b_2329=0b1011100 & b_22 & b_1021=0b100001111110 & Rn & Rd +unimpl + +# C7.2.152 FRINT64X (scalar) page C7-1736 line 96999 MATCH x1e29c000/mask=xffbffc00 +# CONSTRUCT x1e29c000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES +# x1e29c000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=000111100.101001110000.......... + +:frint64x +is b_2331=0b000111100 & b_22 & b_1021=0b101001110000 & Rn & Rd +unimpl + +# C7.2.153 FRINT64Z (vector) page C7-1738 line 97093 MATCH x0e21f800/mask=xbfbffc00 +# CONSTRUCT x0e21f800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES +# x0e21f800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.0011100.100001111110.......... + +:frint64z +is b_3131=0b0 & Q & b_2329=0b0011100 & b_22 & b_1021=0b100001111110 & Rn & Rd +unimpl + +# C7.2.154 FRINT64Z (scalar) page C7-1740 line 97182 MATCH x1e294000/mask=xffbffc00 +# CONSTRUCT x1e294000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES +# x1e294000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=000111100.101001010000.......... + +:frint64z +is b_2331=0b000111100 & b_22 & b_1021=0b101001010000 & Rn & Rd +unimpl + +# C7.2.278 SMMLA (vector) page C7-2006 line 112254 MATCH x4e80a400/mask=xffe0fc00 +# CONSTRUCT x4e80a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# x4e80a400/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=01001110100.....101001.......... + +:smmla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B +is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_smmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); +} + +# C7.2.336 SUDOT (by element) page C7-2163 line 121691 MATCH x0f00f000/mask=xbfc0f400 +# CONSTRUCT x0f00f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# x0f00f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.00111100......1111.0.......... + +:sudot Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL +is b_3131=0b0 & Q=0 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8B & Rd_VPR128.2S +{ + Rd_VPR128.2S = NEON_sudot(Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL); +} + +# C7.2.336 SUDOT (by element) page C7-2163 line 121691 MATCH x0f00f000/mask=xbfc0f400 +# CONSTRUCT x4f00f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES + +:sudot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL +is b_3131=0b0 & Q=1 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.16B & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_sudot(Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL); +} + +# C7.2.370 UMMLA (vector) page C7-2235 line 125634 MATCH x6e80a400/mask=xffe0fc00 +# CONSTRUCT x6e80a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# x6e80a400/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_ummla/3@1 +# b_0031=01101110100.....101001.......... 
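+# Note: SMMLA/UMMLA treat the 4S destination as a 2x2 matrix of int32
+# accumulators, each lane accumulating an eight-way dot product of byte lanes
+# from Rn and Rm (signed for SMMLA, unsigned for UMMLA); both are modeled as
+# opaque pcodeops here.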
+:ummla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B +is b_2131=0b01101110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_ummla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} + +# C7.2.388 USDOT (vector) page C7-2273 line 127924 MATCH x0e809c00/mask=xbfe0fc00 +# CONSTRUCT x0e809c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# x0e809c00/mask=xbfe0fc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.001110100.....100111.......... + +:usdot Rd_VPR128.2S, Rn_VPR128.8B, Rm_VPR128.8B +is b_3131=0b0 & Q=0 & b_2129=0b001110100 & Rm_VPR128.8B & b_1015=0b100111 & Rn_VPR128.8B & Rd_VPR128.2S +{ + Rd_VPR128.2S = NEON_usdot(Rd_VPR128.2S, Rn_VPR128.8B, Rm_VPR128.8B); +} + +# C7.2.388 USDOT (vector) page C7-2273 line 127924 MATCH x0e809c00/mask=xbfe0fc00 +# CONSTRUCT x4e809c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES + +:usdot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B +is b_3131=0b0 & Q=1 & b_2129=0b001110100 & Rn_VPR128.16B & b_1015=0b100111 & Rm_VPR128.16B & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_usdot(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); +} + +# C7.2.389 USDOT (by element) page C7-2275 line 128010 MATCH x0f80f000/mask=xbfc0f400 +# CONSTRUCT x0f80f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES +# x0f80f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=0.00111110......1111.0.......... + +:usdot Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL +is b_3131=0b0 & Q=0 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8B & Rd_VPR128.2S +{ + Rd_VPR128.2S = NEON_usdot(Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL); +} + +# C7.2.389 USDOT (by element) page C7-2275 line 128010 MATCH x0f80f000/mask=xbfc0f400 +# CONSTRUCT x4f80f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES + +:usdot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL +is b_3131=0b0 & Q=1 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.16B & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_usdot(Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL); +} + +# C7.2.393 USMMLA (vector) page C7-2285 line 128543 MATCH x4e80ac00/mask=xffe0fc00 +# CONSTRUCT x4e80ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES +# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_usmmla/3@1 +# x4e80ac00/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR +# b_0031=01001110100.....101011.......... 
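+# Note: USMMLA mixes signedness, multiplying unsigned byte lanes from Rn by
+# signed byte lanes from Rm (as USDOT does above); it is likewise left to the
+# opaque NEON_usmmla pcodeop.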
+:usmmla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B +is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101011 & Rn_VPR128.16B & Rd_VPR128.4S +{ + Rd_VPR128.4S = NEON_usmmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); +} diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64sve.sinc b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64sve.sinc new file mode 100644 index 00000000..958b4669 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AARCH64sve.sinc @@ -0,0 +1,7405 @@ +# INFO This file automatically generated by andre on Mon Apr 30 14:51:39 2018 +# INFO Direct edits to this file may be lost in future updates +# INFO Command line arguments: ['../../../ProcessorTest/test/andre/scrape/sveit.py', '--sinc'] + +# abs_z_p_z.xml: ABS variant SVE +# PATTERN x0416a000/mask=xff3fe000 + +:abs Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_abs(Zd.T, Pg3_m, Zn.T); +} + +# add_z_p_zz.xml: ADD (vectors, predicated) variant SVE +# PATTERN x04000000/mask=xff3fe000 + +:add Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_add(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# add_z_zi.xml: ADD (immediate) variant SVE +# PATTERN x2520c000/mask=xff3fc000 + +:add Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = SVE_add(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# add_z_zz.xml: ADD (vectors, unpredicated) variant SVE +# PATTERN x04200000/mask=xff20fc00 + +:add Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_add(Zd.T, Zn.T, Zm.T); +} + +# addpl_r_ri.xml: ADDPL variant SVE +# PATTERN x04605000/mask=xffe0f800 + +:addpl Rd_GPR64xsp, Rm_GPR64xsp, "#"^sve_imm6_1_m32to31 +is sve_b_2331=0b000001000 & sve_b_22=1 & sve_b_21=1 & sve_rn_1620 & sve_b_1115=0b01010 & sve_imm6_0510 & sve_rd_0004 & Rd_GPR64xsp & Rm_GPR64xsp & sve_imm6_1_m32to31 +{ + Rd_GPR64xsp = SVE_addpl(Rd_GPR64xsp, Rm_GPR64xsp, sve_imm6_1_m32to31:1); +} + +# addvl_r_ri.xml: ADDVL variant SVE +# PATTERN x04205000/mask=xffe0f800 + +:addvl Rd_GPR64xsp, Rm_GPR64xsp, "#"^sve_imm6_1_m32to31 +is sve_b_2331=0b000001000 & sve_b_22=0 & sve_b_21=1 & sve_rn_1620 & sve_b_1115=0b01010 & sve_imm6_0510 & sve_rd_0004 & Rd_GPR64xsp & Rm_GPR64xsp & sve_imm6_1_m32to31 +{ + Rd_GPR64xsp = SVE_addvl(Rd_GPR64xsp, Rm_GPR64xsp, sve_imm6_1_m32to31:1); +} + +# adr_z_az.xml: ADR variant Packed offsets +# PATTERN x04a0a000/mask=xffa0f000 + +:adr Zd.T_sz, [Zn.T_sz, Zm.T_sz^sve_mod_amount] +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1010 & sve_msz_1011 & sve_zn_0509 & sve_zd_0004 & sve_mod_amount & Zm.T_sz & Zd.T_sz & Zn.T_sz +{ + Zd.T_sz = SVE_adr(Zd.T_sz, Zn.T_sz, Zm.T_sz, sve_mod_amount:1); +} + +# adr_z_az.xml: ADR variant Unpacked 32-bit signed offsets +# PATTERN x0420a000/mask=xffe0f000 + +:adr Zd.D, [Zn.D, Zm.D, "sxtw"^sve_msz_1011] +is sve_b_2431=0b00000100 & 
sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1010 & sve_msz_1011 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D +{ + Zd.D = SVE_adr(Zd.D, Zn.D, Zm.D, sve_msz_1011:1); +} + +# adr_z_az.xml: ADR variant Unpacked 32-bit unsigned offsets +# PATTERN x0460a000/mask=xffe0f000 + +:adr Zd.D, [Zn.D, Zm.D, "uxtw"^sve_msz_1011] +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1010 & sve_msz_1011 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D +{ + Zd.D = SVE_adr(Zd.D, Zn.D, Zm.D, sve_msz_1011:1); +} + +# and_p_p_pp.xml: AND, ANDS (predicates) variant Flag setting +# PATTERN x25404000/mask=xfff0c210 + +:ands Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_s_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_ands(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# and_p_p_pp.xml: AND, ANDS (predicates) variant Not flag setting +# PATTERN x25004000/mask=xfff0c210 + +:and Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_s_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_and(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# and_z_p_zz.xml: AND (vectors, predicated) variant SVE +# PATTERN x041a0000/mask=xff3fe000 + +:and Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_and(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# and_z_zi.xml: AND (immediate) variant SVE +# PATTERN x05800000/mask=xfffc0000 + +:and Zd.T_imm13, Zd.T_imm13_2, "#"^sve_decode_bit_mask +is sve_b_2431=0b00000101 & sve_b_23=1 & sve_b_22=0 & sve_b_1821=0b0000 & sve_imm13_0517 & sve_zdn_0004 & sve_decode_bit_mask & Zd.T_imm13 & Zd.T_imm13_2 +{ + Zd.T_imm13 = SVE_and(Zd.T_imm13, Zd.T_imm13_2, sve_decode_bit_mask:1); +} + +# and_z_zz.xml: AND (vectors, unpredicated) variant SVE +# PATTERN x04203000/mask=xffe0fc00 + +:and Zd.D, Zn.D, Zm.D +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D +{ + Zd.D = SVE_and(Zd.D, Zn.D, Zm.D); +} + +# andv_r_p_z.xml: ANDV variant SVE +# PATTERN x041a2000/mask=xff3fe000 + +:andv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_andv(Rd_FPR8, Pg3, Zn.T); +} + +# andv_r_p_z.xml: ANDV variant SVE +# PATTERN x041a2000/mask=xff3fe000 + +:andv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_andv(Rd_FPR32, Pg3, Zn.T); +} + +# andv_r_p_z.xml: ANDV variant SVE +# PATTERN x041a2000/mask=xff3fe000 + +:andv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_andv(Rd_FPR16, Pg3, Zn.T); +} + +# andv_r_p_z.xml: ANDV variant SVE +# PATTERN x041a2000/mask=xff3fe000 + +:andv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b011 & 
sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_andv(Rd_FPR64, Pg3, Zn.T); +} + +# asr_z_p_zi.xml: ASR (immediate, predicated) variant SVE +# PATTERN x04008000/mask=xff3fe000 + +:asr Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift +is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m +{ + Zd.T_tszh = SVE_asr(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1); +} + +# asr_z_p_zw.xml: ASR (wide elements, predicated) variant SVE +# PATTERN x04188000/mask=xff3fe000 + +:asr Zd.T, Pg3_m, Zd.T_2, Zn.D +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Pg3_m & Zn.D +{ + Zd.T = SVE_asr(Zd.T, Pg3_m, Zd.T_2, Zn.D); +} + +# asr_z_p_zz.xml: ASR (vectors) variant SVE +# PATTERN x04108000/mask=xff3fe000 + +:asr Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_asr(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# asr_z_zi.xml: ASR (immediate, unpredicated) variant SVE +# PATTERN x04209000/mask=xff20fc00 + +:asr Zd.T_tszh, Zn.T_tszh, "#"^sve_imm_shift +is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_21=1 & sve_tszl_1920 & sve_imm3_1618 & sve_b_1215=0b1001 & sve_b_11=0 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & sve_imm_shift & Zd.T_tszh & Zn.T_tszh +{ + Zd.T_tszh = SVE_asr(Zd.T_tszh, Zn.T_tszh, sve_imm_shift:1); +} + +# asr_z_zw.xml: ASR (wide elements, unpredicated) variant SVE +# PATTERN x04208000/mask=xff20fc00 + +:asr Zd.T, Zn.T, Zm.D +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1000 & sve_b_11=0 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Zm.D +{ + Zd.T = SVE_asr(Zd.T, Zn.T, Zm.D); +} + +# asrd_z_p_zi.xml: ASRD variant SVE +# PATTERN x04048000/mask=xff3fe000 + +:asrd Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift +is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m +{ + Zd.T_tszh = SVE_asrd(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1); +} + +# asrr_z_p_zz.xml: ASRR variant SVE +# PATTERN x04148000/mask=xff3fe000 + +:asrr Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_asrr(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# bic_and_z_zi.xml: BIC (immediate) variant SVE +# ALIASEDBY AND ., ., #(- - 1) if Never +# PATTERN x05800000/mask=xfffc0000 + +# SKIPPING bic_and_z_zi.xml because x05800000/mask=xfffc0000 has already been defined + +# bic_p_p_pp.xml: BIC, BICS (predicates) variant Flag setting +# PATTERN x25404010/mask=xfff0c210 + +:bics Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_bics(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# bic_p_p_pp.xml: BIC, BICS (predicates) variant Not flag setting +# PATTERN 
x25004010/mask=xfff0c210 + +:bic Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_bic(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# bic_z_p_zz.xml: BIC (vectors, predicated) variant SVE +# PATTERN x041b0000/mask=xff3fe000 + +:bic Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_bic(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# bic_z_zz.xml: BIC (vectors, unpredicated) variant SVE +# PATTERN x04e03000/mask=xffe0fc00 + +:bic Zd.D, Zn.D, Zm.D +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D +{ + Zd.D = SVE_bic(Zd.D, Zn.D, Zm.D); +} + +# brka_p_p_p.xml: BRKA, BRKAS variant Flag setting +# PATTERN x25504000/mask=xffffc210 + +:brkas Pd.B, Pg_z, Pn.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B +{ + Pd.B = SVE_brkas(Pd.B, Pg_z, Pn.B); +} + +# brka_p_p_p.xml: BRKA, BRKAS variant Not flag setting +# PATTERN x25104000/mask=xffffc200 + +:brka Pd.B, Pg_zm, Pn.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_m_04 & sve_pd_0003 & Pg_zm & Pd.B & Pn.B +{ + Pd.B = SVE_brka(Pd.B, Pg_zm, Pn.B); +} + +# brkb_p_p_p.xml: BRKB, BRKBS variant Flag setting +# PATTERN x25d04000/mask=xffffc210 + +:brkbs Pd.B, Pg_z, Pn.B +is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B +{ + Pd.B = SVE_brkbs(Pd.B, Pg_z, Pn.B); +} + +# brkb_p_p_p.xml: BRKB, BRKBS variant Not flag setting +# PATTERN x25904000/mask=xffffc200 + +:brkb Pd.B, Pg_zm, Pn.B +is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_m_04 & sve_pd_0003 & Pg_zm & Pd.B & Pn.B +{ + Pd.B = SVE_brkb(Pd.B, Pg_zm, Pn.B); +} + +# brkn_p_p_pp.xml: BRKN, BRKNS variant Flag setting +# PATTERN x25584000/mask=xffffc210 + +:brkns Pd.B, Pg_z, Pn.B, Pd.B_2 +is sve_b_2331=0b001001010 & sve_b_22=1 & sve_b_1421=0b01100001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pdm_0003 & Pd.B & Pd.B_2 & Pg_z & Pn.B +{ + Pd.B = SVE_brkns(Pd.B, Pg_z, Pn.B, Pd.B_2); +} + +# brkn_p_p_pp.xml: BRKN, BRKNS variant Not flag setting +# PATTERN x25184000/mask=xffffc210 + +:brkn Pd.B, Pg_z, Pn.B, Pd.B_2 +is sve_b_2331=0b001001010 & sve_b_22=0 & sve_b_1421=0b01100001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pdm_0003 & Pd.B & Pd.B_2 & Pg_z & Pn.B +{ + Pd.B = SVE_brkn(Pd.B, Pg_z, Pn.B, Pd.B_2); +} + +# brkpa_p_p_pp.xml: BRKPA, BRKPAS variant Flag setting +# PATTERN x2540c000/mask=xfff0c210 + +:brkpas Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_brkpas(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# brkpa_p_p_pp.xml: BRKPA, BRKPAS variant Not flag setting +# PATTERN x2500c000/mask=xfff0c210 + +:brkpa Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & 
sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_brkpa(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# brkpb_p_p_pp.xml: BRKPB, BRKPBS variant Flag setting +# PATTERN x2540c010/mask=xfff0c210 + +:brkpbs Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_brkpbs(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# brkpb_p_p_pp.xml: BRKPB, BRKPBS variant Not flag setting +# PATTERN x2500c010/mask=xfff0c210 + +:brkpb Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_brkpb(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# clasta_r_p_z.xml: CLASTA (scalar) variant SVE +# PATTERN x0530a000/mask=xff3fe000 + +:clasta Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b11000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR64 & Rd_GPR64_2 & Pg3 +{ + Rd_GPR64 = SVE_clasta(Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T); +} + +# clasta_r_p_z.xml: CLASTA (scalar) variant SVE +# PATTERN x0530a000/mask=xff3fe000 + +:clasta Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b11000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR32 & Rd_GPR32_2 & Pg3 +{ + Rd_GPR32 = SVE_clasta(Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T); +} + +# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE +# PATTERN x052a8000/mask=xff3fe000 + +:clasta Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR8 & Rd_FPR8_2 & Pg3 +{ + Rd_FPR8 = SVE_clasta(Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T); +} + +# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE +# PATTERN x052a8000/mask=xff3fe000 + +:clasta Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR32 & Rd_FPR32_2 & Pg3 +{ + Rd_FPR32 = SVE_clasta(Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T); +} + +# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE +# PATTERN x052a8000/mask=xff3fe000 + +:clasta Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR16 & Rd_FPR16_2 & Pg3 +{ + Rd_FPR16 = SVE_clasta(Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T); +} + +# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE +# PATTERN x052a8000/mask=xff3fe000 + +:clasta Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR64 & Rd_FPR64_2 & Pg3 +{ + Rd_FPR64 = SVE_clasta(Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T); +} + +# clasta_z_p_zz.xml: CLASTA (vectors) variant SVE +# PATTERN x05288000/mask=xff3fe000 + +:clasta Zd.T, Pg3, Zd.T_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1721=0b10100 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3 +{ + Zd.T = SVE_clasta(Zd.T, Pg3, Zd.T_2, Zn.T); +} + +# 
clastb_r_p_z.xml: CLASTB (scalar) variant SVE +# PATTERN x0531a000/mask=xff3fe000 + +:clastb Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b11000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR64 & Rd_GPR64_2 & Pg3 +{ + Rd_GPR64 = SVE_clastb(Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T); +} + +# clastb_r_p_z.xml: CLASTB (scalar) variant SVE +# PATTERN x0531a000/mask=xff3fe000 + +:clastb Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b11000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR32 & Rd_GPR32_2 & Pg3 +{ + Rd_GPR32 = SVE_clastb(Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T); +} + +# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE +# PATTERN x052b8000/mask=xff3fe000 + +:clastb Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR8 & Rd_FPR8_2 & Pg3 +{ + Rd_FPR8 = SVE_clastb(Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T); +} + +# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE +# PATTERN x052b8000/mask=xff3fe000 + +:clastb Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR32 & Rd_FPR32_2 & Pg3 +{ + Rd_FPR32 = SVE_clastb(Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T); +} + +# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE +# PATTERN x052b8000/mask=xff3fe000 + +:clastb Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR16 & Rd_FPR16_2 & Pg3 +{ + Rd_FPR16 = SVE_clastb(Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T); +} + +# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE +# PATTERN x052b8000/mask=xff3fe000 + +:clastb Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR64 & Rd_FPR64_2 & Pg3 +{ + Rd_FPR64 = SVE_clastb(Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T); +} + +# clastb_z_p_zz.xml: CLASTB (vectors) variant SVE +# PATTERN x05298000/mask=xff3fe000 + +:clastb Zd.T, Pg3, Zd.T_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1721=0b10100 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3 +{ + Zd.T = SVE_clastb(Zd.T, Pg3, Zd.T_2, Zn.T); +} + +# cls_z_p_z.xml: CLS variant SVE +# PATTERN x0418a000/mask=xff3fe000 + +:cls Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_cls(Zd.T, Pg3_m, Zn.T); +} + +# clz_z_p_z.xml: CLZ variant SVE +# PATTERN x0419a000/mask=xff3fe000 + +:clz Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_clz(Zd.T, Pg3_m, Zn.T); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Equal +# PATTERN x25008000/mask=xff20e010 + +:cmpeq Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T 
& Pg3_z & sve_imm5s_1620 +{ + Pd.T = SVE_cmpeq(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Greater than +# PATTERN x25000010/mask=xff20e010 + +:cmpgt Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 +{ + Pd.T = SVE_cmpgt(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Greater than or equal +# PATTERN x25000000/mask=xff20e010 + +:cmpge Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 +{ + Pd.T = SVE_cmpge(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Higher +# PATTERN x24200010/mask=xff202010 + +:cmphi Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmphi(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Higher or same +# PATTERN x24200000/mask=xff202010 + +:cmphs Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmphs(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Less than +# PATTERN x25002000/mask=xff20e010 + +:cmplt Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 +{ + Pd.T = SVE_cmplt(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Less than or equal +# PATTERN x25002010/mask=xff20e010 + +:cmple Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 +{ + Pd.T = SVE_cmple(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Lower +# PATTERN x24202000/mask=xff202010 + +:cmplo Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmplo(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Lower or same +# PATTERN x24202010/mask=xff202010 + +:cmpls Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmpls(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); +} + +# cmpeq_p_p_zi.xml: CMP (immediate) variant Not equal +# PATTERN x25008010/mask=xff20e010 + +:cmpne Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 +{ + Pd.T = 
SVE_cmpne(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Equal +# PATTERN x24002000/mask=xff20e010 + +:cmpeq Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmpeq(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Greater than +# PATTERN x24004010/mask=xff20e010 + +:cmpgt Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmpgt(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Greater than or equal +# PATTERN x24004000/mask=xff20e010 + +:cmpge Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmpge(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Higher +# PATTERN x2400c010/mask=xff20e010 + +:cmphi Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmphi(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Higher or same +# PATTERN x2400c000/mask=xff20e010 + +:cmphs Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmphs(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Less than +# PATTERN x24006000/mask=xff20e010 + +:cmplt Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmplt(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Less than or equal +# PATTERN x24006010/mask=xff20e010 + +:cmple Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmple(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Lower +# PATTERN x2400e000/mask=xff20e010 + +:cmplo Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmplo(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Lower or same +# PATTERN x2400e010/mask=xff20e010 + +:cmpls Pd.T, Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmpls(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zw.xml: CMP (wide elements) variant Not equal +# PATTERN x24002010/mask=xff20e010 + +:cmpne Pd.T, 
Pg3_z, Zn.T, Zm.D +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D +{ + Pd.T = SVE_cmpne(Pd.T, Pg3_z, Zn.T, Zm.D); +} + +# cmpeq_p_p_zz.xml: CMP (vectors) variant Equal +# PATTERN x2400a000/mask=xff20e010 + +:cmpeq Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmpeq(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# cmpeq_p_p_zz.xml: CMP (vectors) variant Greater than +# PATTERN x24008010/mask=xff20e010 + +:cmpgt Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmpgt(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# cmpeq_p_p_zz.xml: CMP (vectors) variant Greater than or equal +# PATTERN x24008000/mask=xff20e010 + +:cmpge Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmpge(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# cmpeq_p_p_zz.xml: CMP (vectors) variant Higher +# PATTERN x24000010/mask=xff20e010 + +:cmphi Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmphi(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# cmpeq_p_p_zz.xml: CMP (vectors) variant Higher or same +# PATTERN x24000000/mask=xff20e010 + +:cmphs Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmphs(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# cmpeq_p_p_zz.xml: CMP (vectors) variant Not equal +# PATTERN x2400a010/mask=xff20e010 + +:cmpne Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_cmpne(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# cmple_cmpeq_p_p_zz.xml: CMPLE (vectors) variant Greater than or equal +# ALIASEDBY CMPGE ., /Z, ., . if Never +# PATTERN x24008000/mask=xff20e010 + +# SKIPPING cmple_cmpeq_p_p_zz.xml because x24008000/mask=xff20e010 has already been defined + +# cmplo_cmpeq_p_p_zz.xml: CMPLO (vectors) variant Higher +# ALIASEDBY CMPHI ., /Z, ., . if Never +# PATTERN x24000010/mask=xff20e010 + +# SKIPPING cmplo_cmpeq_p_p_zz.xml because x24000010/mask=xff20e010 has already been defined + +# cmpls_cmpeq_p_p_zz.xml: CMPLS (vectors) variant Higher or same +# ALIASEDBY CMPHS ., /Z, ., . if Never +# PATTERN x24000000/mask=xff20e010 + +# SKIPPING cmpls_cmpeq_p_p_zz.xml because x24000000/mask=xff20e010 has already been defined + +# cmplt_cmpeq_p_p_zz.xml: CMPLT (vectors) variant Greater than +# ALIASEDBY CMPGT ., /Z, ., . 
if Never +# PATTERN x24008010/mask=xff20e010 + +# SKIPPING cmplt_cmpeq_p_p_zz.xml because x24008010/mask=xff20e010 has already been defined + +# cnot_z_p_z.xml: CNOT variant SVE +# PATTERN x041ba000/mask=xff3fe000 + +:cnot Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_cnot(Zd.T, Pg3_m, Zn.T); +} + +# cnt_z_p_z.xml: CNT variant SVE +# PATTERN x041aa000/mask=xff3fe000 + +:cnt Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_cnt(Zd.T, Pg3_m, Zn.T); +} + +# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Byte +# PATTERN x0420e000/mask=xfff0fc00 + +:cntb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern +{ + Rd_GPR64 = SVE_cntb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Doubleword +# PATTERN x04e0e000/mask=xfff0fc00 + +:cntd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern +{ + Rd_GPR64 = SVE_cntd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Halfword +# PATTERN x0460e000/mask=xfff0fc00 + +:cnth Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern +{ + Rd_GPR64 = SVE_cnth(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Word +# PATTERN x04a0e000/mask=xfff0fc00 + +:cntw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern +{ + Rd_GPR64 = SVE_cntw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# cntp_r_p_p.xml: CNTP variant SVE +# PATTERN x25208000/mask=xff3fc200 + +:cntp Rd_GPR64, Pg, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b10 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_rd_0004 & Pn.T & Rd_GPR64 & Pg +{ + Rd_GPR64 = SVE_cntp(Rd_GPR64, Pn.T, Pg); +} + +# compact_z_p_z.xml: COMPACT variant SVE +# PATTERN x05a18000/mask=xffbfe000 + +:compact Zd.T_sz, Pg3, Zn.T_sz +is sve_b_2331=0b000001011 & sve_sz_22 & sve_b_1321=0b100001100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T_sz & Zn.T_sz & Pg3 +{ + Zd.T_sz = SVE_compact(Zd.T_sz, Pg3, Zn.T_sz); +} + +# cpy_z_p_i.xml: CPY (immediate) variant SVE +# PATTERN x05100000/mask=xff308000 + +:cpy Zd.T, Pm_zm, sve_shf8_1_m128to127 +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b01 & sve_pg_1619 & sve_b_15=0 & sve_m_14 & sve_sh_13 & sve_imm8_0512 & sve_zd_0004 & sve_shift_13 & Pm_zm & Zd.T & sve_imm8_1_m128to127 & sve_shf8_1_m128to127 +{ + Zd.T = SVE_cpy(Zd.T, Pm_zm, sve_shf8_1_m128to127, sve_shift_13:1); +} + +# cpy_z_p_r.xml: CPY (scalar) variant SVE +# 
PATTERN x0528a000/mask=xff3fe000 + +:cpy Zd.T, Pg3_m, Rn_GPR64xsp +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1321=0b101000101 & sve_pg_1012 & sve_rn_0509 & sve_zd_0004 & Rn_GPR64xsp & Zd.T & Pg3_m +{ + Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_GPR64xsp); +} + +# cpy_z_p_r.xml: CPY (scalar) variant SVE +# PATTERN x0528a000/mask=xff3fe000 + +:cpy Zd.T, Pg3_m, Rn_GPR32xsp +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1321=0b101000101 & sve_pg_1012 & sve_rn_0509 & sve_zd_0004 & Rn_GPR32xsp & Zd.T & Pg3_m +{ + Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_GPR32xsp); +} + +# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE +# PATTERN x05208000/mask=xff3fe000 + +:cpy Zd.T, Pg3_m, Rn_FPR8 +is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR8 & Pg3_m +{ + Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR8); +} + +# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE +# PATTERN x05208000/mask=xff3fe000 + +:cpy Zd.T, Pg3_m, Rn_FPR32 +is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR32 & Pg3_m +{ + Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR32); +} + +# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE +# PATTERN x05208000/mask=xff3fe000 + +:cpy Zd.T, Pg3_m, Rn_FPR16 +is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR16 & Pg3_m +{ + Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR16); +} + +# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE +# PATTERN x05208000/mask=xff3fe000 + +:cpy Zd.T, Pg3_m, Rn_FPR64 +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR64 & Pg3_m +{ + Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR64); +} + +# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Equal +# PATTERN x25a02000/mask=xffa0fc1f + +:ctermeq Rn_GPR64, Rm_GPR64 +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=1 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR64 & Rm_GPR64 +{ + SVE_ctermeq(Rn_GPR64, Rm_GPR64); +} + +# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Equal +# PATTERN x25a02000/mask=xffa0fc1f + +:ctermeq Rn_GPR32, Rm_GPR32 +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=0 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR32 & Rm_GPR32 +{ + SVE_ctermeq(Rn_GPR32, Rm_GPR32); +} + +# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Not equal +# PATTERN x25a02010/mask=xffa0fc1f + +:ctermne Rn_GPR64, Rm_GPR64 +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=1 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=1 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR64 & Rm_GPR64 +{ + SVE_ctermne(Rn_GPR64, Rm_GPR64); +} + +# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Not equal +# PATTERN x25a02010/mask=xffa0fc1f + +:ctermne Rn_GPR32, Rm_GPR32 +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=0 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=1 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR32 & Rm_GPR32 +{ + SVE_ctermne(Rn_GPR32, Rm_GPR32); +} + +# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Byte +# PATTERN x0430e400/mask=xfff0fc00 + +:decb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & 
sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_decb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Doubleword +# PATTERN x04f0e400/mask=xfff0fc00 + +:decd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_decd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Halfword +# PATTERN x0470e400/mask=xfff0fc00 + +:dech Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_dech(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Word +# PATTERN x04b0e400/mask=xfff0fc00 + +:decw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_decw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decd_z_zs.xml: DECD, DECH, DECW (vector) variant Doubleword +# PATTERN x04f0c400/mask=xfff0fc00 + +:decd Zd.D^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.D = SVE_decd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decd_z_zs.xml: DECD, DECH, DECW (vector) variant Halfword +# PATTERN x0470c400/mask=xfff0fc00 + +:dech Zd.H^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.H = SVE_dech(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decd_z_zs.xml: DECD, DECH, DECW (vector) variant Word +# PATTERN x04b0c400/mask=xfff0fc00 + +:decw Zd.S^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.S = SVE_decw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# decp_r_p_r.xml: DECP (scalar) variant SVE +# PATTERN x252d8800/mask=xff3ffe00 + +:decp Rd_GPR64, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 +{ + Rd_GPR64 = SVE_decp(Rd_GPR64, Pn.T); +} + +# decp_z_p_z.xml: DECP (vector) variant SVE +# PATTERN x252d8000/mask=xff3ffe00 + +:decp Zd.T, Pn +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn +{ + Zd.T = SVE_decp(Zd.T, Pn); +} + +# dup_z_i.xml: DUP (immediate) variant SVE +# PATTERN x2538c000/mask=xff3fc000 + +:dup Zd.T, sve_shf8_1_m128to127 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b111 & sve_b_18=0 & sve_b_17=0 & sve_b_1416=0b011 & sve_sh_13 & 
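+# NOTE (editor's aside): the CNT*/DEC* scalar forms above (and the INC* forms
+# later in the file) share one shape: count how many elements of the given
+# size the pattern operand selects in a vector, scale by the four-bit
+# multiplier (1..16, sve_imm4_1_1to16), then return, subtract or add that
+# count. A worked example, assuming a 256-bit vector length and pattern ALL:
+#     cntd x0, all, mul #2  =>  x0 = (256/64) * 2 = 8
+# The semantic bodies here defer that computation to the SVE_* operations.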
sve_imm8_0512 & sve_zd_0004 & sve_shift_13 & Zd.T & sve_imm8_1_m128to127 & sve_shf8_1_m128to127 +{ + Zd.T = SVE_dup(Zd.T, sve_shf8_1_m128to127, sve_shift_13:1); +} + +# dup_z_r.xml: DUP (scalar) variant SVE +# PATTERN x05203800/mask=xff3ffc00 + +:dup Zd.T, Rn_GPR64xsp +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1021=0b100000001110 & sve_rn_0509 & sve_zd_0004 & Rn_GPR64xsp & Zd.T +{ + Zd.T = SVE_dup(Zd.T, Rn_GPR64xsp); +} + +# dup_z_r.xml: DUP (scalar) variant SVE +# PATTERN x05203800/mask=xff3ffc00 + +:dup Zd.T, Rn_GPR32xsp +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1021=0b100000001110 & sve_rn_0509 & sve_zd_0004 & Rn_GPR32xsp & Zd.T +{ + Zd.T = SVE_dup(Zd.T, Rn_GPR32xsp); +} + +# dup_z_zi.xml: DUP (indexed) variant SVE +# PATTERN x05202000/mask=xff20fc00 + +:dup Zd.T_tsz, Zn.T_tsz[sve_imm2_tsz] +is sve_b_2431=0b00000101 & sve_imm2_2223 & sve_b_21=1 & sve_tsz_1620 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zd.T_tsz & Zn.T_tsz & sve_imm2_tsz +{ + Zd.T_tsz = SVE_dup(Zd.T_tsz, Zn.T_tsz, sve_imm2_tsz:1); +} + +# dupm_z_i.xml: DUPM variant SVE +# PATTERN x05c00000/mask=xfffc0000 + +:dupm Zd.T_imm13, "#"^sve_decode_bit_mask +is sve_b_1831=0b00000101110000 & sve_imm13_0517 & sve_zd_0004 & sve_decode_bit_mask & Zd.T_imm13 +{ + Zd.T_imm13 = SVE_dupm(Zd.T_imm13, sve_decode_bit_mask:1); +} + +# eon_eor_z_zi.xml: EON variant SVE +# ALIASEDBY EOR ., ., #(- - 1) if Never +# PATTERN x05400000/mask=xfffc0000 + +:eon Zd.T_imm13, Zd.T_imm13_2, "#"^sve_decode_bit_mask +is sve_b_2431=0b00000101 & sve_b_23=0 & sve_b_22=1 & sve_b_1821=0b0000 & sve_imm13_0517 & sve_zdn_0004 & sve_decode_bit_mask & Zd.T_imm13 & Zd.T_imm13_2 +{ + Zd.T_imm13 = SVE_eon(Zd.T_imm13, Zd.T_imm13_2, sve_decode_bit_mask:1); +} + +# eor_p_p_pp.xml: EOR, EORS (predicates) variant Flag setting +# PATTERN x25404200/mask=xfff0c210 + +:eors Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_eors(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# eor_p_p_pp.xml: EOR, EORS (predicates) variant Not flag setting +# PATTERN x25004200/mask=xfff0c210 + +:eor Pd.B, Pg_z, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B +{ + Pd.B = SVE_eor(Pd.B, Pg_z, Pn.B, Pm.B); +} + +# eor_z_p_zz.xml: EOR (vectors, predicated) variant SVE +# PATTERN x04190000/mask=xff3fe000 + +:eor Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_eor(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# eor_z_zi.xml: EOR (immediate) variant SVE +# PATTERN x05400000/mask=xfffc0000 + +# SKIPPING eor_z_zi.xml because x05400000/mask=xfffc0000 has already been defined + +# eor_z_zz.xml: EOR (vectors, unpredicated) variant SVE +# PATTERN x04a03000/mask=xffe0fc00 + +:eor Zd.D, Zn.D, Zm.D +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D +{ + Zd.D = SVE_eor(Zd.D, Zn.D, Zm.D); +} + +# eorv_r_p_z.xml: EORV variant SVE +# PATTERN x04192000/mask=xff3fe000 + +:eorv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & 
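+# NOTE (editor's aside): every semantic body in this file is a single call of
+# the form Zd = SVE_op(...). These look like sleigh user-defined p-code ops
+# ("define pcodeop"), so lifting yields a CALLOTHER rather than a bit-exact
+# model of the vector operation; an engine that wants to execute SVE code
+# precisely would need a handler per SVE_* op it cares about. The four eorv
+# constructors that follow differ only in sve_size_2223, which selects the
+# FPR8/16/32/64 destination class, hence one constructor per element size.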
sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_eorv(Rd_FPR8, Pg3, Zn.T); +} + +# eorv_r_p_z.xml: EORV variant SVE +# PATTERN x04192000/mask=xff3fe000 + +:eorv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_eorv(Rd_FPR32, Pg3, Zn.T); +} + +# eorv_r_p_z.xml: EORV variant SVE +# PATTERN x04192000/mask=xff3fe000 + +:eorv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_eorv(Rd_FPR16, Pg3, Zn.T); +} + +# eorv_r_p_z.xml: EORV variant SVE +# PATTERN x04192000/mask=xff3fe000 + +:eorv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_eorv(Rd_FPR64, Pg3, Zn.T); +} + +# ext_z_zi.xml: EXT variant SVE +# PATTERN x05200000/mask=xffe0e000 + +:ext Zd.B, Zd.B_2, Zn.B, "#"^sve_imm8_2_0to255 +is sve_b_2131=0b00000101001 & sve_imm8h_1620 & sve_b_1315=0b000 & sve_imm8l_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.B & Zd.B_2 & Zn.B & sve_imm8_2_0to255 +{ + Zd.B = SVE_ext(Zd.B, Zd.B_2, Zn.B, sve_imm8_2_0to255:1); +} + +# fabd_z_p_zz.xml: FABD variant SVE +# PATTERN x65088000/mask=xff3fe000 + +:fabd Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b100 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fabd(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fabs_z_p_z.xml: FABS variant SVE +# PATTERN x041ca000/mask=xff3fe000 + +:fabs Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fabs(Zd.T, Pg3_m, Zn.T); +} + +# facge_p_p_zz.xml: FAC variant Greater than +# PATTERN x6500e010/mask=xff20e010 + +:facgt Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_facgt(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# facge_p_p_zz.xml: FAC variant Greater than or equal +# PATTERN x6500c010/mask=xff20e010 + +:facge Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_facge(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# facle_facge_p_p_zz.xml: FACLE variant Greater than or equal +# ALIASEDBY FACGE ., /Z, ., . if Never +# PATTERN x6500c010/mask=xff20e010 + +# SKIPPING facle_facge_p_p_zz.xml because x6500c010/mask=xff20e010 has already been defined + +# faclt_facge_p_p_zz.xml: FACLT variant Greater than +# ALIASEDBY FACGT ., /Z, ., . 
if Never +# PATTERN x6500e010/mask=xff20e010 + +# SKIPPING faclt_facge_p_p_zz.xml because x6500e010/mask=xff20e010 has already been defined + +# fadd_z_p_zs.xml: FADD (immediate) variant SVE +# PATTERN x65188000/mask=xff3fe3c0 + +:fadd Zd.T, Pg3_m, Zd.T_2, sve_float_0510 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0510 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fadd(Zd.T, Pg3_m, Zd.T_2, sve_float_0510:1); +} + +# fadd_z_p_zz.xml: FADD (vectors, predicated) variant SVE +# PATTERN x65008000/mask=xff3fe000 + +:fadd Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b000 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fadd(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fadd_z_zz.xml: FADD (vectors, unpredicated) variant SVE +# PATTERN x65000000/mask=xff20fc00 + +:fadd Zd.T, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_fadd(Zd.T, Zn.T, Zm.T); +} + +# fadda_v_p_z.xml: FADDA variant SVE +# PATTERN x65182000/mask=xff3fe000 + +:fadda Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR8 & Rd_FPR8_2 & Pg3 +{ + Rd_FPR8 = SVE_fadda(Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T); +} + +# fadda_v_p_z.xml: FADDA variant SVE +# PATTERN x65182000/mask=xff3fe000 + +:fadda Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR32 & Rd_FPR32_2 & Pg3 +{ + Rd_FPR32 = SVE_fadda(Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T); +} + +# fadda_v_p_z.xml: FADDA variant SVE +# PATTERN x65182000/mask=xff3fe000 + +:fadda Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR16 & Rd_FPR16_2 & Pg3 +{ + Rd_FPR16 = SVE_fadda(Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T); +} + +# fadda_v_p_z.xml: FADDA variant SVE +# PATTERN x65182000/mask=xff3fe000 + +:fadda Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR64 & Rd_FPR64_2 & Pg3 +{ + Rd_FPR64 = SVE_fadda(Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T); +} + +# faddv_v_p_z.xml: FADDV variant SVE +# PATTERN x65002000/mask=xff3fe000 + +:faddv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_faddv(Rd_FPR8, Pg3, Zn.T); +} + +# faddv_v_p_z.xml: FADDV variant SVE +# PATTERN x65002000/mask=xff3fe000 + +:faddv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_faddv(Rd_FPR32, Pg3, Zn.T); +} + +# faddv_v_p_z.xml: FADDV variant SVE +# PATTERN x65002000/mask=xff3fe000 + +:faddv Rd_FPR16, Pg3, Zn.T +is 
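+# NOTE (editor's aside): the predicate operand names encode how inactive
+# elements are treated: Pg3_m marks merging forms (printed /M, inactive
+# destination elements keep their previous value), Pg3_z marks zeroing forms
+# (printed /Z, inactive elements become zero), and plain Pg/Pg3, as in the
+# fadda/faddv reductions above, prints no suffix at all.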
sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_faddv(Rd_FPR16, Pg3, Zn.T); +} + +# faddv_v_p_z.xml: FADDV variant SVE +# PATTERN x65002000/mask=xff3fe000 + +:faddv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_faddv(Rd_FPR64, Pg3, Zn.T); +} + +# fcadd_z_p_zz.xml: FCADD variant SVE +# PATTERN x64008000/mask=xff3ee000 + +:fcadd Zd.T, Pg3_m, Zd.T_2, Zn.T, sve_rot_16 +is sve_b_2431=0b01100100 & sve_size_2223 & sve_b_1721=0b00000 & sve_rot_16 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fcadd(Zd.T, Pg3_m, Zd.T_2, Zn.T, sve_rot_16:1); +} + +# fcmeq_p_p_z0.xml: FCM (zero) variant Equal +# PATTERN x65122000/mask=xff3fe010 + +:fcmeq Pd.T, Pg3_z, Zn.T, "#0.0" +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmeq(Pd.T, Pg3_z, Zn.T); +} + +# fcmeq_p_p_z0.xml: FCM (zero) variant Greater than +# PATTERN x65102010/mask=xff3fe010 + +:fcmgt Pd.T, Pg3_z, Zn.T, "#0.0" +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmgt(Pd.T, Pg3_z, Zn.T); +} + +# fcmeq_p_p_z0.xml: FCM (zero) variant Greater than or equal +# PATTERN x65102000/mask=xff3fe010 + +:fcmge Pd.T, Pg3_z, Zn.T, "#0.0" +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmge(Pd.T, Pg3_z, Zn.T); +} + +# fcmeq_p_p_z0.xml: FCM (zero) variant Less than +# PATTERN x65112000/mask=xff3fe010 + +:fcmlt Pd.T, Pg3_z, Zn.T, "#0.0" +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmlt(Pd.T, Pg3_z, Zn.T); +} + +# fcmeq_p_p_z0.xml: FCM (zero) variant Less than or equal +# PATTERN x65112010/mask=xff3fe010 + +:fcmle Pd.T, Pg3_z, Zn.T, "#0.0" +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmle(Pd.T, Pg3_z, Zn.T); +} + +# fcmeq_p_p_z0.xml: FCM (zero) variant Not equal +# PATTERN x65132000/mask=xff3fe010 + +:fcmne Pd.T, Pg3_z, Zn.T, "#0.0" +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmne(Pd.T, Pg3_z, Zn.T); +} + +# fcmeq_p_p_zz.xml: FCM (vectors) variant Equal +# PATTERN x65006000/mask=xff20e010 + +:fcmeq Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmeq(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# fcmeq_p_p_zz.xml: FCM (vectors) variant Greater than +# PATTERN x65004010/mask=xff20e010 + +:fcmgt Pd.T, Pg3_z, Zn.T, 
Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmgt(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# fcmeq_p_p_zz.xml: FCM (vectors) variant Greater than or equal +# PATTERN x65004000/mask=xff20e010 + +:fcmge Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmge(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# fcmeq_p_p_zz.xml: FCM (vectors) variant Not equal +# PATTERN x65006010/mask=xff20e010 + +:fcmne Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmne(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# fcmeq_p_p_zz.xml: FCM (vectors) variant Unordered +# PATTERN x6500c000/mask=xff20e010 + +:fcmuo Pd.T, Pg3_z, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z +{ + Pd.T = SVE_fcmuo(Pd.T, Pg3_z, Zn.T, Zm.T); +} + +# fcmla_z_p_zzz.xml: FCMLA (vectors) variant SVE +# PATTERN x64000000/mask=xff208000 + +:fcmla Zd.T, Pg3_m, Zn.T, Zm.T, sve_rot_1314 +is sve_b_2431=0b01100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_rot_1314 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fcmla(Zd.T, Pg3_m, Zn.T, Zm.T, sve_rot_1314:1); +} + +# fcmla_z_zzzi.xml: FCMLA (indexed) variant Half-precision +# PATTERN x64a01000/mask=xffe0f000 + +:fcmla Zd.H, Zn.H, Zm3.H[sve_i2_1920], sve_rot_1011 +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1215=0b0001 & sve_rot_1011 & sve_zn_0509 & sve_zda_0004 & Zd.H & Zn.H & Zm3.H +{ + Zd.H = SVE_fcmla(Zd.H, Zn.H, Zm3.H, sve_i2_1920:1, sve_rot_1011:1); +} + +# fcmla_z_zzzi.xml: FCMLA (indexed) variant Single-precision +# PATTERN x64e01000/mask=xffe0f000 + +:fcmla Zd.S, Zn.S, Zm4.S[sve_i1_20], sve_rot_1011 +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1215=0b0001 & sve_rot_1011 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.S & Zm4.S +{ + Zd.S = SVE_fcmla(Zd.S, Zn.S, Zm4.S, sve_i1_20:1, sve_rot_1011:1); +} + +# fcmle_fcmeq_p_p_zz.xml: FCMLE (vectors) variant Greater than or equal +# ALIASEDBY FCMGE ., /Z, ., . if Never +# PATTERN x65004000/mask=xff20e010 + +# SKIPPING fcmle_fcmeq_p_p_zz.xml because x65004000/mask=xff20e010 has already been defined + +# fcmlt_fcmeq_p_p_zz.xml: FCMLT (vectors) variant Greater than +# ALIASEDBY FCMGT ., /Z, ., . 
if Never +# PATTERN x65004010/mask=xff20e010 + +# SKIPPING fcmlt_fcmeq_p_p_zz.xml because x65004010/mask=xff20e010 has already been defined + +# fcpy_z_p_i.xml: FCPY variant SVE +# PATTERN x0510c000/mask=xff30e000 + +:fcpy Zd.T, Pm_m, "#"^sve_float_imm8 +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b01 & sve_pg_1619 & sve_b_1315=0b110 & sve_imm8_0512 & sve_zd_0004 & sve_float_imm8 & Zd.T & Pm_m +{ + Zd.T = SVE_fcpy(Zd.T, Pm_m, sve_float_imm8:1); +} + +# fcvt_z_p_z.xml: FCVT variant Half-precision to single-precision +# PATTERN x6589a000/mask=xffffe000 + +:fcvt Zd.S, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvt(Zd.S, Pg3_m, Zn.H); +} + +# fcvt_z_p_z.xml: FCVT variant Half-precision to double-precision +# PATTERN x65c9a000/mask=xffffe000 + +:fcvt Zd.D, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvt(Zd.D, Pg3_m, Zn.H); +} + +# fcvt_z_p_z.xml: FCVT variant Single-precision to half-precision +# PATTERN x6588a000/mask=xffffe000 + +:fcvt Zd.H, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.H & Pg3_m +{ + Zd.H = SVE_fcvt(Zd.H, Pg3_m, Zn.S); +} + +# fcvt_z_p_z.xml: FCVT variant Single-precision to double-precision +# PATTERN x65cba000/mask=xffffe000 + +:fcvt Zd.D, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvt(Zd.D, Pg3_m, Zn.S); +} + +# fcvt_z_p_z.xml: FCVT variant Double-precision to half-precision +# PATTERN x65c8a000/mask=xffffe000 + +:fcvt Zd.H, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.H & Pg3_m +{ + Zd.H = SVE_fcvt(Zd.H, Pg3_m, Zn.D); +} + +# fcvt_z_p_z.xml: FCVT variant Double-precision to single-precision +# PATTERN x65caa000/mask=xffffe000 + +:fcvt Zd.S, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvt(Zd.S, Pg3_m, Zn.D); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Half-precision to 16-bit +# PATTERN x655aa000/mask=xffffe000 + +:fcvtzs Zd.H, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m +{ + Zd.H = SVE_fcvtzs(Zd.H, Pg3_m, Zn.H); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Half-precision to 32-bit +# PATTERN x655ca000/mask=xffffe000 + +:fcvtzs Zd.S, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvtzs(Zd.S, Pg3_m, Zn.H); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Half-precision to 64-bit +# PATTERN x655ea000/mask=xffffe000 + +:fcvtzs Zd.D, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 
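+# NOTE (editor's aside): collecting the six FCVT encodings above (all under
+# mask xffffe000), the fixed bits pair up as
+#     H->S x6589a000   S->H x6588a000
+#     H->D x65c9a000   D->H x65c8a000
+#     S->D x65cba000   D->S x65caa000
+# so bit 22 is set whenever double precision is involved, and bit 16 is set
+# on the widening direction of each pair. In the FCVTZS/FCVTZU groups that
+# follow, each signed/unsigned pair differs only in bit 16 (1 = unsigned).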
& sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvtzs(Zd.D, Pg3_m, Zn.H); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Single-precision to 32-bit +# PATTERN x659ca000/mask=xffffe000 + +:fcvtzs Zd.S, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvtzs(Zd.S, Pg3_m, Zn.S); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Single-precision to 64-bit +# PATTERN x65dca000/mask=xffffe000 + +:fcvtzs Zd.D, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvtzs(Zd.D, Pg3_m, Zn.S); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Double-precision to 32-bit +# PATTERN x65d8a000/mask=xffffe000 + +:fcvtzs Zd.S, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvtzs(Zd.S, Pg3_m, Zn.D); +} + +# fcvtzs_z_p_z.xml: FCVTZS variant Double-precision to 64-bit +# PATTERN x65dea000/mask=xffffe000 + +:fcvtzs Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvtzs(Zd.D, Pg3_m, Zn.D); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Half-precision to 16-bit +# PATTERN x655ba000/mask=xffffe000 + +:fcvtzu Zd.H, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m +{ + Zd.H = SVE_fcvtzu(Zd.H, Pg3_m, Zn.H); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Half-precision to 32-bit +# PATTERN x655da000/mask=xffffe000 + +:fcvtzu Zd.S, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvtzu(Zd.S, Pg3_m, Zn.H); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Half-precision to 64-bit +# PATTERN x655fa000/mask=xffffe000 + +:fcvtzu Zd.D, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvtzu(Zd.D, Pg3_m, Zn.H); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Single-precision to 32-bit +# PATTERN x659da000/mask=xffffe000 + +:fcvtzu Zd.S, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvtzu(Zd.S, Pg3_m, Zn.S); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Single-precision to 64-bit +# PATTERN x65dda000/mask=xffffe000 + +:fcvtzu Zd.D, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvtzu(Zd.D, Pg3_m, Zn.S); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Double-precision to 32-bit +# PATTERN 
x65d9a000/mask=xffffe000 + +:fcvtzu Zd.S, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m +{ + Zd.S = SVE_fcvtzu(Zd.S, Pg3_m, Zn.D); +} + +# fcvtzu_z_p_z.xml: FCVTZU variant Double-precision to 64-bit +# PATTERN x65dfa000/mask=xffffe000 + +:fcvtzu Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m +{ + Zd.D = SVE_fcvtzu(Zd.D, Pg3_m, Zn.D); +} + +# fdiv_z_p_zz.xml: FDIV variant SVE +# PATTERN x650d8000/mask=xff3fe000 + +:fdiv Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b110 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fdiv(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fdivr_z_p_zz.xml: FDIVR variant SVE +# PATTERN x650c8000/mask=xff3fe000 + +:fdivr Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b110 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fdivr(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fdup_z_i.xml: FDUP variant SVE +# PATTERN x2539c000/mask=xff3fe000 + +:fdup Zd.T, "#"^sve_float_imm8 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b111 & sve_b_18=0 & sve_b_17=0 & sve_b_1416=0b111 & sve_b_13=0 & sve_imm8_0512 & sve_zd_0004 & sve_float_imm8 & Zd.T +{ + Zd.T = SVE_fdup(Zd.T, sve_float_imm8:1); +} + +# fexpa_z_z.xml: FEXPA variant SVE +# PATTERN x0420b800/mask=xff3ffc00 + +:fexpa Zd.T, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_b_1720=0b0000 & sve_b_16=0 & sve_b_1015=0b101110 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T +{ + Zd.T = SVE_fexpa(Zd.T, Zn.T); +} + +# fmad_z_p_zzz.xml: FMAD variant SVE +# PATTERN x65208000/mask=xff20e000 + +:fmad Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m +{ + Zd.T = SVE_fmad(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fmax_z_p_zs.xml: FMAX (immediate) variant SVE +# PATTERN x651e8000/mask=xff3fe3c0 + +:fmax Zd.T, Pg3_m, Zd.T_2, sve_float_0010 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fmax(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); +} + +# fmax_z_p_zz.xml: FMAX (vectors) variant SVE +# PATTERN x65068000/mask=xff3fe000 + +:fmax Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b011 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fmax(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fmaxnm_z_p_zs.xml: FMAXNM (immediate) variant SVE +# PATTERN x651c8000/mask=xff3fe3c0 + +:fmaxnm Zd.T, Pg3_m, Zd.T_2, sve_float_0010 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fmaxnm(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); +} + +# fmaxnm_z_p_zz.xml: FMAXNM (vectors) variant SVE +# 
PATTERN x65048000/mask=xff3fe000 + +:fmaxnm Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b010 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fmaxnm(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE +# PATTERN x65042000/mask=xff3fe000 + +:fmaxnmv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_fmaxnmv(Rd_FPR8, Pg3, Zn.T); +} + +# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE +# PATTERN x65042000/mask=xff3fe000 + +:fmaxnmv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_fmaxnmv(Rd_FPR32, Pg3, Zn.T); +} + +# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE +# PATTERN x65042000/mask=xff3fe000 + +:fmaxnmv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_fmaxnmv(Rd_FPR16, Pg3, Zn.T); +} + +# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE +# PATTERN x65042000/mask=xff3fe000 + +:fmaxnmv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_fmaxnmv(Rd_FPR64, Pg3, Zn.T); +} + +# fmaxv_v_p_z.xml: FMAXV variant SVE +# PATTERN x65062000/mask=xff3fe000 + +:fmaxv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_fmaxv(Rd_FPR8, Pg3, Zn.T); +} + +# fmaxv_v_p_z.xml: FMAXV variant SVE +# PATTERN x65062000/mask=xff3fe000 + +:fmaxv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_fmaxv(Rd_FPR32, Pg3, Zn.T); +} + +# fmaxv_v_p_z.xml: FMAXV variant SVE +# PATTERN x65062000/mask=xff3fe000 + +:fmaxv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_fmaxv(Rd_FPR16, Pg3, Zn.T); +} + +# fmaxv_v_p_z.xml: FMAXV variant SVE +# PATTERN x65062000/mask=xff3fe000 + +:fmaxv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_fmaxv(Rd_FPR64, Pg3, Zn.T); +} + +# fmin_z_p_zs.xml: FMIN (immediate) variant SVE +# PATTERN x651f8000/mask=xff3fe3c0 + +:fmin Zd.T, Pg3_m, Zd.T_2, sve_float_0010 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fmin(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); +} + +# fmin_z_p_zz.xml: FMIN (vectors) variant SVE +# PATTERN x65078000/mask=xff3fe000 + +:fmin Zd.T, 
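+# NOTE (editor's aside): the *_z_p_zs "(immediate)" forms in this file take a
+# one-bit float immediate, decoded per operation by the sve_float_* operand:
+# sve_float_0010 chooses #0.0 or #1.0 (fmax/fmin/fmaxnm/fminnm),
+# sve_float_0510 chooses #0.5 or #1.0 (fadd/fsub/fsubr), and sve_float_0520
+# chooses #0.5 or #2.0 (fmul), with sve_i1_05 carrying the selector bit.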
Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b011 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fmin(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fminnm_z_p_zs.xml: FMINNM (immediate) variant SVE +# PATTERN x651d8000/mask=xff3fe3c0 + +:fminnm Zd.T, Pg3_m, Zd.T_2, sve_float_0010 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fminnm(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); +} + +# fminnm_z_p_zz.xml: FMINNM (vectors) variant SVE +# PATTERN x65058000/mask=xff3fe000 + +:fminnm Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b010 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fminnm(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fminnmv_v_p_z.xml: FMINNMV variant SVE +# PATTERN x65052000/mask=xff3fe000 + +:fminnmv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_fminnmv(Rd_FPR8, Pg3, Zn.T); +} + +# fminnmv_v_p_z.xml: FMINNMV variant SVE +# PATTERN x65052000/mask=xff3fe000 + +:fminnmv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_fminnmv(Rd_FPR32, Pg3, Zn.T); +} + +# fminnmv_v_p_z.xml: FMINNMV variant SVE +# PATTERN x65052000/mask=xff3fe000 + +:fminnmv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_fminnmv(Rd_FPR16, Pg3, Zn.T); +} + +# fminnmv_v_p_z.xml: FMINNMV variant SVE +# PATTERN x65052000/mask=xff3fe000 + +:fminnmv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_fminnmv(Rd_FPR64, Pg3, Zn.T); +} + +# fminv_v_p_z.xml: FMINV variant SVE +# PATTERN x65072000/mask=xff3fe000 + +:fminv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_fminv(Rd_FPR8, Pg3, Zn.T); +} + +# fminv_v_p_z.xml: FMINV variant SVE +# PATTERN x65072000/mask=xff3fe000 + +:fminv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_fminv(Rd_FPR32, Pg3, Zn.T); +} + +# fminv_v_p_z.xml: FMINV variant SVE +# PATTERN x65072000/mask=xff3fe000 + +:fminv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_fminv(Rd_FPR16, Pg3, Zn.T); +} + +# fminv_v_p_z.xml: FMINV variant SVE +# PATTERN x65072000/mask=xff3fe000 + +:fminv Rd_FPR64, Pg3, Zn.T +is 
sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_fminv(Rd_FPR64, Pg3, Zn.T); +} + +# fmla_z_p_zzz.xml: FMLA (vectors) variant SVE +# PATTERN x65200000/mask=xff20e000 + +:fmla Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fmla(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fmla_z_zzzi.xml: FMLA (indexed) variant Half-precision +# PATTERN x64200000/mask=xffa0fc00 + +:fmla Zd.H, Zn.H, Zm3.H[sve_i3h_i3l] +is sve_b_2431=0b01100100 & sve_b_23=0 & sve_i3h_22 & sve_b_21=1 & sve_i3l_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.H & Zn.H & Zm3.H & sve_i3h_i3l +{ + Zd.H = SVE_fmla(Zd.H, Zn.H, Zm3.H, sve_i3h_i3l:1); +} + +# fmla_z_zzzi.xml: FMLA (indexed) variant Single-precision +# PATTERN x64a00000/mask=xffe0fc00 + +:fmla Zd.S, Zn.S, Zm3.S[sve_i2_1920] +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.S & Zm3.S +{ + Zd.S = SVE_fmla(Zd.S, Zn.S, Zm3.S, sve_i2_1920:1); +} + +# fmla_z_zzzi.xml: FMLA (indexed) variant Double-precision +# PATTERN x64e00000/mask=xffe0fc00 + +:fmla Zd.D, Zn.D, Zm4.D[sve_i1_20] +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.D & Zm4.D +{ + Zd.D = SVE_fmla(Zd.D, Zn.D, Zm4.D, sve_i1_20:1); +} + +# fmls_z_p_zzz.xml: FMLS (vectors) variant SVE +# PATTERN x65202000/mask=xff20e000 + +:fmls Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fmls(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fmls_z_zzzi.xml: FMLS (indexed) variant Half-precision +# PATTERN x64200400/mask=xffa0fc00 + +:fmls Zd.H, Zn.H, Zm3.H[sve_i3h_i3l] +is sve_b_2431=0b01100100 & sve_b_23=0 & sve_i3h_22 & sve_b_21=1 & sve_i3l_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.H & Zn.H & Zm3.H & sve_i3h_i3l +{ + Zd.H = SVE_fmls(Zd.H, Zn.H, Zm3.H, sve_i3h_i3l:1); +} + +# fmls_z_zzzi.xml: FMLS (indexed) variant Single-precision +# PATTERN x64a00400/mask=xffe0fc00 + +:fmls Zd.S, Zn.S, Zm3.S[sve_i2_1920] +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.S & Zm3.S +{ + Zd.S = SVE_fmls(Zd.S, Zn.S, Zm3.S, sve_i2_1920:1); +} + +# fmls_z_zzzi.xml: FMLS (indexed) variant Double-precision +# PATTERN x64e00400/mask=xffe0fc00 + +:fmls Zd.D, Zn.D, Zm4.D[sve_i1_20] +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.D & Zm4.D +{ + Zd.D = SVE_fmls(Zd.D, Zn.D, Zm4.D, sve_i1_20:1); +} + +# fmov_cpy_z_p_i.xml: FMOV (zero, predicated) variant SVE +# ALIASEDBY CPY ., /M, #0 if Never +# PATTERN x05104000/mask=xff30ffe0 + +:fmov Zd.T, Pm_m, "#0.0" +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b01 & sve_pg_1619 & sve_b_15=0 & sve_m_14=1 & sve_sh_13=0 & sve_imm8_0512=0b00000000 & sve_zd_0004 & Zd.T & Pm_m +{ + Zd.T = SVE_fmov(Zd.T, Pm_m); 
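+# NOTE (editor's aside): these fmov "#0.0" constructors are the alias
+# spellings of cpy/dup with an all-zero immediate; their masks additionally
+# pin imm8 and sh to zero (xff30ffe0 / xff3fffe0 versus xff308000 /
+# xff3fc000 for cpy/dup), so they are strictly more specific and should take
+# precedence for the zero case. Alias pages whose pattern/mask exactly
+# repeats the primary encoding are SKIPped instead, as with fcpy/fdup below.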
+} + +# fmov_dup_z_i.xml: FMOV (zero, unpredicated) variant SVE +# ALIASEDBY DUP ., #0 if Never +# PATTERN x2538c000/mask=xff3fffe0 + +:fmov Zd.T, "#0.0" +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b111 & sve_b_18=0 & sve_b_17=0 & sve_b_1416=0b011 & sve_sh_13=0 & sve_imm8_0512=0b00000000 & sve_zd_0004 & Zd.T +{ + Zd.T = SVE_fmov(Zd.T); +} + +# fmov_fcpy_z_p_i.xml: FMOV (immediate, predicated) variant SVE +# ALIASEDBY FCPY ., /M, # if Unconditionally +# PATTERN x0510c000/mask=xff30e000 + +# SKIPPING fmov_fcpy_z_p_i.xml because x0510c000/mask=xff30e000 has already been defined + +# fmov_fdup_z_i.xml: FMOV (immediate, unpredicated) variant SVE +# ALIASEDBY FDUP ., # if Unconditionally +# PATTERN x2539c000/mask=xff3fe000 + +# SKIPPING fmov_fdup_z_i.xml because x2539c000/mask=xff3fe000 has already been defined + +# fmsb_z_p_zzz.xml: FMSB variant SVE +# PATTERN x6520a000/mask=xff20e000 + +:fmsb Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m +{ + Zd.T = SVE_fmsb(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fmul_z_p_zs.xml: FMUL (immediate) variant SVE +# PATTERN x651a8000/mask=xff3fe3c0 + +:fmul Zd.T, Pg3_m, Zd.T_2, sve_float_0520 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0520 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fmul(Zd.T, Pg3_m, Zd.T_2, sve_float_0520:1); +} + +# fmul_z_p_zz.xml: FMUL (vectors, predicated) variant SVE +# PATTERN x65028000/mask=xff3fe000 + +:fmul Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fmul(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fmul_z_zz.xml: FMUL (vectors, unpredicated) variant SVE +# PATTERN x65000800/mask=xff20fc00 + +:fmul Zd.T, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b01 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_fmul(Zd.T, Zn.T, Zm.T); +} + +# fmul_z_zzi.xml: FMUL (indexed) variant Half-precision +# PATTERN x64202000/mask=xffa0fc00 + +:fmul Zd.H, Zn.H, Zm3.H[sve_i3h_i3l] +is sve_b_2431=0b01100100 & sve_b_23=0 & sve_i3h_22 & sve_b_21=1 & sve_i3l_1920 & sve_zm_1618 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Zm3.H & sve_i3h_i3l +{ + Zd.H = SVE_fmul(Zd.H, Zn.H, Zm3.H, sve_i3h_i3l:1); +} + +# fmul_z_zzi.xml: FMUL (indexed) variant Single-precision +# PATTERN x64a02000/mask=xffe0fc00 + +:fmul Zd.S, Zn.S, Zm3.S[sve_i2_1920] +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Zm3.S +{ + Zd.S = SVE_fmul(Zd.S, Zn.S, Zm3.S, sve_i2_1920:1); +} + +# fmul_z_zzi.xml: FMUL (indexed) variant Double-precision +# PATTERN x64e02000/mask=xffe0fc00 + +:fmul Zd.D, Zn.D, Zm4.D[sve_i1_20] +is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm4.D +{ + Zd.D = SVE_fmul(Zd.D, Zn.D, Zm4.D, sve_i1_20:1); +} + +# fmulx_z_p_zz.xml: FMULX variant SVE +# PATTERN x650a8000/mask=xff3fe000 + +:fmulx Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & 
sve_b_1719=0b101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fmulx(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fneg_z_p_z.xml: FNEG variant SVE +# PATTERN x041da000/mask=xff3fe000 + +:fneg Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fneg(Zd.T, Pg3_m, Zn.T); +} + +# fnmad_z_p_zzz.xml: FNMAD variant SVE +# PATTERN x6520c000/mask=xff20e000 + +:fnmad Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m +{ + Zd.T = SVE_fnmad(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fnmla_z_p_zzz.xml: FNMLA variant SVE +# PATTERN x65204000/mask=xff20e000 + +:fnmla Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fnmla(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fnmls_z_p_zzz.xml: FNMLS variant SVE +# PATTERN x65206000/mask=xff20e000 + +:fnmls Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fnmls(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# fnmsb_z_p_zzz.xml: FNMSB variant SVE +# PATTERN x6520e000/mask=xff20e000 + +:fnmsb Zd.T, Pg3_m, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m +{ + Zd.T = SVE_fnmsb(Zd.T, Pg3_m, Zn.T, Zm.T); +} + +# frecpe_z_z.xml: FRECPE variant SVE +# PATTERN x650e3000/mask=xff3ffc00 + +:frecpe Zd.T, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b001 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T +{ + Zd.T = SVE_frecpe(Zd.T, Zn.T); +} + +# frecps_z_zz.xml: FRECPS variant SVE +# PATTERN x65001800/mask=xff20fc00 + +:frecps Zd.T, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_frecps(Zd.T, Zn.T, Zm.T); +} + +# frecpx_z_p_z.xml: FRECPX variant SVE +# PATTERN x650ca000/mask=xff3fe000 + +:frecpx Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0011 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frecpx(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Current mode +# PATTERN x6507a000/mask=xff3fe000 + +:frinti Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frinti(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Current mode signalling inexact +# PATTERN x6506a000/mask=xff3fe000 + +:frintx Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frintx(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Nearest with ties to 
away +# PATTERN x6504a000/mask=xff3fe000 + +:frinta Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frinta(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Nearest with ties to even +# PATTERN x6500a000/mask=xff3fe000 + +:frintn Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frintn(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Toward zero +# PATTERN x6503a000/mask=xff3fe000 + +:frintz Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frintz(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Toward minus infinity +# PATTERN x6502a000/mask=xff3fe000 + +:frintm Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frintm(Zd.T, Pg3_m, Zn.T); +} + +# frinta_z_p_z.xml: FRINT variant Toward plus infinity +# PATTERN x6501a000/mask=xff3fe000 + +:frintp Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_frintp(Zd.T, Pg3_m, Zn.T); +} + +# frsqrte_z_z.xml: FRSQRTE variant SVE +# PATTERN x650f3000/mask=xff3ffc00 + +:frsqrte Zd.T, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b001 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T +{ + Zd.T = SVE_frsqrte(Zd.T, Zn.T); +} + +# frsqrts_z_zz.xml: FRSQRTS variant SVE +# PATTERN x65001c00/mask=xff20fc00 + +:frsqrts Zd.T, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_frsqrts(Zd.T, Zn.T, Zm.T); +} + +# fscale_z_p_zz.xml: FSCALE variant SVE +# PATTERN x65098000/mask=xff3fe000 + +:fscale Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b100 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fscale(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fsqrt_z_p_z.xml: FSQRT variant SVE +# PATTERN x650da000/mask=xff3fe000 + +:fsqrt Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0011 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_fsqrt(Zd.T, Pg3_m, Zn.T); +} + +# fsub_z_p_zs.xml: FSUB (immediate) variant SVE +# PATTERN x65198000/mask=xff3fe3c0 + +:fsub Zd.T, Pg3_m, Zd.T_2, sve_float_0510 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0510 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fsub(Zd.T, Pg3_m, Zd.T_2, sve_float_0510:1); +} + +# fsub_z_p_zz.xml: FSUB (vectors, predicated) variant SVE +# PATTERN x65018000/mask=xff3fe000 + +:fsub Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & 
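+# NOTE (editor's aside): the seven FRINT constructors above differ only in
+# bits 16-18, which select the rounding mode:
+#     000 frintn (ties to even)    001 frintp (toward +inf)
+#     010 frintm (toward -inf)     011 frintz (toward zero)
+#     100 frinta (ties away)       110 frintx (current mode, signalling inexact)
+#     111 frinti (current mode)
+# everything else, including the size field and governing predicate, is
+# shared across the group.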
sve_b_2021=0b00 & sve_b_1719=0b000 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fsub(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# fsub_z_zz.xml: FSUB (vectors, unpredicated) variant SVE +# PATTERN x65000400/mask=xff20fc00 + +:fsub Zd.T, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_fsub(Zd.T, Zn.T, Zm.T); +} + +# fsubr_z_p_zs.xml: FSUBR (immediate) variant SVE +# PATTERN x651b8000/mask=xff3fe3c0 + +:fsubr Zd.T, Pg3_m, Zd.T_2, sve_float_0510 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0510 & Zd.T & Zd.T_2 & Pg3_m +{ + Zd.T = SVE_fsubr(Zd.T, Pg3_m, Zd.T_2, sve_float_0510:1); +} + +# fsubr_z_p_zz.xml: FSUBR (vectors) variant SVE +# PATTERN x65038000/mask=xff3fe000 + +:fsubr Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_fsubr(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# ftmad_z_zzi.xml: FTMAD variant SVE +# PATTERN x65108000/mask=xff38fc00 + +:ftmad Zd.T, Zd.T_2, Zn.T, "#"^sve_imm3_1_0to7 +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b010 & sve_imm3_1618 & sve_b_1015=0b100000 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & sve_imm3_1_0to7 +{ + Zd.T = SVE_ftmad(Zd.T, Zd.T_2, Zn.T, sve_imm3_1_0to7:1); +} + +# ftsmul_z_zz.xml: FTSMUL variant SVE +# PATTERN x65000c00/mask=xff20fc00 + +:ftsmul Zd.T, Zn.T, Zm.T +is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b01 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_ftsmul(Zd.T, Zn.T, Zm.T); +} + +# ftssel_z_zz.xml: FTSSEL variant SVE +# PATTERN x0420b000/mask=xff20fc00 + +:ftssel Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1115=0b10110 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_ftssel(Zd.T, Zn.T, Zm.T); +} + +# incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Byte +# PATTERN x0430e000/mask=xfff0fc00 + +:incb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_incb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Doubleword +# PATTERN x04f0e000/mask=xfff0fc00 + +:incd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_incd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Halfword +# PATTERN x0470e000/mask=xfff0fc00 + +:inch Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_inch(Rd_GPR64, sve_mul_pattern, 
sve_imm4_1_1to16:1); +} + +# incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Word +# PATTERN x04b0e000/mask=xfff0fc00 + +:incw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_incw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# incd_z_zs.xml: INCD, INCH, INCW (vector) variant Doubleword +# PATTERN x04f0c000/mask=xfff0fc00 + +:incd Zd.D^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.D = SVE_incd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# incd_z_zs.xml: INCD, INCH, INCW (vector) variant Halfword +# PATTERN x0470c000/mask=xfff0fc00 + +:inch Zd.H^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.H = SVE_inch(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# incd_z_zs.xml: INCD, INCH, INCW (vector) variant Word +# PATTERN x04b0c000/mask=xfff0fc00 + +:incw Zd.S^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.S = SVE_incw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# incp_r_p_r.xml: INCP (scalar) variant SVE +# PATTERN x252c8800/mask=xff3ffe00 + +:incp Rd_GPR64, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 +{ + Rd_GPR64 = SVE_incp(Rd_GPR64, Pn.T); +} + +# incp_z_p_z.xml: INCP (vector) variant SVE +# PATTERN x252c8000/mask=xff3ffe00 + +:incp Zd.T, Pn +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn +{ + Zd.T = SVE_incp(Zd.T, Pn); +} + +# index_z_ii.xml: INDEX (immediates) variant SVE +# PATTERN x04204000/mask=xff20fc00 + +:index Zd.T, "#"^sve_imm5s_0509, "#"^sve_imm5b_1620 +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_imm5b_1620 & sve_b_1015=0b010000 & sve_imm5_0509 & sve_zd_0004 & Zd.T & sve_imm5s_0509 +{ + Zd.T = SVE_index(Zd.T, sve_imm5s_0509:1, sve_imm5b_1620:1); +} + +# index_z_ir.xml: INDEX (immediate, scalar) variant SVE +# PATTERN x04204800/mask=xff20fc00 + +:index Zd.T, "#"^sve_imm5_1_m16to15, Rm_GPR64 +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010010 & sve_imm5_0509 & sve_zd_0004 & Zd.T & Rm_GPR64 & sve_imm5_1_m16to15 +{ + Zd.T = SVE_index(Zd.T, sve_imm5_1_m16to15:1, Rm_GPR64); +} + +# index_z_ir.xml: INDEX (immediate, scalar) variant SVE +# PATTERN x04204800/mask=xff20fc00 + +:index Zd.T, "#"^sve_imm5_1_m16to15, Rm_GPR32 +is sve_b_2431=0b00000100 & (b_23=0 | b_22=0) & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010010 & sve_imm5_0509 & sve_zd_0004 & Zd.T & Rm_GPR32 & sve_imm5_1_m16to15 +{ + Zd.T = SVE_index(Zd.T, sve_imm5_1_m16to15:1, Rm_GPR32); +} + +# index_z_ri.xml: INDEX (scalar, immediate) variant SVE +# PATTERN x04204400/mask=xff20fc00 + +:index Zd.T, Rn_GPR64, 
"#"^sve_imm5_1_m16to15 +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_21=1 & sve_imm5_1620 & sve_b_1015=0b010001 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR64 & sve_imm5_1_m16to15 +{ + Zd.T = SVE_index(Zd.T, Rn_GPR64, sve_imm5_1_m16to15:1); +} + +# index_z_ri.xml: INDEX (scalar, immediate) variant SVE +# PATTERN x04204400/mask=xff20fc00 + +:index Zd.T, Rn_GPR32, "#"^sve_imm5_1_m16to15 +is sve_b_2431=0b00000100 & (b_23=0 | b_22=0) & sve_b_21=1 & sve_imm5_1620 & sve_b_1015=0b010001 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR32 & sve_imm5_1_m16to15 +{ + Zd.T = SVE_index(Zd.T, Rn_GPR32, sve_imm5_1_m16to15:1); +} + +# index_z_rr.xml: INDEX (scalars) variant SVE +# PATTERN x04204c00/mask=xff20fc00 + +:index Zd.T, Rn_GPR64, Rm_GPR64 +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010011 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR64 & Rm_GPR64 +{ + Zd.T = SVE_index(Zd.T, Rn_GPR64, Rm_GPR64); +} + +# index_z_rr.xml: INDEX (scalars) variant SVE +# PATTERN x04204c00/mask=xff20fc00 + +:index Zd.T, Rn_GPR32, Rm_GPR32 +is sve_b_2431=0b00000100 & (b_23=0 | b_22=0) & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010011 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR32 & Rm_GPR32 +{ + Zd.T = SVE_index(Zd.T, Rn_GPR32, Rm_GPR32); +} + +# insr_z_r.xml: INSR (scalar) variant SVE +# PATTERN x05243800/mask=xff3ffc00 + +:insr Zd.T, Rn_GPR64 +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1021=0b100100001110 & sve_rm_0509 & sve_zdn_0004 & Zd.T & Rn_GPR64 +{ + Zd.T = SVE_insr(Zd.T, Rn_GPR64); +} + +# insr_z_r.xml: INSR (scalar) variant SVE +# PATTERN x05243800/mask=xff3ffc00 + +:insr Zd.T, Rn_GPR32 +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1021=0b100100001110 & sve_rm_0509 & sve_zdn_0004 & Zd.T & Rn_GPR32 +{ + Zd.T = SVE_insr(Zd.T, Rn_GPR32); +} + +# insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE +# PATTERN x05343800/mask=xff3ffc00 + +:insr Zd.T, Rn_FPR8 +is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR8 +{ + Zd.T = SVE_insr(Zd.T, Rn_FPR8); +} + +# insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE +# PATTERN x05343800/mask=xff3ffc00 + +:insr Zd.T, Rn_FPR32 +is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR32 +{ + Zd.T = SVE_insr(Zd.T, Rn_FPR32); +} + +# insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE +# PATTERN x05343800/mask=xff3ffc00 + +:insr Zd.T, Rn_FPR16 +is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR16 +{ + Zd.T = SVE_insr(Zd.T, Rn_FPR16); +} + +# insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE +# PATTERN x05343800/mask=xff3ffc00 + +:insr Zd.T, Rn_FPR64 +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR64 +{ + Zd.T = SVE_insr(Zd.T, Rn_FPR64); +} + +# lasta_r_p_z.xml: LASTA (scalar) variant SVE +# PATTERN x0520a000/mask=xff3fe000 + +:lasta Rd_GPR64, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR64 & Pg3 +{ + Rd_GPR64 = SVE_lasta(Rd_GPR64, Pg3, Zn.T); +} + +# lasta_r_p_z.xml: LASTA (scalar) variant SVE +# PATTERN x0520a000/mask=xff3fe000 + +:lasta Rd_GPR32, Pg3, Zn.T +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b10000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR32 & Pg3 +{ + Rd_GPR32 = 
SVE_lasta(Rd_GPR32, Pg3, Zn.T); +} + +# lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE +# PATTERN x05228000/mask=xff3fe000 + +:lasta Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_lasta(Rd_FPR8, Pg3, Zn.T); +} + +# lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE +# PATTERN x05228000/mask=xff3fe000 + +:lasta Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_lasta(Rd_FPR32, Pg3, Zn.T); +} + +# lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE +# PATTERN x05228000/mask=xff3fe000 + +:lasta Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_lasta(Rd_FPR16, Pg3, Zn.T); +} + +# lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE +# PATTERN x05228000/mask=xff3fe000 + +:lasta Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_lasta(Rd_FPR64, Pg3, Zn.T); +} + +# lastb_r_p_z.xml: LASTB (scalar) variant SVE +# PATTERN x0521a000/mask=xff3fe000 + +:lastb Rd_GPR64, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR64 & Pg3 +{ + Rd_GPR64 = SVE_lastb(Rd_GPR64, Pg3, Zn.T); +} + +# lastb_r_p_z.xml: LASTB (scalar) variant SVE +# PATTERN x0521a000/mask=xff3fe000 + +:lastb Rd_GPR32, Pg3, Zn.T +is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b10000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR32 & Pg3 +{ + Rd_GPR32 = SVE_lastb(Rd_GPR32, Pg3, Zn.T); +} + +# lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE +# PATTERN x05238000/mask=xff3fe000 + +:lastb Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_lastb(Rd_FPR8, Pg3, Zn.T); +} + +# lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE +# PATTERN x05238000/mask=xff3fe000 + +:lastb Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_lastb(Rd_FPR32, Pg3, Zn.T); +} + +# lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE +# PATTERN x05238000/mask=xff3fe000 + +:lastb Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_lastb(Rd_FPR16, Pg3, Zn.T); +} + +# lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE +# PATTERN x05238000/mask=xff3fe000 + +:lastb Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_lastb(Rd_FPR64, Pg3, Zn.T); +} + +# ld1b_z_p_ai.xml: LD1B (vector plus immediate) variant 32-bit element +# PATTERN x8420c000/mask=xffe0e000 + +:ld1b "{"^Zd.S^"}", Pg3_z, 
[Zn.S^sve_opt5_1_0to31] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31 +{ + Zd.S = SVE_ld1b(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31); +} + +# ld1b_z_p_ai.xml: LD1B (vector plus immediate) variant 64-bit element +# PATTERN xc420c000/mask=xffe0e000 + +:ld1b "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31 +{ + Zd.D = SVE_ld1b(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31); +} + +# ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 8-bit element +# PATTERN xa400a000/mask=xfff0e000 + +:ld1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_mul4_1_m8to7 +{ + Zd.B = SVE_ld1b(Zd.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 16-bit element +# PATTERN xa420a000/mask=xfff0e000 + +:ld1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 +{ + Zd.H = SVE_ld1b(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 32-bit element +# PATTERN xa440a000/mask=xfff0e000 + +:ld1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 +{ + Zd.S = SVE_ld1b(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 64-bit element +# PATTERN xa460a000/mask=xfff0e000 + +:ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 8-bit element +# PATTERN xa4004000/mask=xffe0e000 + +:ld1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64 +{ + Zd.B = SVE_ld1b(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 16-bit element +# PATTERN xa4204000/mask=xffe0e000 + +:ld1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 +{ + Zd.H = SVE_ld1b(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 32-bit element +# PATTERN xa4404000/mask=xffe0e000 + +:ld1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & 
Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 +{ + Zd.S = SVE_ld1b(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 64-bit element +# PATTERN xa4604000/mask=xffe0e000 + +:ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1b_z_p_bz.xml: LD1B (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc4004000/mask=xffa0e000 + +:ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1b_z_p_bz.xml: LD1B (scalar plus vector) variant 32-bit unscaled offset +# PATTERN x84004000/mask=xffa0e000 + +:ld1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1b(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1b_z_p_bz.xml: LD1B (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xc440c000/mask=xffe0e000 + +:ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1d_z_p_ai.xml: LD1D (vector plus immediate) variant SVE +# PATTERN xc5a0c000/mask=xffe0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to248] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to248 +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to248); +} + +# ld1d_z_p_bi.xml: LD1D (scalar plus immediate) variant SVE +# PATTERN xa5e0a000/mask=xfff0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1d_z_p_br.xml: LD1D (scalar plus scalar) variant SVE +# PATTERN xa5e04000/mask=xffe0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc5a04000/mask=xffa0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); 
+} + +# ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc5804000/mask=xffa0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc5e0c000/mask=xffe0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #3"] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xc5c0c000/mask=xffe0e000 + +:ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1h_z_p_ai.xml: LD1H (vector plus immediate) variant 32-bit element +# PATTERN x84a0c000/mask=xffe0e000 + +:ld1h "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62 +{ + Zd.S = SVE_ld1h(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62); +} + +# ld1h_z_p_ai.xml: LD1H (vector plus immediate) variant 64-bit element +# PATTERN xc4a0c000/mask=xffe0e000 + +:ld1h "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62 +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62); +} + +# ld1h_z_p_bi.xml: LD1H (scalar plus immediate) variant 16-bit element +# PATTERN xa4a0a000/mask=xfff0e000 + +:ld1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 +{ + Zd.H = SVE_ld1h(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1h_z_p_bi.xml: LD1H (scalar plus immediate) variant 32-bit element +# PATTERN xa4c0a000/mask=xfff0e000 + +:ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 +{ + Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1h_z_p_bi.xml: LD1H (scalar plus immediate) variant 64-bit element +# PATTERN xa4e0a000/mask=xfff0e000 + +:ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1h_z_p_br.xml: LD1H (scalar plus scalar) variant 
16-bit element +# PATTERN xa4a04000/mask=xffe0e000 + +:ld1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 +{ + Zd.H = SVE_ld1h(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1h_z_p_br.xml: LD1H (scalar plus scalar) variant 32-bit element +# PATTERN xa4c04000/mask=xffe0e000 + +:ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 +{ + Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1h_z_p_br.xml: LD1H (scalar plus scalar) variant 64-bit element +# PATTERN xa4e04000/mask=xffe0e000 + +:ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit scaled offset +# PATTERN x84a04000/mask=xffa0e000 + +:ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] +is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc4a04000/mask=xffa0e000 + +:ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc4804000/mask=xffa0e000 + +:ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit unscaled offset +# PATTERN x84804000/mask=xffa0e000 + +:ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc4e0c000/mask=xffe0e000 + +:ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xc4c0c000/mask=xffe0e000 + +:ld1h 
"{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1rb_z_p_bi.xml: LD1RB variant 8-bit element +# PATTERN x84408000/mask=xffc0e000 + +:ld1rb "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_opt6_1_0to63 +{ + Zd.B = SVE_ld1rb(Zd.B, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rb_z_p_bi.xml: LD1RB variant 16-bit element +# PATTERN x8440a000/mask=xffc0e000 + +:ld1rb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt6_1_0to63 +{ + Zd.H = SVE_ld1rb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rb_z_p_bi.xml: LD1RB variant 32-bit element +# PATTERN x8440c000/mask=xffc0e000 + +:ld1rb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to63 +{ + Zd.S = SVE_ld1rb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rb_z_p_bi.xml: LD1RB variant 64-bit element +# PATTERN x8440e000/mask=xffc0e000 + +:ld1rb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to63 +{ + Zd.D = SVE_ld1rb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rd_z_p_bi.xml: LD1RD variant SVE +# PATTERN x85c0e000/mask=xffc0e000 + +:ld1rd "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to504] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to504 +{ + Zd.D = SVE_ld1rd(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to504); +} + +# ld1rh_z_p_bi.xml: LD1RH variant 16-bit element +# PATTERN x84c0a000/mask=xffc0e000 + +:ld1rh "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt6_1_0to126 +{ + Zd.H = SVE_ld1rh(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); +} + +# ld1rh_z_p_bi.xml: LD1RH variant 32-bit element +# PATTERN x84c0c000/mask=xffc0e000 + +:ld1rh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to126 +{ + Zd.S = SVE_ld1rh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); +} + +# ld1rh_z_p_bi.xml: LD1RH variant 64-bit element +# PATTERN x84c0e000/mask=xffc0e000 + +:ld1rh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 
& sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to126 +{ + Zd.D = SVE_ld1rh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); +} + +# ld1rqb_z_p_bi.xml: LD1RQB (scalar plus immediate) variant SVE +# PATTERN xa4002000/mask=xfff0e000 + +:ld1rqb "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] +is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_opt4_1_m128to112 +{ + Zd.B = SVE_ld1rqb(Zd.B, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); +} + +# ld1rqb_z_p_br.xml: LD1RQB (scalar plus scalar) variant SVE +# PATTERN xa4000000/mask=xffe0e000 + +:ld1rqb "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64 +{ + Zd.B = SVE_ld1rqb(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1rqd_z_p_bi.xml: LD1RQD (scalar plus immediate) variant SVE +# PATTERN xa5802000/mask=xfff0e000 + +:ld1rqd "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] +is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt4_1_m128to112 +{ + Zd.D = SVE_ld1rqd(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); +} + +# ld1rqd_z_p_br.xml: LD1RQD (scalar plus scalar) variant SVE +# PATTERN xa5800000/mask=xffe0e000 + +:ld1rqd "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1rqd(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1rqh_z_p_bi.xml: LD1RQH (scalar plus immediate) variant SVE +# PATTERN xa4802000/mask=xfff0e000 + +:ld1rqh "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] +is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt4_1_m128to112 +{ + Zd.H = SVE_ld1rqh(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); +} + +# ld1rqh_z_p_br.xml: LD1RQH (scalar plus scalar) variant SVE +# PATTERN xa4800000/mask=xffe0e000 + +:ld1rqh "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 +{ + Zd.H = SVE_ld1rqh(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1rqw_z_p_bi.xml: LD1RQW (scalar plus immediate) variant SVE +# PATTERN xa5002000/mask=xfff0e000 + +:ld1rqw "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] +is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt4_1_m128to112 +{ + Zd.S = SVE_ld1rqw(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); +} + +# ld1rqw_z_p_br.xml: LD1RQW (scalar plus scalar) variant SVE +# PATTERN xa5000000/mask=xffe0e000 + +:ld1rqw "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & 
Pg3_z & Zd.S & Rm_GPR64 +{ + Zd.S = SVE_ld1rqw(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1rsb_z_p_bi.xml: LD1RSB variant 16-bit element +# PATTERN x85c0c000/mask=xffc0e000 + +:ld1rsb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt6_1_0to63 +{ + Zd.H = SVE_ld1rsb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rsb_z_p_bi.xml: LD1RSB variant 32-bit element +# PATTERN x85c0a000/mask=xffc0e000 + +:ld1rsb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to63 +{ + Zd.S = SVE_ld1rsb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rsb_z_p_bi.xml: LD1RSB variant 64-bit element +# PATTERN x85c08000/mask=xffc0e000 + +:ld1rsb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to63 +{ + Zd.D = SVE_ld1rsb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); +} + +# ld1rsh_z_p_bi.xml: LD1RSH variant 32-bit element +# PATTERN x8540a000/mask=xffc0e000 + +:ld1rsh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to126 +{ + Zd.S = SVE_ld1rsh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); +} + +# ld1rsh_z_p_bi.xml: LD1RSH variant 64-bit element +# PATTERN x85408000/mask=xffc0e000 + +:ld1rsh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to126 +{ + Zd.D = SVE_ld1rsh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); +} + +# ld1rsw_z_p_bi.xml: LD1RSW variant SVE +# PATTERN x84c08000/mask=xffc0e000 + +:ld1rsw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to252] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to252 +{ + Zd.D = SVE_ld1rsw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to252); +} + +# ld1rw_z_p_bi.xml: LD1RW variant 32-bit element +# PATTERN x8540c000/mask=xffc0e000 + +:ld1rw "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to252] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to252 +{ + Zd.S = SVE_ld1rw(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to252); +} + +# ld1rw_z_p_bi.xml: LD1RW variant 64-bit element +# PATTERN x8540e000/mask=xffc0e000 + +:ld1rw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to252] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to252 +{ + Zd.D = SVE_ld1rw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to252); +} + +# 
ld1sb_z_p_ai.xml: LD1SB (vector plus immediate) variant 32-bit element +# PATTERN x84208000/mask=xffe0e000 + +:ld1sb "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31 +{ + Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31); +} + +# ld1sb_z_p_ai.xml: LD1SB (vector plus immediate) variant 64-bit element +# PATTERN xc4208000/mask=xffe0e000 + +:ld1sb "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31 +{ + Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31); +} + +# ld1sb_z_p_bi.xml: LD1SB (scalar plus immediate) variant 16-bit element +# PATTERN xa5c0a000/mask=xfff0e000 + +:ld1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 +{ + Zd.H = SVE_ld1sb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1sb_z_p_bi.xml: LD1SB (scalar plus immediate) variant 32-bit element +# PATTERN xa5a0a000/mask=xfff0e000 + +:ld1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 +{ + Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1sb_z_p_bi.xml: LD1SB (scalar plus immediate) variant 64-bit element +# PATTERN xa580a000/mask=xfff0e000 + +:ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1sb_z_p_br.xml: LD1SB (scalar plus scalar) variant 16-bit element +# PATTERN xa5c04000/mask=xffe0e000 + +:ld1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 +{ + Zd.H = SVE_ld1sb(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1sb_z_p_br.xml: LD1SB (scalar plus scalar) variant 32-bit element +# PATTERN xa5a04000/mask=xffe0e000 + +:ld1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 +{ + Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1sb_z_p_br.xml: LD1SB (scalar plus scalar) variant 64-bit element +# PATTERN xa5804000/mask=xffe0e000 + +:ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1sb_z_p_bz.xml: LD1SB (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc4000000/mask=xffa0e000 + +:ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, 
Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1sb_z_p_bz.xml: LD1SB (scalar plus vector) variant 32-bit unscaled offset +# PATTERN x84000000/mask=xffa0e000 + +:ld1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1sb_z_p_bz.xml: LD1SB (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xc4408000/mask=xffe0e000 + +:ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1sh_z_p_ai.xml: LD1SH (vector plus immediate) variant 32-bit element +# PATTERN x84a08000/mask=xffe0e000 + +:ld1sh "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62 +{ + Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62); +} + +# ld1sh_z_p_ai.xml: LD1SH (vector plus immediate) variant 64-bit element +# PATTERN xc4a08000/mask=xffe0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62 +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62); +} + +# ld1sh_z_p_bi.xml: LD1SH (scalar plus immediate) variant 32-bit element +# PATTERN xa520a000/mask=xfff0e000 + +:ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 +{ + Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1sh_z_p_bi.xml: LD1SH (scalar plus immediate) variant 64-bit element +# PATTERN xa500a000/mask=xfff0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1sh_z_p_br.xml: LD1SH (scalar plus scalar) variant 32-bit element +# PATTERN xa5204000/mask=xffe0e000 + +:ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 +{ + Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1sh_z_p_br.xml: LD1SH (scalar plus scalar) variant 64-bit element +# PATTERN xa5004000/mask=xffe0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1010010 & 
sve_b_2224=0b100 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit scaled offset +# PATTERN x84a00000/mask=xffa0e000 + +:ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] +is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc4a00000/mask=xffa0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc4800000/mask=xffa0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit unscaled offset +# PATTERN x84800000/mask=xffa0e000 + +:ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc4e08000/mask=xffe0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xc4c08000/mask=xffe0e000 + +:ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1sw_z_p_ai.xml: LD1SW (vector plus immediate) variant SVE +# PATTERN xc5208000/mask=xffe0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124 +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124); +} + +# ld1sw_z_p_bi.xml: LD1SW (scalar plus immediate) variant SVE +# PATTERN xa480a000/mask=xfff0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & 
sve_b_2224=0b010 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1sw_z_p_br.xml: LD1SW (scalar plus scalar) variant SVE +# PATTERN xa4804000/mask=xffe0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc5200000/mask=xffa0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc5000000/mask=xffa0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc5608000/mask=xffe0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xc5408000/mask=xffe0e000 + +:ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D +{ + Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); +} + +# ld1w_z_p_ai.xml: LD1W (vector plus immediate) variant 32-bit element +# PATTERN x8520c000/mask=xffe0e000 + +:ld1w "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to124] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to124 +{ + Zd.S = SVE_ld1w(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to124); +} + +# ld1w_z_p_ai.xml: LD1W (vector plus immediate) variant 64-bit element +# PATTERN xc520c000/mask=xffe0e000 + +:ld1w "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124 +{ + Zd.D = SVE_ld1w(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124); +} + +# ld1w_z_p_bi.xml: LD1W (scalar plus immediate) variant 32-bit element +# PATTERN xa540a000/mask=xfff0e000 + +:ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & 
sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 +{ + Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1w_z_p_bi.xml: LD1W (scalar plus immediate) variant 64-bit element +# PATTERN xa560a000/mask=xfff0e000 + +:ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 +{ + Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# ld1w_z_p_br.xml: LD1W (scalar plus scalar) variant 32-bit element +# PATTERN xa5404000/mask=xffe0e000 + +:ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 +{ + Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1w_z_p_br.xml: LD1W (scalar plus scalar) variant 64-bit element +# PATTERN xa5604000/mask=xffe0e000 + +:ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 +{ + Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); +} + +# ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit scaled offset +# PATTERN x85204000/mask=xffa0e000 + +:ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] +is sve_b_2331=0b100001010 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc5204000/mask=xffa0e000 + +:ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xc5004000/mask=xffa0e000 + +:ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod +{ + Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit unscaled offset +# PATTERN x85004000/mask=xffa0e000 + +:ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod +{ + Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc560c000/mask=xffe0e000 + +:ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 
& sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc540c000/mask=xffe0e000
+
+:ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ld2b_z_p_bi.xml: LD2B (scalar plus immediate) variant SVE
+# PATTERN xa420e000/mask=xfff0e000
+
+:ld2b "{"^Zt.B, Ztt.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14
+{
+    Zt.B = SVE_ld2b(Zt.B, Ztt.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14);
+}
+
+# ld2b_z_p_br.xml: LD2B (scalar plus scalar) variant SVE
+# PATTERN xa420c000/mask=xffe0e000
+
+:ld2b "{"^Zt.B, Ztt.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.B = SVE_ld2b(Zt.B, Ztt.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld2d_z_p_bi.xml: LD2D (scalar plus immediate) variant SVE
+# PATTERN xa5a0e000/mask=xfff0e000
+
+:ld2d "{"^Zt.D, Ztt.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14
+{
+    Zt.D = SVE_ld2d(Zt.D, Ztt.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14);
+}
+
+# ld2d_z_p_br.xml: LD2D (scalar plus scalar) variant SVE
+# PATTERN xa5a0c000/mask=xffe0e000
+
+:ld2d "{"^Zt.D, Ztt.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.D = SVE_ld2d(Zt.D, Ztt.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld2h_z_p_bi.xml: LD2H (scalar plus immediate) variant SVE
+# PATTERN xa4a0e000/mask=xfff0e000
+
+:ld2h "{"^Zt.H, Ztt.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14
+{
+    Zt.H = SVE_ld2h(Zt.H, Ztt.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14);
+}
+
+# ld2h_z_p_br.xml: LD2H (scalar plus scalar) variant SVE
+# PATTERN xa4a0c000/mask=xffe0e000
+
+:ld2h "{"^Zt.H, Ztt.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.H = SVE_ld2h(Zt.H, Ztt.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld2w_z_p_bi.xml: LD2W (scalar plus immediate) variant SVE
+# PATTERN xa520e000/mask=xfff0e000
+
+:ld2w "{"^Zt.S, Ztt.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14
+{
+    Zt.S = SVE_ld2w(Zt.S, Ztt.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14);
+}
+
+# ld2w_z_p_br.xml: LD2W (scalar plus scalar) variant SVE
+# PATTERN xa520c000/mask=xffe0e000
+
+:ld2w "{"^Zt.S, Ztt.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.S = SVE_ld2w(Zt.S, Ztt.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld3b_z_p_bi.xml: LD3B (scalar plus immediate) variant SVE
+# PATTERN xa440e000/mask=xfff0e000
+
+:ld3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21
+{
+    Zt.B = SVE_ld3b(Zt.B, Ztt.B, Zttt.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21);
+}
+
+# ld3b_z_p_br.xml: LD3B (scalar plus scalar) variant SVE
+# PATTERN xa440c000/mask=xffe0e000
+
+:ld3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.B = SVE_ld3b(Zt.B, Ztt.B, Zttt.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld3d_z_p_bi.xml: LD3D (scalar plus immediate) variant SVE
+# PATTERN xa5c0e000/mask=xfff0e000
+
+:ld3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21
+{
+    Zt.D = SVE_ld3d(Zt.D, Ztt.D, Zttt.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21);
+}
+
+# ld3d_z_p_br.xml: LD3D (scalar plus scalar) variant SVE
+# PATTERN xa5c0c000/mask=xffe0e000
+
+:ld3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.D = SVE_ld3d(Zt.D, Ztt.D, Zttt.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld3h_z_p_bi.xml: LD3H (scalar plus immediate) variant SVE
+# PATTERN xa4c0e000/mask=xfff0e000
+
+:ld3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21
+{
+    Zt.H = SVE_ld3h(Zt.H, Ztt.H, Zttt.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21);
+}
+
+# ld3h_z_p_br.xml: LD3H (scalar plus scalar) variant SVE
+# PATTERN xa4c0c000/mask=xffe0e000
+
+:ld3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.H = SVE_ld3h(Zt.H, Ztt.H, Zttt.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld3w_z_p_bi.xml: LD3W (scalar plus immediate) variant SVE
+# PATTERN xa540e000/mask=xfff0e000
+
+:ld3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21
+{
+    Zt.S = SVE_ld3w(Zt.S, Ztt.S, Zttt.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21);
+}
+
+# ld3w_z_p_br.xml: LD3W (scalar plus scalar) variant SVE
+# PATTERN xa540c000/mask=xffe0e000
+
+:ld3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.S = SVE_ld3w(Zt.S, Ztt.S, Zttt.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld4b_z_p_bi.xml: LD4B (scalar plus immediate) variant SVE
+# PATTERN xa460e000/mask=xfff0e000
+
+:ld4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28
+{
+    Zt.B = SVE_ld4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28);
+}
+
+# ld4b_z_p_br.xml: LD4B (scalar plus scalar) variant SVE
+# PATTERN xa460c000/mask=xffe0e000
+
+:ld4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.B = SVE_ld4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld4d_z_p_bi.xml: LD4D (scalar plus immediate) variant SVE
+# PATTERN xa5e0e000/mask=xfff0e000
+
+:ld4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28
+{
+    Zt.D = SVE_ld4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28);
+}
+
+# ld4d_z_p_br.xml: LD4D (scalar plus scalar) variant SVE
+# PATTERN xa5e0c000/mask=xffe0e000
+
+:ld4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.D = SVE_ld4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld4h_z_p_bi.xml: LD4H (scalar plus immediate) variant SVE
+# PATTERN xa4e0e000/mask=xfff0e000
+
+:ld4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28
+{
+    Zt.H = SVE_ld4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28);
+}
+
+# ld4h_z_p_br.xml: LD4H (scalar plus scalar) variant SVE
+# PATTERN xa4e0c000/mask=xffe0e000
+
+:ld4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.H = SVE_ld4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ld4w_z_p_bi.xml: LD4W (scalar plus immediate) variant SVE
+# PATTERN xa560e000/mask=xfff0e000
+
+:ld4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28
+{
+    Zt.S = SVE_ld4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28);
+}
+
+# ld4w_z_p_br.xml: LD4W (scalar plus scalar) variant SVE
+# PATTERN xa560c000/mask=xffe0e000
+
+:ld4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & Pg3_z & Rm_GPR64
+{
+    Zt.S = SVE_ld4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1b_z_p_ai.xml: LDFF1B (vector plus immediate) variant 32-bit element
+# PATTERN x8420e000/mask=xffe0e000
+
+:ldff1b "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31
+{
+    Zd.S = SVE_ldff1b(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31);
+}
+
+# ldff1b_z_p_ai.xml: LDFF1B (vector plus immediate) variant 64-bit element
+# PATTERN xc420e000/mask=xffe0e000
+
+:ldff1b "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31
+{
+    Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31);
+}
+
+# ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 8-bit element
+# PATTERN xa4006000/mask=xffe0e000
+
+:ldff1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64
+{
+    Zd.B = SVE_ldff1b(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 16-bit element
+# PATTERN xa4206000/mask=xffe0e000
+
+:ldff1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64
+{
+    Zd.H = SVE_ldff1b(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 32-bit element
+# PATTERN xa4406000/mask=xffe0e000
+
+:ldff1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64
+{
+    Zd.S = SVE_ldff1b(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 64-bit element
+# PATTERN xa4606000/mask=xffe0e000
+
+:ldff1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1b_z_p_bz.xml: LDFF1B (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc4006000/mask=xffa0e000
+
+:ldff1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1b_z_p_bz.xml: LDFF1B (scalar plus vector) variant 32-bit unscaled offset
+# PATTERN x84006000/mask=xffa0e000
+
+:ldff1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1b(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1b_z_p_bz.xml: LDFF1B (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc440e000/mask=xffe0e000
+
+:ldff1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1d_z_p_ai.xml: LDFF1D (vector plus immediate) variant SVE
+# PATTERN xc5a0e000/mask=xffe0e000
+
+:ldff1d "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to248]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to248
+{
+    Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to248);
+}
+
+# ldff1d_z_p_br.xml: LDFF1D (scalar plus scalar) variant SVE
+# PATTERN xa5e06000/mask=xffe0e000
+
+:ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc5a06000/mask=xffa0e000
+
+:ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc5806000/mask=xffa0e000
+
+:ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc5e0e000/mask=xffe0e000
+
+:ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #3"]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc5c0e000/mask=xffe0e000
+
+:ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1h_z_p_ai.xml: LDFF1H (vector plus immediate) variant 32-bit element
+# PATTERN x84a0e000/mask=xffe0e000
+
+:ldff1h "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62
+{
+    Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62);
+}
+
+# ldff1h_z_p_ai.xml: LDFF1H (vector plus immediate) variant 64-bit element
+# PATTERN xc4a0e000/mask=xffe0e000
+
+:ldff1h "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62
+{
+    Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62);
+}
+
+# ldff1h_z_p_br.xml: LDFF1H (scalar plus scalar) variant 16-bit element
+# PATTERN xa4a06000/mask=xffe0e000
+
+:ldff1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64
+{
+    Zd.H = SVE_ldff1h(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1h_z_p_br.xml: LDFF1H (scalar plus scalar) variant 32-bit element
+# PATTERN xa4c06000/mask=xffe0e000
+
+:ldff1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64
+{
+    Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1h_z_p_br.xml: LDFF1H (scalar plus scalar) variant 64-bit element
+# PATTERN xa4e06000/mask=xffe0e000
+
+:ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit scaled offset
+# PATTERN x84a06000/mask=xffa0e000
+
+:ldff1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"]
+is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc4a06000/mask=xffa0e000
+
+:ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc4806000/mask=xffa0e000
+
+:ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit unscaled offset
+# PATTERN x84806000/mask=xffa0e000
+
+:ldff1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc4e0e000/mask=xffe0e000
+
+:ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc4c0e000/mask=xffe0e000
+
+:ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1sb_z_p_ai.xml: LDFF1SB (vector plus immediate) variant 32-bit element
+# PATTERN x8420a000/mask=xffe0e000
+
+:ldff1sb "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31
+{
+    Zd.S = SVE_ldff1sb(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31);
+}
+
+# ldff1sb_z_p_ai.xml: LDFF1SB (vector plus immediate) variant 64-bit element
+# PATTERN xc420a000/mask=xffe0e000
+
+:ldff1sb "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31
+{
+    Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31);
+}
+
+# ldff1sb_z_p_br.xml: LDFF1SB (scalar plus scalar) variant 16-bit element
+# PATTERN xa5c06000/mask=xffe0e000
+
+:ldff1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64
+{
+    Zd.H = SVE_ldff1sb(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1sb_z_p_br.xml: LDFF1SB (scalar plus scalar) variant 32-bit element
+# PATTERN xa5a06000/mask=xffe0e000
+
+:ldff1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64
+{
+    Zd.S = SVE_ldff1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1sb_z_p_br.xml: LDFF1SB (scalar plus scalar) variant 64-bit element
+# PATTERN xa5806000/mask=xffe0e000
+
+:ldff1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1sb_z_p_bz.xml: LDFF1SB (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc4002000/mask=xffa0e000
+
+:ldff1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1sb_z_p_bz.xml: LDFF1SB (scalar plus vector) variant 32-bit unscaled offset
+# PATTERN x84002000/mask=xffa0e000
+
+:ldff1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1sb_z_p_bz.xml: LDFF1SB (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc440a000/mask=xffe0e000
+
+:ldff1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1sh_z_p_ai.xml: LDFF1SH (vector plus immediate) variant 32-bit element
+# PATTERN x84a0a000/mask=xffe0e000
+
+:ldff1sh "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62
+{
+    Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62);
+}
+
+# ldff1sh_z_p_ai.xml: LDFF1SH (vector plus immediate) variant 64-bit element
+# PATTERN xc4a0a000/mask=xffe0e000
+
+:ldff1sh "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62
+{
+    Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62);
+}
+
+# ldff1sh_z_p_br.xml: LDFF1SH (scalar plus scalar) variant 32-bit element
+# PATTERN xa5206000/mask=xffe0e000
+
+:ldff1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64
+{
+    Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1sh_z_p_br.xml: LDFF1SH (scalar plus scalar) variant 64-bit element
+# PATTERN xa5006000/mask=xffe0e000
+
+:ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit scaled offset
+# PATTERN x84a02000/mask=xffa0e000
+
+:ldff1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"]
+is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc4a02000/mask=xffa0e000
+
+:ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc4802000/mask=xffa0e000
+
+:ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit unscaled offset
+# PATTERN x84802000/mask=xffa0e000
+
+:ldff1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc4e0a000/mask=xffe0e000
+
+:ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc4c0a000/mask=xffe0e000
+
+:ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1sw_z_p_ai.xml: LDFF1SW (vector plus immediate) variant SVE
+# PATTERN xc520a000/mask=xffe0e000
+
+:ldff1sw "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124
+{
+    Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124);
+}
+
+# ldff1sw_z_p_br.xml: LDFF1SW (scalar plus scalar) variant SVE
+# PATTERN xa4806000/mask=xffe0e000
+
+:ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc5202000/mask=xffa0e000
+
+:ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc5002000/mask=xffa0e000
+
+:ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc560a000/mask=xffe0e000
+
+:ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc540a000/mask=xffe0e000
+
+:ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1w_z_p_ai.xml: LDFF1W (vector plus immediate) variant 32-bit element
+# PATTERN x8520e000/mask=xffe0e000
+
+:ldff1w "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to124]
+is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to124
+{
+    Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to124);
+}
+
+# ldff1w_z_p_ai.xml: LDFF1W (vector plus immediate) variant 64-bit element
+# PATTERN xc520e000/mask=xffe0e000
+
+:ldff1w "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124
+{
+    Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124);
+}
+
+# ldff1w_z_p_br.xml: LDFF1W (scalar plus scalar) variant 32-bit element
+# PATTERN xa5406000/mask=xffe0e000
+
+:ldff1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64
+{
+    Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1w_z_p_br.xml: LDFF1W (scalar plus scalar) variant 64-bit element
+# PATTERN xa5606000/mask=xffe0e000
+
+:ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit scaled offset
+# PATTERN x85206000/mask=xffa0e000
+
+:ldff1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"]
+is sve_b_2331=0b100001010 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc5206000/mask=xffa0e000
+
+:ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit unpacked unscaled offset
+# PATTERN xc5006000/mask=xffa0e000
+
+:ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod
+{
+    Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit unscaled offset
+# PATTERN x85006000/mask=xffa0e000
+
+:ldff1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod]
+is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod
+{
+    Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc560e000/mask=xffe0e000
+
+:ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 64-bit unscaled offset
+# PATTERN xc540e000/mask=xffe0e000
+
+:ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D
+{
+    Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D);
+}
+
+# ldnf1b_z_p_bi.xml: LDNF1B variant 8-bit element
+# PATTERN xa410a000/mask=xfff0e000
+
+:ldnf1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_mul4_1_m8to7
+{
+    Zd.B = SVE_ldnf1b(Zd.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1b_z_p_bi.xml: LDNF1B variant 16-bit element
+# PATTERN xa430a000/mask=xfff0e000
+
+:ldnf1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7
+{
+    Zd.H = SVE_ldnf1b(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1b_z_p_bi.xml: LDNF1B variant 32-bit element
+# PATTERN xa450a000/mask=xfff0e000
+
+:ldnf1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7
+{
+    Zd.S = SVE_ldnf1b(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1b_z_p_bi.xml: LDNF1B variant 64-bit element
+# PATTERN xa470a000/mask=xfff0e000
+
+:ldnf1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1b(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1d_z_p_bi.xml: LDNF1D variant SVE
+# PATTERN xa5f0a000/mask=xfff0e000
+
+:ldnf1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1d(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1h_z_p_bi.xml: LDNF1H variant 16-bit element
+# PATTERN xa4b0a000/mask=xfff0e000
+
+:ldnf1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7
+{
+    Zd.H = SVE_ldnf1h(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1h_z_p_bi.xml: LDNF1H variant 32-bit element
+# PATTERN xa4d0a000/mask=xfff0e000
+
+:ldnf1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7
+{
+    Zd.S = SVE_ldnf1h(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1h_z_p_bi.xml: LDNF1H variant 64-bit element
+# PATTERN xa4f0a000/mask=xfff0e000
+
+:ldnf1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1h(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1sb_z_p_bi.xml: LDNF1SB variant 16-bit element
+# PATTERN xa5d0a000/mask=xfff0e000
+
+:ldnf1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7
+{
+    Zd.H = SVE_ldnf1sb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1sb_z_p_bi.xml: LDNF1SB variant 32-bit element
+# PATTERN xa5b0a000/mask=xfff0e000
+
+:ldnf1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7
+{
+    Zd.S = SVE_ldnf1sb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1sb_z_p_bi.xml: LDNF1SB variant 64-bit element
+# PATTERN xa590a000/mask=xfff0e000
+
+:ldnf1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1sb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1sh_z_p_bi.xml: LDNF1SH variant 32-bit element
+# PATTERN xa530a000/mask=xfff0e000
+
+:ldnf1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7
+{
+    Zd.S = SVE_ldnf1sh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1sh_z_p_bi.xml: LDNF1SH variant 64-bit element
+# PATTERN xa510a000/mask=xfff0e000
+
+:ldnf1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1sh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1sw_z_p_bi.xml: LDNF1SW variant SVE
+# PATTERN xa490a000/mask=xfff0e000
+
+:ldnf1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1sw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1w_z_p_bi.xml: LDNF1W variant 32-bit element
+# PATTERN xa550a000/mask=xfff0e000
+
+:ldnf1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7
+{
+    Zd.S = SVE_ldnf1w(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnf1w_z_p_bi.xml: LDNF1W variant 64-bit element
+# PATTERN xa570a000/mask=xfff0e000
+
+:ldnf1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnf1w(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnt1b_z_p_bi.xml: LDNT1B (scalar plus immediate) variant SVE
+# PATTERN xa400e000/mask=xfff0e000
+
+:ldnt1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_mul4_1_m8to7
+{
+    Zd.B = SVE_ldnt1b(Zd.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnt1b_z_p_br.xml: LDNT1B (scalar plus scalar) variant SVE
+# PATTERN xa400c000/mask=xffe0e000
+
+:ldnt1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64
+{
+    Zd.B = SVE_ldnt1b(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldnt1d_z_p_bi.xml: LDNT1D (scalar plus immediate) variant SVE
+# PATTERN xa580e000/mask=xfff0e000
+
+:ldnt1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7
+{
+    Zd.D = SVE_ldnt1d(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnt1d_z_p_br.xml: LDNT1D (scalar plus scalar) variant SVE
+# PATTERN xa580c000/mask=xffe0e000
+
+:ldnt1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64
+{
+    Zd.D = SVE_ldnt1d(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldnt1h_z_p_bi.xml: LDNT1H (scalar plus immediate) variant SVE
+# PATTERN xa480e000/mask=xfff0e000
+
+:ldnt1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7
+{
+    Zd.H = SVE_ldnt1h(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnt1h_z_p_br.xml: LDNT1H (scalar plus scalar) variant SVE
+# PATTERN xa480c000/mask=xffe0e000
+
+:ldnt1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"]
+is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64
+{
+    Zd.H = SVE_ldnt1h(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldnt1w_z_p_bi.xml: LDNT1W (scalar plus immediate) variant SVE
+# PATTERN xa500e000/mask=xfff0e000
+
+:ldnt1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7
+{
+    Zd.S = SVE_ldnt1w(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7);
+}
+
+# ldnt1w_z_p_br.xml: LDNT1W (scalar plus scalar) variant SVE
+# PATTERN xa500c000/mask=xffe0e000
+
+:ldnt1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"]
+is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64
+{
+    Zd.S = SVE_ldnt1w(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# ldr_p_bi.xml: LDR (predicate) variant SVE
+# PATTERN x85800000/mask=xffc0e010
+
+:ldr Pd, [Rn_GPR64xsp^sve_mul9_2_m256to255]
+is sve_b_2231=0b1000010110 & sve_imm9h_1621 & sve_b_1315=0b000 & sve_imm9l_1012 & sve_rn_0509 & sve_b_04=0 & sve_pt_0003 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Pd
+{
+    Pd = SVE_ldr(Pd, Rn_GPR64xsp, sve_mul9_2_m256to255);
+}
+
+# ldr_z_bi.xml: LDR (vector) variant SVE
+# PATTERN x85804000/mask=xffc0e000
+
+:ldr Zd, [Rn_GPR64xsp^sve_mul9_2_m256to255]
+is sve_b_2231=0b1000010110 & sve_imm9h_1621 & sve_b_1315=0b010 & sve_imm9l_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Zd
+{
+    Zd = SVE_ldr(Zd, Rn_GPR64xsp, sve_mul9_2_m256to255);
+}
+
+# lsl_z_p_zi.xml: LSL (immediate, predicated) variant SVE
+# PATTERN x04038000/mask=xff3fe000
+
+:lsl Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift
+is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m
+{
+    Zd.T_tszh = SVE_lsl(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1);
+}
+
+# lsl_z_p_zw.xml: LSL (wide elements, predicated) variant SVE
+# PATTERN x041b8000/mask=xff3fe000
+
+:lsl Zd.T, Pg3_m, Zd.T_2, Zn.D
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Pg3_m & Zn.D
+{
+    Zd.T = SVE_lsl(Zd.T, Pg3_m, Zd.T_2, Zn.D);
+}
+
+# lsl_z_p_zz.xml: LSL (vectors) variant SVE
+# PATTERN x04138000/mask=xff3fe000
+
+:lsl Zd.T, Pg3_m, Zd.T_2, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m
+{
+    Zd.T = SVE_lsl(Zd.T, Pg3_m, Zd.T_2, Zn.T);
+}
+
+# lsl_z_zi.xml: LSL (immediate, unpredicated) variant SVE
+# PATTERN x04209c00/mask=xff20fc00
+
+:lsl Zd.T_tszh, Zn.T_tszh, "#"^sve_imm_shift
+is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_21=1 & sve_tszl_1920 & sve_imm3_1618 & sve_b_1215=0b1001 & sve_b_11=1 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & sve_imm_shift & Zd.T_tszh & Zn.T_tszh
+{
+    Zd.T_tszh = SVE_lsl(Zd.T_tszh, Zn.T_tszh, sve_imm_shift:1);
+}
+
+# lsl_z_zw.xml: LSL (wide elements, unpredicated) variant SVE
+# PATTERN x04208c00/mask=xff20fc00
+
+:lsl Zd.T, Zn.T, Zm.D
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1000 & sve_b_11=1 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Zm.D
+{
+    Zd.T = SVE_lsl(Zd.T, Zn.T, Zm.D);
+}
+
+# lslr_z_p_zz.xml: LSLR variant SVE
+# PATTERN x04178000/mask=xff3fe000
+
+:lslr Zd.T, Pg3_m, Zd.T_2, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m
+{
+    Zd.T = SVE_lslr(Zd.T, Pg3_m, Zd.T_2, Zn.T);
+}
+
+# lsr_z_p_zi.xml: LSR (immediate, predicated) variant SVE
+# PATTERN x04018000/mask=xff3fe000
+
+:lsr Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift
+is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m
+{
+    Zd.T_tszh = SVE_lsr(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1);
+}
+
+# lsr_z_p_zw.xml: LSR (wide elements, predicated) variant SVE
+# PATTERN x04198000/mask=xff3fe000
+
+:lsr Zd.T, Pg3_m, Zd.T_2, Zn.D
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Pg3_m & Zn.D
+{
+    Zd.T = SVE_lsr(Zd.T, Pg3_m, Zd.T_2, Zn.D);
+}
+
+# lsr_z_p_zz.xml: LSR (vectors) variant SVE
+# PATTERN x04118000/mask=xff3fe000
+
+:lsr Zd.T, Pg3_m, Zd.T_2, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m
+{
+    Zd.T = SVE_lsr(Zd.T, Pg3_m, Zd.T_2, Zn.T);
+}
+
+# lsr_z_zi.xml: LSR (immediate, unpredicated) variant SVE
+# PATTERN x04209400/mask=xff20fc00
+
+:lsr Zd.T_tszh, Zn.T_tszh, "#"^sve_imm_shift
+is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_21=1 & sve_tszl_1920 & sve_imm3_1618 & sve_b_1215=0b1001 & sve_b_11=0 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & sve_imm_shift & Zd.T_tszh & Zn.T_tszh
+{
+    Zd.T_tszh = SVE_lsr(Zd.T_tszh, Zn.T_tszh, sve_imm_shift:1);
+}
+
+# lsr_z_zw.xml: LSR (wide elements, unpredicated) variant SVE
+# PATTERN x04208400/mask=xff20fc00
+
+:lsr Zd.T, Zn.T, Zm.D
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1000 & sve_b_11=0 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Zm.D
+{
+    Zd.T = SVE_lsr(Zd.T, Zn.T, Zm.D);
+}
+
+# lsrr_z_p_zz.xml: LSRR variant SVE
+# PATTERN x04158000/mask=xff3fe000
+
+:lsrr Zd.T, Pg3_m, Zd.T_2, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m
+{
+    Zd.T = SVE_lsrr(Zd.T, Pg3_m, Zd.T_2, Zn.T);
+}
+
+# mad_z_p_zzz.xml: MAD variant SVE
+# PATTERN x0400c000/mask=xff20e000
+
+:mad Zd.T, Pg3_m, Zm.T, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b11 & sve_b_13=0 & sve_pg_1012 & sve_za_0509 & sve_zdn_0004 & Zd.T & Zm.T & Zn.T & Pg3_m
+{
+    Zd.T = SVE_mad(Zd.T, Pg3_m, Zm.T, Zn.T);
+}
+
+# mla_z_p_zzz.xml: MLA variant SVE
+# PATTERN x04004000/mask=xff20e000
+
+:mla Zd.T, Pg3_m, Zn.T, Zm.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b01 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m
+{
+    Zd.T = SVE_mla(Zd.T, Pg3_m, Zn.T, Zm.T);
+}
+
+# mls_z_p_zzz.xml: MLS variant SVE
+# PATTERN x04006000/mask=xff20e000
+
+:mls Zd.T, Pg3_m, Zn.T, Zm.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b01 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m
+{
+    Zd.T = SVE_mls(Zd.T, Pg3_m, Zn.T, Zm.T);
+}
+
+# mov_and_p_p_pp.xml: MOV (predicate, predicated, zeroing) variant Not flag setting
+# ALIASEDBY AND .B, /Z, .B, .B if S == '0' && Pn == Pm
+# PATTERN x25004000/mask=xfff0c210
+
+# SKIPPING mov_and_p_p_pp.xml because x25004000/mask=xfff0c210 has already been defined
+
+# mov_cpy_z_p_i.xml: MOV (immediate, predicated) variant SVE
+# ALIASEDBY CPY ., /, #{, } if Unconditionally
+# PATTERN x05100000/mask=xff308000
+
+# SKIPPING mov_cpy_z_p_i.xml because x05100000/mask=xff308000 has already been defined
+
+# mov_cpy_z_p_r.xml: MOV (scalar, predicated) variant SVE
+# ALIASEDBY CPY ., /M, if Unconditionally
+# PATTERN x0528a000/mask=xff3fe000
+
+# SKIPPING mov_cpy_z_p_r.xml because x0528a000/mask=xff3fe000 has already been defined
+
+# mov_cpy_z_p_v.xml: MOV (SIMD&FP scalar, predicated) variant SVE
+# ALIASEDBY CPY ., /M, if Unconditionally
+# PATTERN x05208000/mask=xff3fe000
+
+# SKIPPING mov_cpy_z_p_v.xml because x05208000/mask=xff3fe000 has already been defined
+
+# mov_dup_z_i.xml: MOV (immediate, unpredicated) variant SVE
+# ALIASEDBY DUP ., #{, } if Unconditionally
+# PATTERN x2538c000/mask=xff3fc000
+
+# SKIPPING mov_dup_z_i.xml because x2538c000/mask=xff3fc000 has already been defined
+
+# mov_dup_z_r.xml: MOV (scalar, unpredicated) variant SVE
+# ALIASEDBY DUP ., if Unconditionally
+# PATTERN x05203800/mask=xff3ffc00
+
+# SKIPPING mov_dup_z_r.xml because x05203800/mask=xff3ffc00 has already been defined
+
+# mov_dup_z_zi.xml: MOV (SIMD&FP scalar, unpredicated) variant SVE
+# ALIASEDBY DUP ., .[0] if BitCount(imm2:tsz) == 1
+# ALIASEDBY DUP ., .[] if BitCount(imm2:tsz) > 1
+# PATTERN
+
+# SKIPPING mov_dup_z_zi.xml because there is a mismatch between the XML asmtemplate(4) and regdiagram(1)
+
+# mov_dupm_z_i.xml: MOV (bitmask immediate) variant SVE
+# ALIASEDBY DUPM ., # if SVEMoveMaskPreferred(imm13)
+# PATTERN x05c00000/mask=xfffc0000
+
+# SKIPPING mov_dupm_z_i.xml because x05c00000/mask=xfffc0000 has already been defined
+
+# mov_orr_p_p_pp.xml: MOV (predicate, unpredicated) variant Not flag setting
+# ALIASEDBY ORR .B, /Z, .B, .B if S == '0' && Pn == Pm && Pm == Pg
+# PATTERN x25804000/mask=xfff0c210
+
+# SKIPPING mov_orr_p_p_pp.xml because it is an alias:
+
+# mov_orr_z_zz.xml: MOV (vector, unpredicated) variant SVE
+# ALIASEDBY ORR .D, .D, .D if Zn == Zm
+# PATTERN x04603000/mask=xffe0fc00
+
+# SKIPPING mov_orr_z_zz.xml because it is an alias:
+
+# mov_sel_p_p_pp.xml: MOV (predicate, predicated, merging) variant SVE
+# ALIASEDBY SEL .B, , .B, .B if Pd == Pm
+# PATTERN x25004210/mask=xfff0c210
+
+# SKIPPING mov_sel_p_p_pp.xml because it is an alias:
+
+# mov_sel_z_p_zz.xml: MOV (vector, predicated) variant SVE
+# ALIASEDBY SEL ., , ., . if Zd == Zm
+# PATTERN x0520c000/mask=xff20c000
+
+# SKIPPING mov_sel_z_p_zz.xml because it is an alias:
+
+# movprfx_z_p_z.xml: MOVPRFX (predicated) variant SVE
+# PATTERN x04102000/mask=xff3ee000
+
+:movprfx Zd.T, Pg3_zm, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_m_16 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Pg3_zm & Zd.T & Zn.T
+{
+    Zd.T = SVE_movprfx(Zd.T, Pg3_zm, Zn.T);
+}
+
+# movprfx_z_z.xml: MOVPRFX (unpredicated) variant SVE
+# PATTERN x0420bc00/mask=xfffffc00
+
+:movprfx Zd, Zn
+is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_1720=0b0000 & sve_b_16=0 & sve_b_1015=0b101111 & sve_zn_0509 & sve_zd_0004 & Zn & Zd
+{
+    Zd = SVE_movprfx(Zd, Zn);
+}
+
+# movs_and_p_p_pp.xml: MOVS (predicated) variant Flag setting
+# ALIASEDBY ANDS .B, /Z, .B, .B if S == '1' && Pn == Pm
+# PATTERN x25404000/mask=xfff0c210
+
+# SKIPPING movs_and_p_p_pp.xml because x25404000/mask=xfff0c210 has already been defined
+
+# movs_orr_p_p_pp.xml: MOVS (unpredicated) variant Flag setting
+# ALIASEDBY ORRS .B, /Z, .B, .B if S == '1' && Pn == Pm && Pm == Pg
+# PATTERN x25c04000/mask=xfff0c210
+
+# SKIPPING movs_orr_p_p_pp.xml because it is an alias:
+
+# msb_z_p_zzz.xml: MSB variant SVE
+# PATTERN x0400e000/mask=xff20e000
+
+:msb Zd.T, Pg3_m, Zm.T, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b11 & sve_b_13=1 & sve_pg_1012 & sve_za_0509 & sve_zdn_0004 & Zd.T & Zm.T & Zn.T & Pg3_m
+{
+    Zd.T = SVE_msb(Zd.T, Pg3_m, Zm.T, Zn.T);
+}
+
+# mul_z_p_zz.xml: MUL (vectors) variant SVE
+# PATTERN x04100000/mask=xff3fe000
+
+:mul Zd.T, Pg3_m, Zd.T_2, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m
+{
+    Zd.T = SVE_mul(Zd.T, Pg3_m, Zd.T_2, Zn.T);
+}
+
+# mul_z_zi.xml: MUL (immediate) variant SVE
+# PATTERN x2530c000/mask=xff3fe000
+
+:mul Zd.T, Zd.T_2, "#"^sve_imm8_1_m128to127
+is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b110 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_m128to127
+{
+    Zd.T = SVE_mul(Zd.T, Zd.T_2, sve_imm8_1_m128to127:1);
+}
+
+# nand_p_p_pp.xml: NAND, NANDS variant Flag setting
+# PATTERN x25c04210/mask=xfff0c210
+
+:nands Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_nands(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# nand_p_p_pp.xml: NAND, NANDS variant Not flag setting
+# PATTERN x25804210/mask=xfff0c210
+
+:nand Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_nand(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# neg_z_p_z.xml: NEG variant SVE
+# PATTERN x0417a000/mask=xff3fe000
+
+:neg Zd.T, Pg3_m, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m
+{
+    Zd.T = SVE_neg(Zd.T, Pg3_m, Zn.T);
+}
+
+# nor_p_p_pp.xml: NOR, NORS variant Flag setting
+# PATTERN x25c04200/mask=xfff0c210
+
+:nors Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_nors(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# nor_p_p_pp.xml: NOR, NORS variant Not flag setting
+# PATTERN x25804200/mask=xfff0c210
+
+:nor Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_nor(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# not_eor_p_p_pp.xml: NOT (predicate) variant Not flag setting
+# ALIASEDBY EOR .B, /Z, .B, .B if Pm == Pg
+# PATTERN x25004200/mask=xfff0c210
+
+# SKIPPING not_eor_p_p_pp.xml because x25004200/mask=xfff0c210 has already been defined
+
+# not_z_p_z.xml: NOT (vector) variant SVE
+# PATTERN x041ea000/mask=xff3fe000
+
+:not Zd.T, Pg3_m, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m
+{
+    Zd.T = SVE_not(Zd.T, Pg3_m, Zn.T);
+}
+
+# nots_eor_p_p_pp.xml: NOTS variant Flag setting
+# ALIASEDBY EORS .B, /Z, .B, .B if Pm == Pg
+# PATTERN x25404200/mask=xfff0c210
+
+# SKIPPING nots_eor_p_p_pp.xml because x25404200/mask=xfff0c210 has already been defined
+
+# orn_orr_z_zi.xml: ORN (immediate) variant SVE
+# ALIASEDBY ORR ., ., #(- - 1) if Never
+# PATTERN x05000000/mask=xfffc0000
+
+:orn Zd.T_imm13, Zd.T_imm13_2, "#"^sve_decode_bit_mask
+is sve_b_2431=0b00000101 & sve_b_23=0 & sve_b_22=0 & sve_b_1821=0b0000 & sve_imm13_0517 & sve_zdn_0004 & sve_decode_bit_mask & Zd.T_imm13 & Zd.T_imm13_2
+{
+    Zd.T_imm13 = SVE_orn(Zd.T_imm13, Zd.T_imm13_2, sve_decode_bit_mask:1);
+}
+
+# orn_p_p_pp.xml: ORN, ORNS (predicates) variant Flag setting
+# PATTERN x25c04010/mask=xfff0c210
+
+:orns Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_orns(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# orn_p_p_pp.xml: ORN, ORNS (predicates) variant Not flag setting
+# PATTERN x25804010/mask=xfff0c210
+
+:orn Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_orn(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# orr_p_p_pp.xml: ORR, ORRS (predicates) variant Flag setting
+# PATTERN x25c04000/mask=xfff0c210
+
+:orrs Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_s_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_orrs(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# orr_p_p_pp.xml: ORR, ORRS (predicates) variant Not flag setting
+# PATTERN x25804000/mask=xfff0c210
+
+:orr Pd.B, Pg_z, Pn.B, Pm.B
+is sve_b_2431=0b00100101 & sve_b_23=1 & sve_s_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B
+{
+    Pd.B = SVE_orr(Pd.B, Pg_z, Pn.B, Pm.B);
+}
+
+# orr_z_p_zz.xml: ORR (vectors, predicated) variant SVE
+# PATTERN x04180000/mask=xff3fe000
+
+:orr Zd.T, Pg3_m, Zd.T_2, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m
+{
+    Zd.T = SVE_orr(Zd.T, Pg3_m, Zd.T_2, Zn.T);
+}
+
+# orr_z_zi.xml: ORR (immediate) variant SVE
+# PATTERN x05000000/mask=xfffc0000
+
+# SKIPPING orr_z_zi.xml because x05000000/mask=xfffc0000 has already been defined
+
+# orr_z_zz.xml: ORR (vectors, unpredicated) variant SVE
+# PATTERN x04603000/mask=xffe0fc00
+
+:orr Zd.D, Zn.D, Zm.D
+is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D
+{
+    Zd.D = SVE_orr(Zd.D, Zn.D, Zm.D);
+}
+
+# orv_r_p_z.xml: ORV variant SVE
+# PATTERN x04182000/mask=xff3fe000
+
+:orv Rd_FPR8, Pg3, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3
+{
+    Rd_FPR8 = SVE_orv(Rd_FPR8, Pg3, Zn.T);
+}
+
+# orv_r_p_z.xml: ORV variant SVE
+# PATTERN x04182000/mask=xff3fe000
+
+:orv Rd_FPR32, Pg3, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3
+{
+    Rd_FPR32 = SVE_orv(Rd_FPR32, Pg3, Zn.T);
+}
+
+# orv_r_p_z.xml: ORV variant SVE
+# PATTERN x04182000/mask=xff3fe000
+
+:orv Rd_FPR16, Pg3, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3
+{
+    Rd_FPR16 = SVE_orv(Rd_FPR16, Pg3, Zn.T);
+}
+
+# orv_r_p_z.xml: ORV variant SVE
+# PATTERN x04182000/mask=xff3fe000
+
+:orv Rd_FPR64, Pg3, Zn.T
+is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3
+{
+    Rd_FPR64 = SVE_orv(Rd_FPR64, Pg3, Zn.T);
+}
+
+# pfalse_p.xml: PFALSE variant SVE
+# PATTERN x2518e400/mask=xfffffff0
+
+:pfalse Pd.B
+is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1021=0b011000111001 & sve_b_0409=0b000000 & sve_pd_0003 & Pd.B
+{
+    Pd.B = SVE_pfalse(Pd.B);
+}
+
+# pfirst_p_p_p.xml: PFIRST variant SVE
+# PATTERN x2558c000/mask=xfffffe10
+
+:pfirst Pd.B, Pn, Pd.B_2
+is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1021=0b011000110000 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pdn_0003 & Pd.B & Pd.B_2 & Pn
+{
+    Pd.B = SVE_pfirst(Pd.B, Pn, Pd.B_2);
+}
+
+# pnext_p_p_p.xml: PNEXT variant SVE
+# PATTERN x2519c400/mask=xff3ffe10
+
+:pnext Pd.T, Pn, Pd.T_2
+is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1021=0b011001110001 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pdn_0003 & Pd.T & Pd.T_2 & Pn
+{
+    Pd.T = SVE_pnext(Pd.T, Pn, Pd.T_2);
+}
+
+# prfb_i_p_ai.xml: PRFB (vector plus immediate) variant 32-bit element
+# PATTERN x8400e000/mask=xffe0e010
+
+:prfb sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to31]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to31 & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to31);
+}
+
+# prfb_i_p_ai.xml: PRFB (vector plus immediate) variant 64-bit element
+# PATTERN xc400e000/mask=xffe0e010
+
+:prfb sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to31]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to31 & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to31);
+}
+
+# prfb_i_p_bi.xml: PRFB (scalar plus immediate) variant SVE
+# PATTERN x85c00000/mask=xffc0e010
+
+:prfb sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31]
+is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31);
+}
+
+# prfb_i_p_br.xml: PRFB (scalar plus scalar) variant SVE
+# PATTERN x8400c000/mask=xffe0e010
+
+:prfb sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# prfb_i_p_bz.xml: PRFB (scalar plus vector) variant 32-bit scaled offset
+# PATTERN x84200000/mask=xffa0e010
+
+:prfb sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod]
+is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# prfb_i_p_bz.xml: PRFB (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc4200000/mask=xffa0e010
+
+:prfb sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod]
+is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# prfb_i_p_bz.xml: PRFB (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc4608000/mask=xffe0e010
+
+:prfb sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D]
+is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3
+{
+    SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D);
+}
+
+# prfd_i_p_ai.xml: PRFD (vector plus immediate) variant 32-bit element
+# PATTERN x8580e000/mask=xffe0e010
+
+:prfd sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to248]
+is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to248 & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to248);
+}
+
+# prfd_i_p_ai.xml: PRFD (vector plus immediate) variant 64-bit element
+# PATTERN xc580e000/mask=xffe0e010
+
+:prfd sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to248]
+is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to248 & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to248);
+}
+
+# prfd_i_p_bi.xml: PRFD (scalar plus immediate) variant SVE
+# PATTERN x85c06000/mask=xffc0e010
+
+:prfd sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31]
+is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31);
+}
+
+# prfd_i_p_br.xml: PRFD (scalar plus scalar) variant SVE
+# PATTERN x8580c000/mask=xffe0e010
+
+:prfd sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"]
+is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64);
+}
+
+# prfd_i_p_bz.xml: PRFD (scalar plus vector) variant 32-bit scaled offset
+# PATTERN x84206000/mask=xffa0e010
+
+:prfd sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #3"]
+is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1);
+}
+
+# prfd_i_p_bz.xml: PRFD (scalar plus vector) variant 32-bit unpacked scaled offset
+# PATTERN xc4206000/mask=xffa0e010
+
+:prfd sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"]
+is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1);
+}
+
+# prfd_i_p_bz.xml: PRFD (scalar plus vector) variant 64-bit scaled offset
+# PATTERN xc460e000/mask=xffe0e010
+
+:prfd sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, "lsl #3"]
+is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3
+{
+    SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D);
+}
+
+# prfh_i_p_ai.xml: PRFH (vector plus immediate) variant 32-bit element
+# PATTERN x8480e000/mask=xffe0e010
+
+:prfh sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to62]
+is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to62 & Pg3
+{
+    SVE_prfh(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to62);
+}
+
+# prfh_i_p_ai.xml: PRFH (vector plus immediate) variant 64-bit element
+# PATTERN xc480e000/mask=xffe0e010
+
+:prfh sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to62]
+is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to62 & Pg3
+{
+    SVE_prfh(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to62);
+}
+
+# prfh_i_p_bi.xml: PRFH (scalar plus immediate) variant SVE
+# PATTERN x85c02000/mask=xffc0e010
+
+:prfh sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31]
+is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3
+{
+    SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31);
+}
+
+# prfh_i_p_br.xml: PRFH (scalar plus scalar) variant SVE
+# PATTERN x8480c000/mask=xffe0e010
+
+:prfh 
sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# prfh_i_p_bz.xml: PRFH (scalar plus vector) variant 32-bit scaled offset +# PATTERN x84202000/mask=xffa0e010 + +:prfh sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] +is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3 +{ + SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# prfh_i_p_bz.xml: PRFH (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc4202000/mask=xffa0e010 + +:prfh sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] +is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3 +{ + SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# prfh_i_p_bz.xml: PRFH (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc460a000/mask=xffe0e010 + +:prfh sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, "lsl #1"] +is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3 +{ + SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D); +} + +# prfw_i_p_ai.xml: PRFW (vector plus immediate) variant 32-bit element +# PATTERN x8500e000/mask=xffe0e010 + +:prfw sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to124] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to124 & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to124); +} + +# prfw_i_p_ai.xml: PRFW (vector plus immediate) variant 64-bit element +# PATTERN xc500e000/mask=xffe0e010 + +:prfw sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to124] +is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to124 & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to124); +} + +# prfw_i_p_bi.xml: PRFW (scalar plus immediate) variant SVE +# PATTERN x85c04000/mask=xffc0e010 + +:prfw sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31] +is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31); +} + +# prfw_i_p_br.xml: PRFW (scalar plus scalar) variant SVE +# PATTERN x8500c000/mask=xffe0e010 + +:prfw sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# prfw_i_p_bz.xml: PRFW (scalar plus vector) variant 32-bit scaled offset +# PATTERN x84204000/mask=xffa0e010 + +:prfw sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] +is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & 
sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# prfw_i_p_bz.xml: PRFW (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xc4204000/mask=xffa0e010 + +:prfw sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] +is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# prfw_i_p_bz.xml: PRFW (scalar plus vector) variant 64-bit scaled offset +# PATTERN xc460c000/mask=xffe0e010 + +:prfw sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, "lsl #2"] +is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3 +{ + SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D); +} + +# ptest_p_p.xml: PTEST variant SVE +# PATTERN x2550c000/mask=xffffc21f + +:ptest Pg, Pn.B +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b01 & sve_b_1419=0b000011 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_01=0 & sve_b_00=0 & Pn.B & Pg +{ + SVE_ptest(Pg, Pn.B); +} + +# ptrue_p_s.xml: PTRUE, PTRUES variant Flag setting +# PATTERN x2519e000/mask=xff3ffc10 + +:ptrues Pd.T^sve_opt_pattern +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1721=0b01100 & sve_b_16=1 & sve_b_1015=0b111000 & sve_pattern_0509 & sve_b_04=0 & sve_pd_0003 & sve_pattern & Pd.T & sve_opt_pattern +{ + Pd.T = SVE_ptrues(Pd.T, sve_opt_pattern); +} + +# ptrue_p_s.xml: PTRUE, PTRUES variant Not flag setting +# PATTERN x2518e000/mask=xff3ffc10 + +:ptrue Pd.T^sve_opt_pattern +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1721=0b01100 & sve_b_16=0 & sve_b_1015=0b111000 & sve_pattern_0509 & sve_b_04=0 & sve_pd_0003 & sve_pattern & Pd.T & sve_opt_pattern +{ + Pd.T = SVE_ptrue(Pd.T, sve_opt_pattern); +} + +# punpkhi_p_p.xml: PUNPKHI, PUNPKLO variant High half +# PATTERN x05314000/mask=xfffffe10 + +:punpkhi Pd.H, Pn.B +is sve_b_1731=0b000001010011000 & sve_b_16=1 & sve_b_1015=0b010000 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.H & Pn.B +{ + Pd.H = SVE_punpkhi(Pd.H, Pn.B); +} + +# punpkhi_p_p.xml: PUNPKHI, PUNPKLO variant Low half +# PATTERN x05304000/mask=xfffffe10 + +:punpklo Pd.H, Pn.B +is sve_b_1731=0b000001010011000 & sve_b_16=0 & sve_b_1015=0b010000 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.H & Pn.B +{ + Pd.H = SVE_punpklo(Pd.H, Pn.B); +} + +# rbit_z_p_z.xml: RBIT variant SVE +# PATTERN x05278000/mask=xff3fe000 + +:rbit Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_rbit(Zd.T, Pg3_m, Zn.T); +} + +# rdffr_p_f.xml: RDFFR (unpredicated) variant SVE +# PATTERN x2519f000/mask=xfffffff0 + +:rdffr Pd.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1021=0b011001111100 & sve_b_0409=0b000000 & sve_pd_0003 & Pd.B +{ + Pd.B = SVE_rdffr(Pd.B); +} + +# rdffr_p_p_f.xml: RDFFR, RDFFRS (predicated) variant Flag setting +# PATTERN x2558f000/mask=xfffffe10 + +:rdffrs Pd.B, Pn_z +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1021=0b011000111100 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 
& sve_pd_0003 & Pd.B & Pn_z +{ + Pd.B = SVE_rdffrs(Pd.B, Pn_z); +} + +# rdffr_p_p_f.xml: RDFFR, RDFFRS (predicated) variant Not flag setting +# PATTERN x2518f000/mask=xfffffe10 + +:rdffr Pd.B, Pn_z +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1021=0b011000111100 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pn_z +{ + Pd.B = SVE_rdffr(Pd.B, Pn_z); +} + +# rdvl_r_i.xml: RDVL variant SVE +# PATTERN x04bf5000/mask=xfffff800 + +:rdvl Rd_GPR64, "#"^sve_imm6_1_m32to31 +is sve_b_2331=0b000001001 & sve_b_22=0 & sve_b_21=1 & sve_b_1720=0b1111 & sve_b_16=1 & sve_b_1115=0b01010 & sve_imm6_0510 & sve_rd_0004 & sve_imm6_1_m32to31 & Rd_GPR64 +{ + Rd_GPR64 = SVE_rdvl(Rd_GPR64, sve_imm6_1_m32to31:1); +} + +# rev_p_p.xml: REV (predicate) variant SVE +# PATTERN x05344000/mask=xff3ffe10 + +:rev Pd.T, Pn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1021=0b110100010000 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T +{ + Pd.T = SVE_rev(Pd.T, Pn.T); +} + +# rev_z_z.xml: REV (vector) variant SVE +# PATTERN x05383800/mask=xff3ffc00 + +:rev Zd.T, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1021=0b111000001110 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T +{ + Zd.T = SVE_rev(Zd.T, Zn.T); +} + +# revb_z_z.xml: REVB, REVH, REVW variant Byte +# PATTERN x05248000/mask=xff3fe000 + +:revb Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_revb(Zd.T, Pg3_m, Zn.T); +} + +# revb_z_z.xml: REVB, REVH, REVW variant Halfword +# PATTERN x05258000/mask=xff3fe000 + +:revh Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_revh(Zd.T, Pg3_m, Zn.T); +} + +# revb_z_z.xml: REVB, REVH, REVW variant Word +# PATTERN x05268000/mask=xff3fe000 + +:revw Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m +{ + Zd.D = SVE_revw(Zd.D, Pg3_m, Zn.D); +} + +# sabd_z_p_zz.xml: SABD variant SVE +# PATTERN x040c0000/mask=xff3fe000 + +:sabd Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_sabd(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# saddv_r_p_z.xml: SADDV variant SVE +# PATTERN x04002000/mask=xff3fe000 + +:saddv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_saddv(Rd_FPR64, Zn.T, Pg3); +} + +# scvtf_z_p_z.xml: SCVTF variant 16-bit to half-precision +# PATTERN x6552a000/mask=xffffe000 + +:scvtf Zd.H, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m +{ + Zd.H = SVE_scvtf(Zd.H, Pg3_m, Zn.H); +} + +# scvtf_z_p_z.xml: SCVTF variant 32-bit to half-precision +# PATTERN x6554a000/mask=xffffe000 + +:scvtf Zd.H, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & 
sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.H & Pg3_m +{ + Zd.H = SVE_scvtf(Zd.H, Pg3_m, Zn.S); +} + +# scvtf_z_p_z.xml: SCVTF variant 32-bit to single-precision +# PATTERN x6594a000/mask=xffffe000 + +:scvtf Zd.S, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m +{ + Zd.S = SVE_scvtf(Zd.S, Pg3_m, Zn.S); +} + +# scvtf_z_p_z.xml: SCVTF variant 32-bit to double-precision +# PATTERN x65d0a000/mask=xffffe000 + +:scvtf Zd.D, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m +{ + Zd.D = SVE_scvtf(Zd.D, Pg3_m, Zn.S); +} + +# scvtf_z_p_z.xml: SCVTF variant 64-bit to half-precision +# PATTERN x6556a000/mask=xffffe000 + +:scvtf Zd.H, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.H & Pg3_m +{ + Zd.H = SVE_scvtf(Zd.H, Pg3_m, Zn.D); +} + +# scvtf_z_p_z.xml: SCVTF variant 64-bit to single-precision +# PATTERN x65d4a000/mask=xffffe000 + +:scvtf Zd.S, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m +{ + Zd.S = SVE_scvtf(Zd.S, Pg3_m, Zn.D); +} + +# scvtf_z_p_z.xml: SCVTF variant 64-bit to double-precision +# PATTERN x65d6a000/mask=xffffe000 + +:scvtf Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m +{ + Zd.D = SVE_scvtf(Zd.D, Pg3_m, Zn.D); +} + +# sdiv_z_p_zz.xml: SDIV variant SVE +# PATTERN x04940000/mask=xffbfe000 + +:sdiv Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m +{ + Zd.T_sz = SVE_sdiv(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); +} + +# sdivr_z_p_zz.xml: SDIVR variant SVE +# PATTERN x04960000/mask=xffbfe000 + +:sdivr Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m +{ + Zd.T_sz = SVE_sdivr(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); +} + +# sdot_z_zzz.xml: SDOT (vectors) variant SVE +# PATTERN x44800000/mask=xffa0fc00 + +:sdot Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz +is sve_b_2431=0b01000100 & sve_b_23=1 & sve_sz_22 & sve_b_21=0 & sve_zm_1620 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zm.Tb_sz & Zd.T_sz & Zn.Tb_sz +{ + Zd.T_sz = SVE_sdot(Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz); +} + +# sdot_z_zzzi.xml: SDOT (indexed) variant 32-bit +# PATTERN x44a00000/mask=xffe0fc00 + +:sdot Zd.S, Zn.B, Zm3.B[sve_i2_1920] +is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.B & Zm3.B +{ + Zd.S = SVE_sdot(Zd.S, Zn.B, Zm3.B, sve_i2_1920:1); +} + +# sdot_z_zzzi.xml: SDOT (indexed) variant 64-bit +# PATTERN x44e00000/mask=xffe0fc00 + +:sdot Zd.D, Zn.H, 
Zm4.H[sve_i1_20] +is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.H & Zm4.H +{ + Zd.D = SVE_sdot(Zd.D, Zn.H, Zm4.H, sve_i1_20:1); +} + +# sel_p_p_pp.xml: SEL (predicates) variant SVE +# PATTERN x25004210/mask=xfff0c210 + +:sel Pd.B, Pg, Pn.B, Pm.B +is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pn.B & Pm.B & Pg +{ + Pd.B = SVE_sel(Pd.B, Pg, Pn.B, Pm.B); +} + +# sel_z_p_zz.xml: SEL (vectors) variant SVE +# PATTERN x0520c000/mask=xff20c000 + +:sel Zd.T, Pg, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1415=0b11 & sve_pg_1013 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T & Pg +{ + Zd.T = SVE_sel(Zd.T, Pg, Zn.T, Zm.T); +} + +# setffr_f.xml: SETFFR variant SVE +# PATTERN x252c9000/mask=xffffffff + +:setffr "" +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b10 & sve_b_1019=0b1100100100 & sve_b_0409=0b000000 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 +unimpl + +# smax_z_p_zz.xml: SMAX (vectors) variant SVE +# PATTERN x04080000/mask=xff3fe000 + +:smax Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_smax(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# smax_z_zi.xml: SMAX (immediate) variant SVE +# PATTERN x2528c000/mask=xff3fe000 + +:smax Zd.T, Zd.T_2, "#"^sve_imm8_1_m128to127 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_m128to127 +{ + Zd.T = SVE_smax(Zd.T, Zd.T_2, sve_imm8_1_m128to127:1); +} + +# smaxv_r_p_z.xml: SMAXV variant SVE +# PATTERN x04082000/mask=xff3fe000 + +:smaxv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_smaxv(Rd_FPR8, Pg3, Zn.T); +} + +# smaxv_r_p_z.xml: SMAXV variant SVE +# PATTERN x04082000/mask=xff3fe000 + +:smaxv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_smaxv(Rd_FPR32, Pg3, Zn.T); +} + +# smaxv_r_p_z.xml: SMAXV variant SVE +# PATTERN x04082000/mask=xff3fe000 + +:smaxv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_smaxv(Rd_FPR16, Pg3, Zn.T); +} + +# smaxv_r_p_z.xml: SMAXV variant SVE +# PATTERN x04082000/mask=xff3fe000 + +:smaxv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_smaxv(Rd_FPR64, Pg3, Zn.T); +} + +# smin_z_p_zz.xml: SMIN (vectors) variant SVE +# PATTERN x040a0000/mask=xff3fe000 + +:smin Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & 
sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_smin(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# smin_z_zi.xml: SMIN (immediate) variant SVE +# PATTERN x252ac000/mask=xff3fe000 + +:smin Zd.T, Zd.T_2, "#"^sve_imm8_1_m128to127 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_m128to127 +{ + Zd.T = SVE_smin(Zd.T, Zd.T_2, sve_imm8_1_m128to127:1); +} + +# sminv_r_p_z.xml: SMINV variant SVE +# PATTERN x040a2000/mask=xff3fe000 + +:sminv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_sminv(Rd_FPR8, Pg3, Zn.T); +} + +# sminv_r_p_z.xml: SMINV variant SVE +# PATTERN x040a2000/mask=xff3fe000 + +:sminv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_sminv(Rd_FPR32, Pg3, Zn.T); +} + +# sminv_r_p_z.xml: SMINV variant SVE +# PATTERN x040a2000/mask=xff3fe000 + +:sminv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_sminv(Rd_FPR16, Pg3, Zn.T); +} + +# sminv_r_p_z.xml: SMINV variant SVE +# PATTERN x040a2000/mask=xff3fe000 + +:sminv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_sminv(Rd_FPR64, Pg3, Zn.T); +} + +# smulh_z_p_zz.xml: SMULH variant SVE +# PATTERN x04120000/mask=xff3fe000 + +:smulh Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_smulh(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# splice_z_p_zz.xml: SPLICE variant SVE +# PATTERN x052c8000/mask=xff3fe000 + +:splice Zd.T, Pg3, Zd.T_2, Zn.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1321=0b101100100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3 +{ + Zd.T = SVE_splice(Zd.T, Pg3, Zd.T_2, Zn.T); +} + +# sqadd_z_zi.xml: SQADD (immediate) variant SVE +# PATTERN x2524c000/mask=xff3fc000 + +:sqadd Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = SVE_sqadd(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# sqadd_z_zz.xml: SQADD (vectors) variant SVE +# PATTERN x04201000/mask=xff20fc00 + +:sqadd Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b10 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_sqadd(Zd.T, Zn.T, Zm.T); +} + +# sqdecb_r_rs.xml: SQDECB variant 32-bit +# PATTERN x0420f800/mask=xfff0fc00 + +:sqdecb Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & 
sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdecb(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecb_r_rs.xml: SQDECB variant 64-bit +# PATTERN x0430f800/mask=xfff0fc00 + +:sqdecb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdecb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecd_r_rs.xml: SQDECD (scalar) variant 32-bit +# PATTERN x04e0f800/mask=xfff0fc00 + +:sqdecd Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdecd(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecd_r_rs.xml: SQDECD (scalar) variant 64-bit +# PATTERN x04f0f800/mask=xfff0fc00 + +:sqdecd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdecd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecd_z_zs.xml: SQDECD (vector) variant SVE +# PATTERN x04e0c800/mask=xfff0fc00 + +:sqdecd Zd.D^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.D = SVE_sqdecd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdech_r_rs.xml: SQDECH (scalar) variant 32-bit +# PATTERN x0460f800/mask=xfff0fc00 + +:sqdech Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdech(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdech_r_rs.xml: SQDECH (scalar) variant 64-bit +# PATTERN x0470f800/mask=xfff0fc00 + +:sqdech Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdech(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdech_z_zs.xml: SQDECH (vector) variant SVE +# PATTERN x0460c800/mask=xfff0fc00 + +:sqdech Zd.H^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.H = SVE_sqdech(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecp_r_p_r.xml: SQDECP (scalar) variant 32-bit +# PATTERN x252a8800/mask=xff3ffe00 + +:sqdecp Rd_GPR64, Pn.T, Rd_GPR32 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=0 & sve_b_1115=0b10001 & 
sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 & Rd_GPR64 +{ + Rd_GPR64 = SVE_sqdecp(Rd_GPR64, Pn.T, Rd_GPR32); +} + +# sqdecp_r_p_r.xml: SQDECP (scalar) variant 64-bit +# PATTERN x252a8c00/mask=xff3ffe00 + +:sqdecp Rd_GPR64, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 +{ + Rd_GPR64 = SVE_sqdecp(Rd_GPR64, Pn.T); +} + +# sqdecp_z_p_z.xml: SQDECP (vector) variant SVE +# PATTERN x252a8000/mask=xff3ffe00 + +:sqdecp Zd.T, Pn +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=0 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn +{ + Zd.T = SVE_sqdecp(Zd.T, Pn); +} + +# sqdecw_r_rs.xml: SQDECW (scalar) variant 32-bit +# PATTERN x04a0f800/mask=xfff0fc00 + +:sqdecw Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdecw(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecw_r_rs.xml: SQDECW (scalar) variant 64-bit +# PATTERN x04b0f800/mask=xfff0fc00 + +:sqdecw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqdecw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqdecw_z_zs.xml: SQDECW (vector) variant SVE +# PATTERN x04a0c800/mask=xfff0fc00 + +:sqdecw Zd.S^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.S = SVE_sqdecw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincb_r_rs.xml: SQINCB variant 32-bit +# PATTERN x0420f000/mask=xfff0fc00 + +:sqincb Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqincb(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincb_r_rs.xml: SQINCB variant 64-bit +# PATTERN x0430f000/mask=xfff0fc00 + +:sqincb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqincb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincd_r_rs.xml: SQINCD (scalar) variant 32-bit +# PATTERN x04e0f000/mask=xfff0fc00 + +:sqincd Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqincd(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincd_r_rs.xml: SQINCD (scalar) variant 64-bit +# PATTERN 
x04f0f000/mask=xfff0fc00 + +:sqincd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqincd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincd_z_zs.xml: SQINCD (vector) variant SVE +# PATTERN x04e0c000/mask=xfff0fc00 + +:sqincd Zd.D^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.D = SVE_sqincd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqinch_r_rs.xml: SQINCH (scalar) variant 32-bit +# PATTERN x0460f000/mask=xfff0fc00 + +:sqinch Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqinch(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqinch_r_rs.xml: SQINCH (scalar) variant 64-bit +# PATTERN x0470f000/mask=xfff0fc00 + +:sqinch Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqinch(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqinch_z_zs.xml: SQINCH (vector) variant SVE +# PATTERN x0460c000/mask=xfff0fc00 + +:sqinch Zd.H^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.H = SVE_sqinch(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincp_r_p_r.xml: SQINCP (scalar) variant 32-bit +# PATTERN x25288800/mask=xff3ffe00 + +:sqincp Rd_GPR64, Pn.T, Rd_GPR32 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 & Rd_GPR64 +{ + Rd_GPR64 = SVE_sqincp(Rd_GPR64, Pn.T, Rd_GPR32); +} + +# sqincp_r_p_r.xml: SQINCP (scalar) variant 64-bit +# PATTERN x25288c00/mask=xff3ffe00 + +:sqincp Rd_GPR64, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 +{ + Rd_GPR64 = SVE_sqincp(Rd_GPR64, Pn.T); +} + +# sqincp_z_p_z.xml: SQINCP (vector) variant SVE +# PATTERN x25288000/mask=xff3ffe00 + +:sqincp Zd.T, Pn +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn +{ + Zd.T = SVE_sqincp(Zd.T, Pn); +} + +# sqincw_r_rs.xml: SQINCW (scalar) variant 32-bit +# PATTERN x04a0f000/mask=xfff0fc00 + +:sqincw Rd_GPR64, Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = 
SVE_sqincw(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincw_r_rs.xml: SQINCW (scalar) variant 64-bit +# PATTERN x04b0f000/mask=xfff0fc00 + +:sqincw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_sqincw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqincw_z_zs.xml: SQINCW (vector) variant SVE +# PATTERN x04a0c000/mask=xfff0fc00 + +:sqincw Zd.S^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.S = SVE_sqincw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# sqsub_z_zi.xml: SQSUB (immediate) variant SVE +# PATTERN x2526c000/mask=xff3fc000 + +:sqsub Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = SVE_sqsub(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# sqsub_z_zz.xml: SQSUB (vectors) variant SVE +# PATTERN x04201800/mask=xff20fc00 + +:sqsub Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_sqsub(Zd.T, Zn.T, Zm.T); +} + +# st1b_z_p_ai.xml: ST1B (vector plus immediate) variant 32-bit element +# PATTERN xe460a000/mask=xffe0e000 + +:st1b "{"^Zd.S^"}", Pg3, [Zn.S^sve_opt5_1_0to31] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Zd.S & sve_opt5_1_0to31 & Pg3 +{ + SVE_st1b(Zd.S, Pg3, Zn.S, sve_opt5_1_0to31); +} + +# st1b_z_p_ai.xml: ST1B (vector plus immediate) variant 64-bit element +# PATTERN xe440a000/mask=xffe0e000 + +:st1b "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to31] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to31 & Pg3 +{ + SVE_st1b(Zd.D, Pg3, Zn.D, sve_opt5_1_0to31); +} + +# st1b_z_p_bi.xml: ST1B (scalar plus immediate) variant SVE +# PATTERN xe400e000/mask=xff90e000 + +:st1b "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & sve_mul4_1_m8to7 & Pg3 +{ + SVE_st1b(Zd.T_size_2122, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# st1b_z_p_br.xml: ST1B (scalar plus scalar) variant SVE +# PATTERN xe4004000/mask=xff80e000 + +:st1b "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st1b(Zd.T_size_2122, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st1b_z_p_bz.xml: ST1B (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xe4008000/mask=xffe0a000 + +:st1b "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=0 
& sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 +{ + SVE_st1b(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1b_z_p_bz.xml: ST1B (scalar plus vector) variant 32-bit unscaled offset +# PATTERN xe4408000/mask=xffe0a000 + +:st1b "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 +{ + SVE_st1b(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# st1b_z_p_bz.xml: ST1B (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xe400a000/mask=xffe0e000 + +:st1b "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1b(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + +# st1d_z_p_ai.xml: ST1D (vector plus immediate) variant SVE +# PATTERN xe5c0a000/mask=xffe0e000 + +:st1d "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to248] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to248 & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Zn.D, sve_opt5_1_0to248); +} + +# st1d_z_p_bi.xml: ST1D (scalar plus immediate) variant SVE +# PATTERN xe580e000/mask=xff90e000 + +:st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & sve_mul4_1_m8to7 & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# st1d_z_p_br.xml: ST1D (scalar plus scalar) variant SVE +# PATTERN xe5804000/mask=xff80e000 + +:st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & Rm_GPR64 & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xe5a08000/mask=xffe0a000 + +:st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xe5808000/mask=xffe0a000 + +:st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 64-bit scaled offset +# PATTERN xe5a0a000/mask=xffe0e000 + +:st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, "lsl #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + 
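+# Note: in the st1d scalar-plus-vector forms above and below, the scaled +# ("lsl #3", sve_mod^" #3") and unscaled 64-bit offset variants decode from +# different opcode bits but pass identical operands to the opaque SVE_st1d +# pcodeop; the offset scaling appears in the disassembly display only, and the +# scatter-store memory effect itself is left unmodelled here.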
+# st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xe580a000/mask=xffe0e000 + +:st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + +# st1h_z_p_ai.xml: ST1H (vector plus immediate) variant 32-bit element +# PATTERN xe4e0a000/mask=xffe0e000 + +:st1h "{"^Zd.S^"}", Pg3, [Zn.S^sve_opt5_1_0to62] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Zd.S & sve_opt5_1_0to62 & Pg3 +{ + SVE_st1h(Zd.S, Pg3, Zn.S, sve_opt5_1_0to62); +} + +# st1h_z_p_ai.xml: ST1H (vector plus immediate) variant 64-bit element +# PATTERN xe4c0a000/mask=xffe0e000 + +:st1h "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to62] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to62 & Pg3 +{ + SVE_st1h(Zd.D, Pg3, Zn.D, sve_opt5_1_0to62); +} + +# st1h_z_p_bi.xml: ST1H (scalar plus immediate) variant SVE +# PATTERN xe480e000/mask=xff90e000 + +:st1h "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & sve_mul4_1_m8to7 & Pg3 +{ + SVE_st1h(Zd.T_size_2122, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# st1h_z_p_br.xml: ST1H (scalar plus scalar) variant SVE +# PATTERN xe4804000/mask=xff80e000 + +:st1h "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st1h(Zd.T_size_2122, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit scaled offset +# PATTERN xe4e08000/mask=xffe0a000 + +:st1h "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 +{ + SVE_st1h(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xe4a08000/mask=xffe0a000 + +:st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 +{ + SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xe4808000/mask=xffe0a000 + +:st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 +{ + SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit unscaled offset +# PATTERN xe4c08000/mask=xffe0a000 + +:st1h "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, 
Zm.S, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 +{ + SVE_st1h(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 64-bit scaled offset +# PATTERN xe4a0a000/mask=xffe0e000 + +:st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, "lsl #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + +# st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xe480a000/mask=xffe0e000 + +:st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + +# st1w_z_p_ai.xml: ST1W (vector plus immediate) variant 32-bit element +# PATTERN xe560a000/mask=xffe0e000 + +:st1w "{"^Zd.S^"}", Pg3, [Zn.S^sve_opt5_1_0to124] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Zd.S & sve_opt5_1_0to124 & Pg3 +{ + SVE_st1w(Zd.S, Pg3, Zn.S, sve_opt5_1_0to124); +} + +# st1w_z_p_ai.xml: ST1W (vector plus immediate) variant 64-bit element +# PATTERN xe540a000/mask=xffe0e000 + +:st1w "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to124] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to124 & Pg3 +{ + SVE_st1w(Zd.D, Pg3, Zn.D, sve_opt5_1_0to124); +} + +# st1w_z_p_bi.xml: ST1W (scalar plus immediate) variant SVE +# PATTERN xe500e000/mask=xff90e000 + +:st1w "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & sve_mul4_1_m8to7 & Pg3 +{ + SVE_st1w(Zd.T_size_2122, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# st1w_z_p_br.xml: ST1W (scalar plus scalar) variant SVE +# PATTERN xe5004000/mask=xff80e000 + +:st1w "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st1w(Zd.T_size_2122, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit scaled offset +# PATTERN xe5608000/mask=xffe0a000 + +:st1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 +{ + SVE_st1w(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit unpacked scaled offset +# PATTERN xe5208000/mask=xffe0a000 + +:st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D 
& Zd.D & sve_mod & Pg3 +{ + SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit unpacked unscaled offset +# PATTERN xe5008000/mask=xffe0a000 + +:st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 +{ + SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); +} + +# st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit unscaled offset +# PATTERN xe5408000/mask=xffe0a000 + +:st1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 +{ + SVE_st1w(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); +} + +# st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 64-bit scaled offset +# PATTERN xe520a000/mask=xffe0e000 + +:st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, "lsl #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + +# st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 64-bit unscaled offset +# PATTERN xe500a000/mask=xffe0e000 + +:st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 +{ + SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); +} + +# st2b_z_p_bi.xml: ST2B (scalar plus immediate) variant SVE +# PATTERN xe430e000/mask=xfff0e000 + +:st2b "{"^Zt.B, Ztt.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 +{ + SVE_st2b(Zt.B, Ztt.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); +} + +# st2b_z_p_br.xml: ST2B (scalar plus scalar) variant SVE +# PATTERN xe4206000/mask=xffe0e000 + +:st2b "{"^Zt.B, Ztt.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st2b(Zt.B, Ztt.B, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st2d_z_p_bi.xml: ST2D (scalar plus immediate) variant SVE +# PATTERN xe5b0e000/mask=xfff0e000 + +:st2d "{"^Zt.D, Ztt.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 +{ + SVE_st2d(Zt.D, Ztt.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); +} + +# st2d_z_p_br.xml: ST2D (scalar plus scalar) variant SVE +# PATTERN xe5a06000/mask=xffe0e000 + +:st2d "{"^Zt.D, Ztt.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st2d(Zt.D, Ztt.D, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st2h_z_p_bi.xml: ST2H (scalar plus immediate) variant SVE +# PATTERN 
xe4b0e000/mask=xfff0e000 + +:st2h "{"^Zt.H, Ztt.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 +{ + SVE_st2h(Zt.H, Ztt.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); +} + +# st2h_z_p_br.xml: ST2H (scalar plus scalar) variant SVE +# PATTERN xe4a06000/mask=xffe0e000 + +:st2h "{"^Zt.H, Ztt.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st2h(Zt.H, Ztt.H, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st2w_z_p_bi.xml: ST2W (scalar plus immediate) variant SVE +# PATTERN xe530e000/mask=xfff0e000 + +:st2w "{"^Zt.S, Ztt.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 +{ + SVE_st2w(Zt.S, Ztt.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); +} + +# st2w_z_p_br.xml: ST2W (scalar plus scalar) variant SVE +# PATTERN xe5206000/mask=xffe0e000 + +:st2w "{"^Zt.S, Ztt.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st2w(Zt.S, Ztt.S, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st3b_z_p_bi.xml: ST3B (scalar plus immediate) variant SVE +# PATTERN xe450e000/mask=xfff0e000 + +:st3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 +{ + SVE_st3b(Zt.B, Ztt.B, Zttt.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); +} + +# st3b_z_p_br.xml: ST3B (scalar plus scalar) variant SVE +# PATTERN xe4406000/mask=xffe0e000 + +:st3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st3b(Zt.B, Ztt.B, Zttt.B, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st3d_z_p_bi.xml: ST3D (scalar plus immediate) variant SVE +# PATTERN xe5d0e000/mask=xfff0e000 + +:st3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 +{ + SVE_st3d(Zt.D, Ztt.D, Zttt.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); +} + +# st3d_z_p_br.xml: ST3D (scalar plus scalar) variant SVE +# PATTERN xe5c06000/mask=xffe0e000 + +:st3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st3d(Zt.D, Ztt.D, Zttt.D, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st3h_z_p_bi.xml: ST3H (scalar plus immediate) variant SVE +# PATTERN 
xe4d0e000/mask=xfff0e000 + +:st3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 +{ + SVE_st3h(Zt.H, Ztt.H, Zttt.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); +} + +# st3h_z_p_br.xml: ST3H (scalar plus scalar) variant SVE +# PATTERN xe4c06000/mask=xffe0e000 + +:st3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st3h(Zt.H, Ztt.H, Zttt.H, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st3w_z_p_bi.xml: ST3W (scalar plus immediate) variant SVE +# PATTERN xe550e000/mask=xfff0e000 + +:st3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 +{ + SVE_st3w(Zt.S, Ztt.S, Zttt.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); +} + +# st3w_z_p_br.xml: ST3W (scalar plus scalar) variant SVE +# PATTERN xe5406000/mask=xffe0e000 + +:st3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st3w(Zt.S, Ztt.S, Zttt.S, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st4b_z_p_bi.xml: ST4B (scalar plus immediate) variant SVE +# PATTERN xe470e000/mask=xfff0e000 + +:st4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 +{ + SVE_st4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); +} + +# st4b_z_p_br.xml: ST4B (scalar plus scalar) variant SVE +# PATTERN xe4606000/mask=xffe0e000 + +:st4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st4d_z_p_bi.xml: ST4D (scalar plus immediate) variant SVE +# PATTERN xe5f0e000/mask=xfff0e000 + +:st4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 +{ + SVE_st4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); +} + +# st4d_z_p_br.xml: ST4D (scalar plus scalar) variant SVE +# PATTERN xe5e06000/mask=xffe0e000 + +:st4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D 
& Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st4h_z_p_bi.xml: ST4H (scalar plus immediate) variant SVE +# PATTERN xe4f0e000/mask=xfff0e000 + +:st4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 +{ + SVE_st4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); +} + +# st4h_z_p_br.xml: ST4H (scalar plus scalar) variant SVE +# PATTERN xe4e06000/mask=xffe0e000 + +:st4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# st4w_z_p_bi.xml: ST4W (scalar plus immediate) variant SVE +# PATTERN xe570e000/mask=xfff0e000 + +:st4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 +{ + SVE_st4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); +} + +# st4w_z_p_br.xml: ST4W (scalar plus scalar) variant SVE +# PATTERN xe5606000/mask=xffe0e000 + +:st4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & Rm_GPR64 & Pg3 +{ + SVE_st4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# stnt1b_z_p_bi.xml: STNT1B (scalar plus immediate) variant SVE +# PATTERN xe410e000/mask=xfff0e000 + +:stnt1b "{"^Zd.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.B & sve_mul4_1_m8to7 & Pg3 +{ + SVE_stnt1b(Zd.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# stnt1b_z_p_br.xml: STNT1B (scalar plus scalar) variant SVE +# PATTERN xe4006000/mask=xffe0e000 + +:stnt1b "{"^Zd.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.B & Rm_GPR64 & Pg3 +{ + SVE_stnt1b(Zd.B, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# stnt1d_z_p_bi.xml: STNT1D (scalar plus immediate) variant SVE +# PATTERN xe590e000/mask=xfff0e000 + +:stnt1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & sve_mul4_1_m8to7 & Pg3 +{ + SVE_stnt1d(Zd.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# stnt1d_z_p_br.xml: STNT1D (scalar plus scalar) variant SVE +# PATTERN xe5806000/mask=xffe0e000 + +:stnt1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & 
sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & Rm_GPR64 & Pg3 +{ + SVE_stnt1d(Zd.D, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# stnt1h_z_p_bi.xml: STNT1H (scalar plus immediate) variant SVE +# PATTERN xe490e000/mask=xfff0e000 + +:stnt1h "{"^Zd.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.H & sve_mul4_1_m8to7 & Pg3 +{ + SVE_stnt1h(Zd.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# stnt1h_z_p_br.xml: STNT1H (scalar plus scalar) variant SVE +# PATTERN xe4806000/mask=xffe0e000 + +:stnt1h "{"^Zd.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] +is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.H & Rm_GPR64 & Pg3 +{ + SVE_stnt1h(Zd.H, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# stnt1w_z_p_bi.xml: STNT1W (scalar plus immediate) variant SVE +# PATTERN xe510e000/mask=xfff0e000 + +:stnt1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.S & sve_mul4_1_m8to7 & Pg3 +{ + SVE_stnt1w(Zd.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); +} + +# stnt1w_z_p_br.xml: STNT1W (scalar plus scalar) variant SVE +# PATTERN xe5006000/mask=xffe0e000 + +:stnt1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] +is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.S & Rm_GPR64 & Pg3 +{ + SVE_stnt1w(Zd.S, Pg3, Rn_GPR64xsp, Rm_GPR64); +} + +# str_p_bi.xml: STR (predicate) variant SVE +# PATTERN xe5800000/mask=xffc0e010 + +:str Pd, [Rn_GPR64xsp^sve_mul9_2_m256to255] +is sve_b_2231=0b1110010110 & sve_imm9h_1621 & sve_b_1315=0b000 & sve_imm9l_1012 & sve_rn_0509 & sve_b_04=0 & sve_pt_0003 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Pd +{ + SVE_str(Pd, Rn_GPR64xsp, sve_mul9_2_m256to255); +} + +# str_z_bi.xml: STR (vector) variant SVE +# PATTERN xe5804000/mask=xffc0e000 + +:str Zd, [Rn_GPR64xsp^sve_mul9_2_m256to255] +is sve_b_2231=0b1110010110 & sve_imm9h_1621 & sve_b_1315=0b010 & sve_imm9l_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Zd +{ + SVE_str(Zd, Rn_GPR64xsp, sve_mul9_2_m256to255); +} + +# sub_z_p_zz.xml: SUB (vectors, predicated) variant SVE +# PATTERN x04010000/mask=xff3fe000 + +:sub Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_sub(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# sub_z_zi.xml: SUB (immediate) variant SVE +# PATTERN x2521c000/mask=xff3fc000 + +:sub Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = SVE_sub(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# sub_z_zz.xml: SUB (vectors, unpredicated) variant SVE +# PATTERN x04200400/mask=xff20fc00 + +:sub Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + 
Zd.T = SVE_sub(Zd.T, Zn.T, Zm.T); +} + +# subr_z_p_zz.xml: SUBR (vectors) variant SVE +# PATTERN x04030000/mask=xff3fe000 + +:subr Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_subr(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# subr_z_zi.xml: SUBR (immediate) variant SVE +# PATTERN x2523c000/mask=xff3fc000 + +:subr Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = SVE_subr(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# sunpkhi_z_z.xml: SUNPKHI, SUNPKLO variant High half +# PATTERN x05313800/mask=xff3ffc00 + +:sunpkhi Zd.T, Zn.Tb +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=0 & sve_b_16=1 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T +{ + Zd.T = SVE_sunpkhi(Zd.T, Zn.Tb); +} + +# sunpkhi_z_z.xml: SUNPKHI, SUNPKLO variant Low half +# PATTERN x05303800/mask=xff3ffc00 + +:sunpklo Zd.T, Zn.Tb +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=0 & sve_b_16=0 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T +{ + Zd.T = SVE_sunpklo(Zd.T, Zn.Tb); +} + +# sxtb_z_p_z.xml: SXTB, SXTH, SXTW variant Byte +# PATTERN x0410a000/mask=xff3fe000 + +:sxtb Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_sxtb(Zd.T, Pg3_m, Zn.T); +} + +# sxtb_z_p_z.xml: SXTB, SXTH, SXTW variant Halfword +# PATTERN x0412a000/mask=xff3fe000 + +:sxth Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_sxth(Zd.T, Pg3_m, Zn.T); +} + +# sxtb_z_p_z.xml: SXTB, SXTH, SXTW variant Word +# PATTERN x0414a000/mask=xff3fe000 + +:sxtw Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m +{ + Zd.D = SVE_sxtw(Zd.D, Pg3_m, Zn.D); +} + +# tbl_z_zz.xml: TBL variant SVE +# PATTERN x05203000/mask=xff20fc00 + +:tbl Zd.T, "{"^Zn.T^"}", Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_tbl(Zd.T, Zn.T, Zm.T); +} + +# trn1_p_pp.xml: TRN1, TRN2 (predicates) variant Even +# PATTERN x05205000/mask=xff30fe10 + +:trn1 Pd.T, Pn.T, Pm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=1 & sve_b_11=0 & sve_b_10=0 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T +{ + Pd.T = SVE_trn1(Pd.T, Pn.T, Pm.T); +} + +# trn1_p_pp.xml: TRN1, TRN2 (predicates) variant Odd +# PATTERN x05205400/mask=xff30fe10 + +:trn2 Pd.T, Pn.T, Pm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=1 & sve_b_11=0 & sve_b_10=1 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T +{ + Pd.T = SVE_trn2(Pd.T, Pn.T, Pm.T); +} + +# trn1_z_zz.xml: TRN1, TRN2 (vectors) variant Even +# PATTERN x05207000/mask=xff20fc00 
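+# (Anatomy of a generated constructor, using trn1 below as the example: the
+# display section names the operands, the 'is' section pins the fixed opcode
+# bits from the PATTERN/mask comment and decodes the variable fields such as
+# sve_zm_1620 and sve_zd_0004, and the semantics block forwards to an SVE_*
+# user-defined operation, presumably declared as a pcodeop earlier in this
+# spec, so the instruction disassembles correctly while its vector behaviour
+# stays an opaque CALLOTHER for p-code consumers.)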
+ +:trn1 Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b10 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_trn1(Zd.T, Zn.T, Zm.T); +} + +# trn1_z_zz.xml: TRN1, TRN2 (vectors) variant Odd +# PATTERN x05207400/mask=xff20fc00 + +:trn2 Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b10 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_trn2(Zd.T, Zn.T, Zm.T); +} + +# uabd_z_p_zz.xml: UABD variant SVE +# PATTERN x040d0000/mask=xff3fe000 + +:uabd Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_uabd(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# uaddv_r_p_z.xml: UADDV variant SVE +# PATTERN x04012000/mask=xff3fe000 + +:uaddv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_uaddv(Rd_FPR64, Zn.T, Pg3); +} + +# ucvtf_z_p_z.xml: UCVTF variant 16-bit to half-precision +# PATTERN x6553a000/mask=xffffe000 + +:ucvtf Zd.H, Pg3_m, Zn.H +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m +{ + Zd.H = SVE_ucvtf(Zd.H, Pg3_m, Zn.H); +} + +# ucvtf_z_p_z.xml: UCVTF variant 32-bit to half-precision +# PATTERN x6555a000/mask=xffffe000 + +:ucvtf Zd.H, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.H & Pg3_m +{ + Zd.H = SVE_ucvtf(Zd.H, Pg3_m, Zn.S); +} + +# ucvtf_z_p_z.xml: UCVTF variant 32-bit to single-precision +# PATTERN x6595a000/mask=xffffe000 + +:ucvtf Zd.S, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m +{ + Zd.S = SVE_ucvtf(Zd.S, Pg3_m, Zn.S); +} + +# ucvtf_z_p_z.xml: UCVTF variant 32-bit to double-precision +# PATTERN x65d1a000/mask=xffffe000 + +:ucvtf Zd.D, Pg3_m, Zn.S +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m +{ + Zd.D = SVE_ucvtf(Zd.D, Pg3_m, Zn.S); +} + +# ucvtf_z_p_z.xml: UCVTF variant 64-bit to half-precision +# PATTERN x6557a000/mask=xffffe000 + +:ucvtf Zd.H, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.H & Pg3_m +{ + Zd.H = SVE_ucvtf(Zd.H, Pg3_m, Zn.D); +} + +# ucvtf_z_p_z.xml: UCVTF variant 64-bit to single-precision +# PATTERN x65d5a000/mask=xffffe000 + +:ucvtf Zd.S, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m +{ + Zd.S = SVE_ucvtf(Zd.S, Pg3_m, Zn.D); +} + +# ucvtf_z_p_z.xml: UCVTF variant 64-bit to double-precision +# PATTERN x65d7a000/mask=xffffe000 + +:ucvtf 
Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m +{ + Zd.D = SVE_ucvtf(Zd.D, Pg3_m, Zn.D); +} + +# udiv_z_p_zz.xml: UDIV variant SVE +# PATTERN x04950000/mask=xffbfe000 + +:udiv Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m +{ + Zd.T_sz = SVE_udiv(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); +} + +# udivr_z_p_zz.xml: UDIVR variant SVE +# PATTERN x04970000/mask=xffbfe000 + +:udivr Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m +{ + Zd.T_sz = SVE_udivr(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); +} + +# udot_z_zzz.xml: UDOT (vectors) variant SVE +# PATTERN x44800400/mask=xffa0fc00 + +:udot Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz +is sve_b_2431=0b01000100 & sve_b_23=1 & sve_sz_22 & sve_b_21=0 & sve_zm_1620 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zm.Tb_sz & Zd.T_sz & Zn.Tb_sz +{ + Zd.T_sz = SVE_udot(Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz); +} + +# udot_z_zzzi.xml: UDOT (indexed) variant 32-bit +# PATTERN x44a00400/mask=xffe0fc00 + +:udot Zd.S, Zn.B, Zm3.B[sve_i2_1920] +is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.B & Zm3.B +{ + Zd.S = SVE_udot(Zd.S, Zn.B, Zm3.B, sve_i2_1920:1); +} + +# udot_z_zzzi.xml: UDOT (indexed) variant 64-bit +# PATTERN x44e00400/mask=xffe0fc00 + +:udot Zd.D, Zn.H, Zm4.H[sve_i1_20] +is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.H & Zm4.H +{ + Zd.D = SVE_udot(Zd.D, Zn.H, Zm4.H, sve_i1_20:1); +} + +# umax_z_p_zz.xml: UMAX (vectors) variant SVE +# PATTERN x04090000/mask=xff3fe000 + +:umax Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_umax(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# umax_z_zi.xml: UMAX (immediate) variant SVE +# PATTERN x2529c000/mask=xff3fe000 + +:umax Zd.T, Zd.T_2, "#"^sve_imm8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 +{ + Zd.T = SVE_umax(Zd.T, Zd.T_2, sve_imm8_1_0to255:1); +} + +# umaxv_r_p_z.xml: UMAXV variant SVE +# PATTERN x04092000/mask=xff3fe000 + +:umaxv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_umaxv(Rd_FPR8, Pg3, Zn.T); +} + +# umaxv_r_p_z.xml: UMAXV variant SVE +# PATTERN x04092000/mask=xff3fe000 + +:umaxv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_umaxv(Rd_FPR32, Pg3, Zn.T); 
+} + +# umaxv_r_p_z.xml: UMAXV variant SVE +# PATTERN x04092000/mask=xff3fe000 + +:umaxv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_umaxv(Rd_FPR16, Pg3, Zn.T); +} + +# umaxv_r_p_z.xml: UMAXV variant SVE +# PATTERN x04092000/mask=xff3fe000 + +:umaxv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_umaxv(Rd_FPR64, Pg3, Zn.T); +} + +# umin_z_p_zz.xml: UMIN (vectors) variant SVE +# PATTERN x040b0000/mask=xff3fe000 + +:umin Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_umin(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# umin_z_zi.xml: UMIN (immediate) variant SVE +# PATTERN x252bc000/mask=xff3fe000 + +:umin Zd.T, Zd.T_2, "#"^sve_imm8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 +{ + Zd.T = SVE_umin(Zd.T, Zd.T_2, sve_imm8_1_0to255:1); +} + +# uminv_r_p_z.xml: UMINV variant SVE +# PATTERN x040b2000/mask=xff3fe000 + +:uminv Rd_FPR8, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 +{ + Rd_FPR8 = SVE_uminv(Rd_FPR8, Pg3, Zn.T); +} + +# uminv_r_p_z.xml: UMINV variant SVE +# PATTERN x040b2000/mask=xff3fe000 + +:uminv Rd_FPR32, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 +{ + Rd_FPR32 = SVE_uminv(Rd_FPR32, Pg3, Zn.T); +} + +# uminv_r_p_z.xml: UMINV variant SVE +# PATTERN x040b2000/mask=xff3fe000 + +:uminv Rd_FPR16, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 +{ + Rd_FPR16 = SVE_uminv(Rd_FPR16, Pg3, Zn.T); +} + +# uminv_r_p_z.xml: UMINV variant SVE +# PATTERN x040b2000/mask=xff3fe000 + +:uminv Rd_FPR64, Pg3, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 +{ + Rd_FPR64 = SVE_uminv(Rd_FPR64, Pg3, Zn.T); +} + +# umulh_z_p_zz.xml: UMULH variant SVE +# PATTERN x04130000/mask=xff3fe000 + +:umulh Zd.T, Pg3_m, Zd.T_2, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m +{ + Zd.T = SVE_umulh(Zd.T, Pg3_m, Zd.T_2, Zn.T); +} + +# uqadd_z_zi.xml: UQADD (immediate) variant SVE +# PATTERN x2525c000/mask=xff3fc000 + +:uqadd Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = 
SVE_uqadd(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# uqadd_z_zz.xml: UQADD (vectors) variant SVE +# PATTERN x04201400/mask=xff20fc00 + +:uqadd Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b10 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_uqadd(Zd.T, Zn.T, Zm.T); +} + +# uqdecb_r_rs.xml: UQDECB variant 32-bit +# PATTERN x0420fc00/mask=xfff0fc00 + +:uqdecb Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqdecb(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecb_r_rs.xml: UQDECB variant 64-bit +# PATTERN x0430fc00/mask=xfff0fc00 + +:uqdecb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqdecb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecd_r_rs.xml: UQDECD (scalar) variant 32-bit +# PATTERN x04e0fc00/mask=xfff0fc00 + +:uqdecd Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqdecd(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecd_r_rs.xml: UQDECD (scalar) variant 64-bit +# PATTERN x04f0fc00/mask=xfff0fc00 + +:uqdecd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqdecd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecd_z_zs.xml: UQDECD (vector) variant SVE +# PATTERN x04e0cc00/mask=xfff0fc00 + +:uqdecd Zd.D^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.D = SVE_uqdecd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdech_r_rs.xml: UQDECH (scalar) variant 32-bit +# PATTERN x0460fc00/mask=xfff0fc00 + +:uqdech Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqdech(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdech_r_rs.xml: UQDECH (scalar) variant 64-bit +# PATTERN x0470fc00/mask=xfff0fc00 + +:uqdech Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqdech(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdech_z_zs.xml: UQDECH (vector) variant SVE +# PATTERN x0460cc00/mask=xfff0fc00 + +:uqdech Zd.H^sve_mul_pattern +is 
sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.H = SVE_uqdech(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecp_r_p_r.xml: UQDECP (scalar) variant 32-bit +# PATTERN x252b8800/mask=xff3ffe00 + +:uqdecp Rd_GPR32, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 +{ + Rd_GPR32 = SVE_uqdecp(Rd_GPR32, Pn.T); +} + +# uqdecp_r_p_r.xml: UQDECP (scalar) variant 64-bit +# PATTERN x252b8c00/mask=xff3ffe00 + +:uqdecp Rd_GPR64, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 +{ + Rd_GPR64 = SVE_uqdecp(Rd_GPR64, Pn.T); +} + +# uqdecp_z_p_z.xml: UQDECP (vector) variant SVE +# PATTERN x252b8000/mask=xff3ffe00 + +:uqdecp Zd.T, Pn +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=1 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn +{ + Zd.T = SVE_uqdecp(Zd.T, Pn); +} + +# uqdecw_r_rs.xml: UQDECW (scalar) variant 32-bit +# PATTERN x04a0fc00/mask=xfff0fc00 + +:uqdecw Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqdecw(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecw_r_rs.xml: UQDECW (scalar) variant 64-bit +# PATTERN x04b0fc00/mask=xfff0fc00 + +:uqdecw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqdecw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqdecw_z_zs.xml: UQDECW (vector) variant SVE +# PATTERN x04a0cc00/mask=xfff0fc00 + +:uqdecw Zd.S^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.S = SVE_uqdecw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincb_r_rs.xml: UQINCB variant 32-bit +# PATTERN x0420f400/mask=xfff0fc00 + +:uqincb Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqincb(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincb_r_rs.xml: UQINCB variant 64-bit +# PATTERN x0430f400/mask=xfff0fc00 + +:uqincb Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqincb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincd_r_rs.xml: UQINCD (scalar) variant 32-bit +# PATTERN x04e0f400/mask=xfff0fc00 + +:uqincd 
Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqincd(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincd_r_rs.xml: UQINCD (scalar) variant 64-bit +# PATTERN x04f0f400/mask=xfff0fc00 + +:uqincd Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqincd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincd_z_zs.xml: UQINCD (vector) variant SVE +# PATTERN x04e0c400/mask=xfff0fc00 + +:uqincd Zd.D^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.D = SVE_uqincd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqinch_r_rs.xml: UQINCH (scalar) variant 32-bit +# PATTERN x0460f400/mask=xfff0fc00 + +:uqinch Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqinch(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqinch_r_rs.xml: UQINCH (scalar) variant 64-bit +# PATTERN x0470f400/mask=xfff0fc00 + +:uqinch Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqinch(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqinch_z_zs.xml: UQINCH (vector) variant SVE +# PATTERN x0460c400/mask=xfff0fc00 + +:uqinch Zd.H^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.H = SVE_uqinch(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincp_r_p_r.xml: UQINCP (scalar) variant 32-bit +# PATTERN x25298800/mask=xff3ffe00 + +:uqincp Rd_GPR32, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 +{ + Rd_GPR32 = SVE_uqincp(Rd_GPR32, Pn.T); +} + +# uqincp_r_p_r.xml: UQINCP (scalar) variant 64-bit +# PATTERN x25298c00/mask=xff3ffe00 + +:uqincp Rd_GPR64, Pn.T +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 +{ + Rd_GPR64 = SVE_uqincp(Rd_GPR64, Pn.T); +} + +# uqincp_z_p_z.xml: UQINCP (vector) variant SVE +# PATTERN x25298000/mask=xff3ffe00 + +:uqincp Zd.T, Pn +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn +{ + Zd.T = SVE_uqincp(Zd.T, Pn); +} + +# uqincw_r_rs.xml: UQINCW (scalar) variant 32-bit +# PATTERN 
x04a0f400/mask=xfff0fc00 + +:uqincw Rd_GPR32^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR32 = SVE_uqincw(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincw_r_rs.xml: UQINCW (scalar) variant 64-bit +# PATTERN x04b0f400/mask=xfff0fc00 + +:uqincw Rd_GPR64^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern +{ + Rd_GPR64 = SVE_uqincw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqincw_z_zs.xml: UQINCW (vector) variant SVE +# PATTERN x04a0c400/mask=xfff0fc00 + +:uqincw Zd.S^sve_mul_pattern +is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern +{ + Zd.S = SVE_uqincw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); +} + +# uqsub_z_zi.xml: UQSUB (immediate) variant SVE +# PATTERN x2527c000/mask=xff3fc000 + +:uqsub Zd.T, Zd.T_2, sve_shf8_1_0to255 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 +{ + Zd.T = SVE_uqsub(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); +} + +# uqsub_z_zz.xml: UQSUB (vectors) variant SVE +# PATTERN x04201c00/mask=xff20fc00 + +:uqsub Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_uqsub(Zd.T, Zn.T, Zm.T); +} + +# uunpkhi_z_z.xml: UUNPKHI, UUNPKLO variant High half +# PATTERN x05333800/mask=xff3ffc00 + +:uunpkhi Zd.T, Zn.Tb +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=1 & sve_b_16=1 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T +{ + Zd.T = SVE_uunpkhi(Zd.T, Zn.Tb); +} + +# uunpkhi_z_z.xml: UUNPKHI, UUNPKLO variant Low half +# PATTERN x05323800/mask=xff3ffc00 + +:uunpklo Zd.T, Zn.Tb +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=1 & sve_b_16=0 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T +{ + Zd.T = SVE_uunpklo(Zd.T, Zn.Tb); +} + +# uxtb_z_p_z.xml: UXTB, UXTH, UXTW variant Byte +# PATTERN x0411a000/mask=xff3fe000 + +:uxtb Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_uxtb(Zd.T, Pg3_m, Zn.T); +} + +# uxtb_z_p_z.xml: UXTB, UXTH, UXTW variant Halfword +# PATTERN x0413a000/mask=xff3fe000 + +:uxth Zd.T, Pg3_m, Zn.T +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m +{ + Zd.T = SVE_uxth(Zd.T, Pg3_m, Zn.T); +} + +# uxtb_z_p_z.xml: UXTB, UXTH, UXTW variant Word +# PATTERN x0415a000/mask=xff3fe000 + +:uxtw Zd.D, Pg3_m, Zn.D +is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & 
Pg3_m +{ + Zd.D = SVE_uxtw(Zd.D, Pg3_m, Zn.D); +} + +# uzp1_p_pp.xml: UZP1, UZP2 (predicates) variant Even +# PATTERN x05204800/mask=xff30fe10 + +:uzp1 Pd.T, Pn.T, Pm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=1 & sve_b_10=0 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T +{ + Pd.T = SVE_uzp1(Pd.T, Pn.T, Pm.T); +} + +# uzp1_p_pp.xml: UZP1, UZP2 (predicates) variant Odd +# PATTERN x05204c00/mask=xff30fe10 + +:uzp2 Pd.T, Pn.T, Pm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=1 & sve_b_10=1 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T +{ + Pd.T = SVE_uzp2(Pd.T, Pn.T, Pm.T); +} + +# uzp1_z_zz.xml: UZP1, UZP2 (vectors) variant Even +# PATTERN x05206800/mask=xff20fc00 + +:uzp1 Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b01 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_uzp1(Zd.T, Zn.T, Zm.T); +} + +# uzp1_z_zz.xml: UZP1, UZP2 (vectors) variant Odd +# PATTERN x05206c00/mask=xff20fc00 + +:uzp2 Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b01 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_uzp2(Zd.T, Zn.T, Zm.T); +} + +# whilele_p_p_rr.xml: WHILELE variant SVE +# PATTERN x25200410/mask=xff20ec10 + +:whilele Pd.T, Rn_GPR64, Rm_GPR64 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 +{ + Pd.T = SVE_whilele(Pd.T, Rn_GPR64, Rm_GPR64); +} + +# whilele_p_p_rr.xml: WHILELE variant SVE +# PATTERN x25200410/mask=xff20ec10 + +:whilele Pd.T, Rn_GPR32, Rm_GPR32 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 +{ + Pd.T = SVE_whilele(Pd.T, Rn_GPR32, Rm_GPR32); +} + +# whilelo_p_p_rr.xml: WHILELO variant SVE +# PATTERN x25200c00/mask=xff20ec10 + +:whilelo Pd.T, Rn_GPR64, Rm_GPR64 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 +{ + Pd.T = SVE_whilelo(Pd.T, Rn_GPR64, Rm_GPR64); +} + +# whilelo_p_p_rr.xml: WHILELO variant SVE +# PATTERN x25200c00/mask=xff20ec10 + +:whilelo Pd.T, Rn_GPR32, Rm_GPR32 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 +{ + Pd.T = SVE_whilelo(Pd.T, Rn_GPR32, Rm_GPR32); +} + +# whilels_p_p_rr.xml: WHILELS variant SVE +# PATTERN x25200c10/mask=xff20ec10 + +:whilels Pd.T, Rn_GPR64, Rm_GPR64 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 +{ + Pd.T = SVE_whilels(Pd.T, Rn_GPR64, Rm_GPR64); +} + +# whilels_p_p_rr.xml: WHILELS variant SVE +# PATTERN x25200c10/mask=xff20ec10 + +:whilels Pd.T, Rn_GPR32, Rm_GPR32 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & 
sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 +{ + Pd.T = SVE_whilels(Pd.T, Rn_GPR32, Rm_GPR32); +} + +# whilelt_p_p_rr.xml: WHILELT variant SVE +# PATTERN x25200400/mask=xff20ec10 + +:whilelt Pd.T, Rn_GPR64, Rm_GPR64 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 +{ + Pd.T = SVE_whilelt(Pd.T, Rn_GPR64, Rm_GPR64); +} + +# whilelt_p_p_rr.xml: WHILELT variant SVE +# PATTERN x25200400/mask=xff20ec10 + +:whilelt Pd.T, Rn_GPR32, Rm_GPR32 +is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 +{ + Pd.T = SVE_whilelt(Pd.T, Rn_GPR32, Rm_GPR32); +} + +# wrffr_f_p.xml: WRFFR variant SVE +# PATTERN x25289000/mask=xfffffe1f + +:wrffr Pn.B +is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b10 & sve_b_1019=0b1000100100 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Pn.B +{ + SVE_wrffr(Pn.B); +} + +# zip1_p_pp.xml: ZIP1, ZIP2 (predicates) variant High halves +# PATTERN x05204400/mask=xff30fe10 + +:zip2 Pd.T, Pn.T, Pm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=0 & sve_b_10=1 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T +{ + Pd.T = SVE_zip2(Pd.T, Pn.T, Pm.T); +} + +# zip1_p_pp.xml: ZIP1, ZIP2 (predicates) variant Low halves +# PATTERN x05204000/mask=xff30fe10 + +:zip1 Pd.T, Pn.T, Pm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=0 & sve_b_10=0 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T +{ + Pd.T = SVE_zip1(Pd.T, Pn.T, Pm.T); +} + +# zip1_z_zz.xml: ZIP1, ZIP2 (vectors) variant High halves +# PATTERN x05206400/mask=xff20fc00 + +:zip2 Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b00 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_zip2(Zd.T, Zn.T, Zm.T); +} + +# zip1_z_zz.xml: ZIP1, ZIP2 (vectors) variant Low halves +# PATTERN x05206000/mask=xff20fc00 + +:zip1 Zd.T, Zn.T, Zm.T +is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b00 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T +{ + Zd.T = SVE_zip1(Zd.T, Zn.T, Zm.T); +} diff --git a/src/third-party/sleigh/processors/AARCH64/data/languages/AppleSilicon.ldefs b/src/third-party/sleigh/processors/AARCH64/data/languages/AppleSilicon.ldefs new file mode 100644 index 00000000..f8c0e540 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/languages/AppleSilicon.ldefs @@ -0,0 +1,18 @@ + + + + AppleSilicon ARM v8.5-A LE instructions, LE data, AMX extensions + + + + + + diff --git a/src/third-party/sleigh/processors/AARCH64/data/manuals/AARCH64.idx b/src/third-party/sleigh/processors/AARCH64/data/manuals/AARCH64.idx new file mode 100644 index 00000000..3122151d --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/manuals/AARCH64.idx @@ -0,0 +1,585 @@ +@DDI0487G_b_armv8_arm.pdf[ARM Architecture Reference Manual - ARMv8, for ARMv8-A architecture profile, 22 July 2021 (ARM DDI DDI0487G.b)] + +abs, 1525 +adc, 876 +adcs, 878 +add, 880 +addg, 887 +addhn, 1529 +addp, 1531 +adds, 888 +addv, 1535 +adr, 895 +adrp, 
896 +aesd, 1537 +aese, 1538 +aesimc, 1539 +aesmc, 1540 +and, 897 +ands, 901 +asr, 905 +asrv, 909 +at, 911 +autda, 913 +autdb, 914 +autia, 915 +autib, 917 +axflag, 919 +b, 920 +bcax, 1543 +bfc, 922 +bfcvt, 1545 +bfcvtn, 1546 +bfdot, 1548 +bfi, 924 +bfm, 926 +bfmlalb, 1552 +bfmmla, 1556 +bfxil, 928 +bic, 930 +bics, 932 +bif, 1561 +bit, 1563 +bl, 934 +blr, 935 +blraa, 936 +br, 938 +braa, 939 +brk, 941 +bsl, 1565 +bti, 942 +cas, 951 +casb, 944 +cash, 946 +casp, 948 +cbnz, 954 +cbz, 955 +ccmn, 956 +ccmp, 960 +cfinv, 964 +cfp, 965 +cinc, 966 +cinv, 968 +clrex, 970 +cls, 971 +clz, 973 +cmeq, 1571 +cmge, 1576 +cmgt, 1582 +cmhi, 1588 +cmhs, 1591 +cmle, 1594 +cmlt, 1597 +cmn, 974 +cmp, 980 +cmpp, 986 +cmtst, 1599 +cneg, 987 +cnt, 1601 +cpp, 989 +crc32b, 990 +crc32cb, 992 +csdb, 994 +csel, 995 +cset, 997 +csetm, 999 +csinc, 1001 +csinv, 1003 +csneg, 1005 +dc, 1007 +dcps1, 1009 +dcps2, 1010 +dcps3, 1011 +dgh, 1012 +dmb, 1013 +drps, 1015 +dsb, 1016 +dup, 1603 +dvp, 1019 +eon, 1020 +eor, 1022 +eor3, 1610 +eret, 1026 +eretaa, 1027 +esb, 1028 +ext, 1611 +extr, 1029 +fabd, 1613 +fabs, 1616 +facge, 1620 +facgt, 1624 +fadd, 1628 +faddp, 1632 +fcadd, 1636 +fccmp, 1638 +fccmpe, 1640 +fcmeq, 1642 +fcmge, 1649 +fcmgt, 1656 +fcmla, 1663 +fcmle, 1669 +fcmlt, 1672 +fcmp, 1675 +fcmpe, 1677 +fcsel, 1679 +fcvt, 1681 +fcvtas, 1683 +fcvtau, 1688 +fcvtl, 1693 +fcvtms, 1695 +fcvtmu, 1700 +fcvtn, 1705 +fcvtns, 1707 +fcvtnu, 1712 +fcvtps, 1717 +fcvtpu, 1722 +fcvtxn, 1727 +fcvtzs, 1730 +fcvtzu, 1740 +fdiv, 1750 +fjcvtzs, 1754 +fmadd, 1755 +fmax, 1757 +fmaxnm, 1761 +fmaxnmp, 1765 +fmaxnmv, 1769 +fmaxp, 1771 +fmaxv, 1775 +fmin, 1777 +fminnm, 1781 +fminnmp, 1785 +fminnmv, 1789 +fminp, 1791 +fminv, 1795 +fmla, 1797 +fmlal, 1803 +fmls, 1807 +fmlsl, 1813 +fmov, 1817 +fmsub, 1826 +fmul, 1828 +fmulx, 1836 +fneg, 1843 +fnmadd, 1847 +fnmsub, 1849 +fnmul, 1851 +frecpe, 1853 +frecps, 1856 +frecpx, 1859 +frint32x, 1861 +frint32z, 1865 +frint64x, 1869 +frint64z, 1873 +frinta, 1877 +frinti, 1881 +frintm, 1885 +frintn, 1889 +frintp, 1893 +frintx, 1897 +frintz, 1901 +frsqrte, 1905 +frsqrts, 1908 +fsqrt, 1911 +fsub, 1915 +gmi, 1031 +hint, 1032 +hlt, 1034 +hvc, 1035 +ic, 1036 +ins, 1919 +irg, 1037 +isb, 1039 +ld1, 1923 +ld1r, 1931 +ld2, 1934 +ld2r, 1941 +ld3, 1944 +ld3r, 1951 +ld4, 1954 +ld4r, 1961 +ld64b, 1040 +ldadd, 1045 +ldaddb, 1041 +ldaddh, 1043 +ldapr, 1048 +ldaprb, 1050 +ldaprh, 1052 +ldapur, 1054 +ldapurb, 1056 +ldapurh, 1058 +ldapursb, 1060 +ldapursh, 1062 +ldapursw, 1064 +ldar, 1066 +ldarb, 1068 +ldarh, 1069 +ldaxp, 1070 +ldaxr, 1072 +ldaxrb, 1074 +ldaxrh, 1075 +ldclr, 1080 +ldclrb, 1076 +ldclrh, 1078 +ldeor, 1087 +ldeorb, 1083 +ldeorh, 1085 +ldg, 1090 +ldgm, 1091 +ldlar, 1095 +ldlarb, 1093 +ldlarh, 1094 +ldnp, 1097 +ldp, 1099 +ldpsw, 1103 +ldr, 1106 +ldraa, 1113 +ldrb, 1115 +ldrh, 1120 +ldrsb, 1125 +ldrsh, 1131 +ldrsw, 1137 +ldset, 1147 +ldsetb, 1143 +ldseth, 1145 +ldsmax, 1154 +ldsmaxb, 1150 +ldsmaxh, 1152 +ldsmin, 1161 +ldsminb, 1157 +ldsminh, 1159 +ldtr, 1164 +ldtrb, 1166 +ldtrh, 1168 +ldtrsb, 1170 +ldtrsh, 1172 +ldtrsw, 1174 +ldumax, 1180 +ldumaxb, 1176 +ldumaxh, 1178 +ldumin, 1187 +lduminb, 1183 +lduminh, 1185 +ldur, 1190 +ldurb, 1192 +ldurh, 1193 +ldursb, 1194 +ldursh, 1196 +ldursw, 1198 +ldxp, 1199 +ldxr, 1201 +ldxrb, 1203 +ldxrh, 1204 +lsl, 1205 +lslv, 1209 +lsr, 1211 +lsrv, 1215 +madd, 1217 +mla, 1981 +mls, 1985 +mneg, 1219 +mov, 1221 +movi, 1998 +movk, 1230 +movn, 1232 +movz, 1234 +mrs, 1236 +msr, 1237 +msub, 1241 +mul, 1243 +mvn, 1244 +mvni, 2006 +neg, 1246 +negs, 1248 +ngc, 1250 +ngcs, 1252 +nop, 1254 +not, 2011 +orn, 
1255 +orr, 1257 +pacda, 1261 +pacdb, 1262 +pacga, 1263 +pacia, 1264 +pacib, 1267 +pmul, 2019 +pmull, 2021 +prfm, 1270 +prfum, 1276 +psb, 1278 +pssbb, 1279 +raddhn, 2023 +rax1, 2025 +rbit, 1280 +ret, 1282 +retaa, 1283 +rev, 1284 +rev16, 1286 +rev32, 1288 +rev64, 1290 +rmif, 1291 +ror, 1292 +rorv, 1296 +rshrn, 2034 +rsubhn, 2036 +saba, 2038 +sabal, 2040 +sabd, 2042 +sabdl, 2044 +sadalp, 2046 +saddl, 2048 +saddlp, 2050 +saddlv, 2052 +saddw, 2054 +sb, 1298 +sbc, 1299 +sbcs, 1301 +sbfiz, 1303 +sbfm, 1305 +sbfx, 1308 +scvtf, 2056 +sdiv, 1310 +sdot, 2066 +setf8, 1311 +sev, 1312 +sevl, 1313 +sha1c, 2070 +sha1h, 2071 +sha1m, 2072 +sha1p, 2073 +sha1su0, 2074 +sha1su1, 2075 +sha256h, 2077 +sha256h2, 2076 +sha256su0, 2078 +sha256su1, 2079 +sha512h, 2081 +sha512h2, 2083 +sha512su0, 2085 +sha512su1, 2086 +shadd, 2088 +shl, 2090 +shll, 2093 +shrn, 2095 +shsub, 2097 +sli, 2099 +sm3partw1, 2102 +sm3partw2, 2104 +sm3ss1, 2106 +sm3tt1a, 2108 +sm3tt1b, 2110 +sm3tt2a, 2112 +sm3tt2b, 2114 +sm4e, 2116 +sm4ekey, 2118 +smaddl, 1314 +smax, 2120 +smaxp, 2122 +smaxv, 2124 +smc, 1316 +smin, 2126 +sminp, 2128 +sminv, 2130 +smlal, 2132 +smlsl, 2137 +smmla, 2142 +smnegl, 1317 +smov, 2143 +smsubl, 1318 +smulh, 1320 +smull, 1321 +sqabs, 2150 +sqadd, 2152 +sqdmlal, 2154 +sqdmlsl, 2161 +sqdmulh, 2168 +sqdmull, 2173 +sqneg, 2179 +sqrdmlah, 2181 +sqrdmlsh, 2187 +sqrdmulh, 2193 +sqrshl, 2198 +sqrshrn, 2200 +sqrshrun, 2217 +sqshl, 2206 +sqshlu, 2211 +sqshrn, 2214 +sqshrun, 2217 +sqsub, 2220 +sqxtn, 2222 +sqxtun, 2225 +srhadd, 2228 +sri, 2230 +srshl, 2233 +srshr, 2235 +srsra, 2238 +ssbb, 1322 +sshl, 2241 +sshll, 2244 +sshr, 2246 +ssra, 2249 +ssubl, 2252 +ssubw, 2254 +st1, 2256 +st2, 2264 +st2g, 1323 +st3, 2271 +st4, 2278 +st64b, 1325 +st64bv, 1326 +st64bv0, 1328 +stadd, 1334 +staddb, 1330 +staddh, 1332 +stclr, 1340 +stclrb, 1336 +stclrh, 1338 +steor, 1346 +steorb, 1342 +steorh, 1344 +stg, 1348 +stgm, 1350 +stgp, 1351 +stllr, 1356 +stllrb, 1354 +stllrh, 1355 +stlr, 1358 +stlrb, 1360 +stlrh, 1361 +stlur, 1362 +stlurb, 1364 +stlurh, 1366 +stlxp, 1368 +stlxr, 1371 +stlxrb, 1374 +stlxrh, 1376 +stnp, 1378 +stp, 1380 +str, 1383 +strb, 1388 +strh, 1393 +stset, 1402 +stsetb, 1398 +stseth, 1400 +stsmax, 1408 +stsmaxb, 1404 +stsmaxh, 1406 +stsmin, 1414 +stsminb, 1410 +stsminh, 1412 +sttr, 1416 +sttrb, 1418 +sttrh, 1420 +stumax, 1426 +stumaxb, 1422 +stumaxh, 1424 +stumin, 1432 +stuminb, 1428 +stuminh, 1430 +stur, 1434 +sturb, 1436 +sturh, 1437 +stxp, 1438 +stxr, 1441 +stxrb, 1443 +stxrh, 1445 +stz2g, 1447 +stzg, 1449 +stzgm, 1451 +sub, 1452 +subg, 1459 +subhn, 2301 +subp, 1460 +subps, 1461 +subs, 1463 +sudot, 2303 +suqadd, 2305 +svc, 1470 +swp, 1475 +swpb, 1471 +swph, 1473 +sxtb, 1477 +sxth, 1479 +sxtl, 2307 +sxtw, 1481 +sys, 1482 +sysl, 1484 +tbl, 2309 +tbnz, 1485 +tbx, 2311 +tbz, 1486 +tlbi, 1487 +trn1, 2313 +trn2, 2315 +tsb, 1490 +tst, 1491 +uaba, 2317 +uabal, 2319 +uabd, 2321 +uabdl, 2323 +uadalp, 2325 +uaddl, 2327 +uaddlp, 2329 +uaddlv, 2331 +uaddw, 2333 +ubfiz, 1494 +ubfm, 1496 +ubfx, 1499 +ucvtf, 2335 +udf, 1501 +udiv, 1502 +udot, 2345 +uhadd, 2349 +uhsub, 2351 +umaddl, 1503 +umax, 2353 +umaxp, 2355 +umaxv, 2357 +umin, 2359 +uminp, 2361 +uminv, 2363 +umlal, 2365 +umlsl, 2370 +ummla, 2375 +umnegl, 1505 +umov, 2376 +umsubl, 1506 +umulh, 1508 +umull, 2378 +uqadd, 2383 +uqrshl, 2385 +uqrshrn, 2387 +uqshl, 2390 +uqshrn, 2395 +uqsub, 2398 +uqxtn, 2400 +urecpe, 2403 +urhadd, 2404 +urshl, 2406 +urshr, 2408 +ursqrte, 2411 +ursra, 2412 +usdot, 2415 +ushl, 2419 +ushll, 2422 +ushr, 2424 +usmmla, 2427 +usqadd, 2428 +usra, 2430 +usubl, 2433 +usubw, 
2435 +uxtb, 1510 +uxth, 1511 +uxtl, 2437 +uzp1, 2439 +uzp2, 2411 +wfe, 1512 +wfet, 1513 +wfi, 1514 +wfit, 1515 +xaflag, 1516 +xar, 2443 +xpacd, 1517 +xtn, 2444 +yield, 1519 +zip1, 2446 +zip2, 2448 \ No newline at end of file diff --git a/src/third-party/sleigh/processors/AARCH64/data/patterns/AARCH64_LE_patterns.xml b/src/third-party/sleigh/processors/AARCH64/data/patterns/AARCH64_LE_patterns.xml new file mode 100644 index 00000000..bae3968d --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/patterns/AARCH64_LE_patterns.xml @@ -0,0 +1,68 @@ + + + + 0xc0 0x03 0x5f 0xd6 + 0xff 0x0f 0x5f 0xd6 + ........ ........ ........ 000101.. + 0x20 0x00 0x20 0xd4 + + + + 0xfd 0x7b 0xbf 0xa9 + 0xfe .0001111 0x1. 0xf8 + 111..... .1....11 10...... 0xa9 + 11101..1 001..011 1011.... 0x6d + 0xff ..000011 000..... 0xd1 + 0x7f 0x23 0x03 0xd5 + + + + + + + 111..... .1....11 10...... 0xa9 + + + + + 0x........ 111..... .1....11 10...... 0xa9 + + + + + 0xfe .0001111 0x1. 0xf8 + + + + + 0x........ 0xfe .0001111 0x1. 0xf8 + + + + + 0xfd 0x7b 0xbf 0xa9 0xfd 0x03 0x00 0x91 + + + + + + 0x7f 0x23 0x03 0xd5 0xff ..000011 00000... 0xd1 + + + + + + ...10000 ........ ........ 1..10000 + 00010001 ........ 01...... 0xf9 + 0x10 ......10 00...... 0x91 + 0x20 0x02 0x1f 0xd6 + + + + + + diff --git a/src/third-party/sleigh/processors/AARCH64/data/patterns/patternconstraints.xml b/src/third-party/sleigh/processors/AARCH64/data/patterns/patternconstraints.xml new file mode 100644 index 00000000..ce0cd4ad --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/data/patterns/patternconstraints.xml @@ -0,0 +1,5 @@ + + + AARCH64_LE_patterns.xml + + diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/DecodeBitMasks.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/DecodeBitMasks.java new file mode 100644 index 00000000..9baecfc3 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/DecodeBitMasks.java @@ -0,0 +1,228 @@ +/* ### + * IP: GHIDRA + * REVIEWED: YES + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import java.math.BigInteger; + +public class DecodeBitMasks { + + long tmask; + long wmask; + + long immN, immr, imms; + + int M; + + int HighestSetBit(long x, int bitSize) { + long mask = 0x1 << (bitSize - 1); + for (int i = bitSize - 1; i >= 0; i--) { + if ((mask & x) == mask) { + return i; + } + mask = mask >> 1; + } + return -1; + } + + long ZeroExtend(long x, int bitSize, int extSize) { + long mask = Ones(bitSize); + + x = x & mask; + + return x; + } + + private long Ones(int bitSize) { + long mask = 0x0; + + for (int i = 0; i < bitSize; i++) { + mask = (mask << 1) | 1; + } + + return mask; + } + + long Replicate(long x, int bitSize, int startBit, int repSize, int extSize) { + long repval = (x >> startBit) & Ones(repSize); + int times = extSize / repSize; + long val = 0; + for (int i = 0; i < times; i++) { + val = (val << repSize) | repval; + } + repval = val << startBit; + + x = x | repval; + return x; + } + + long ROR(long x, int esize, long rotate) { + long a = x << (esize - rotate) & Ones(esize); + long r = x >> (rotate) & Ones(esize); + return ((a | r) & Ones(esize)); + } + + boolean decode(long iN, long is, long ir, boolean immediate, int Msize) { + + immN = iN; + imms = is; + immr = ir; + + M = Msize; + + tmask = wmask = 0; + + long levels; + + // Compute log2 of element size + // 2^len must be in range [2, M] + // immN:NOT(imms)); + int len = HighestSetBit(immN << 6 | ((~imms) & Ones(6)), 7); + + if (len < 1) { + System.out.println("bad value " + immN + ":" + immr + ":" + imms); + return false; + } + + assert (M >= (1 << len)); + + // Determine S, R and S - R parameters + levels = ZeroExtend(Ones(len), 6, 6); + + // For logical immediates an all-ones value of S is reserved + // since it would generate a useless all-ones result (many times) + if (immediate && (imms & levels) == levels) { + System.out.println("All-Ones " + immN + ":" + immr + ":" + imms); + return false; + } + + long S = imms & levels; + long R = immr & levels; + + long diff = S - R; // 6-bit subtract with borrow + + int esize = 1 << len; + + long d = diff & Ones(len - 1); + + long welem = ZeroExtend(Ones((int) (S + 1)), esize, esize); + long telem = ZeroExtend(Ones((int) (d + 1)), esize, esize); + + //wmask = Replicate(ROR(welem, R)); + + wmask = Replicate(ROR(welem, esize, R), esize, 0, esize, M); + + // Replicate(telem); + tmask = Replicate(telem, esize, 0, esize, M); + + return true; + } + + static String bitStr(long value, int bitSize) { + BigInteger val = BigInteger.valueOf(value); + val = val.and(new BigInteger("FFFFFFFFFFFFFFFF", 16)); + + String str = val.toString(2); + int len = str.length(); + for (; len < bitSize; len++) { + str = "0" + str; + } + return str; + } + + void printit() { + System.out.println(bitStr(immN, 1) + ":" + bitStr(immr, 6) + ":" + bitStr(imms, 6) + " = " + + bitStr(wmask, M) + " " + bitStr(tmask, M)); + } + + /** + * @param args + */ + public static void main(String[] args) { + DecodeBitMasks bm = new DecodeBitMasks(); + boolean valid; + + valid = bm.decode(0, 0, 0, true, 64); + if (valid) { + bm.printit(); + } + + int immN = 0; + //for (int immN = 0; immN <= 1; immN++) { + for (int immr = 0; immr <= 0x3f; immr++) { + for (int imms = 0; imms <= 0x3f; imms++) { + valid = bm.decode(immN, imms, immr, true, 32); + if (valid) { + bm.printit(); + } + } + } + //} + + //for (int immr = 0; immr <= 0x3f; immr++) { + for (int imms = 0; imms <= 0x3f; imms++) { + valid = bm.decode(immN, imms, 0, true, 32); + if (valid) { + bm.printit(); + } + } + //} + + if (bm.decode(0, 0x1E, 0x1F, 
true, 32)) { + bm.printit(); + } + + if (bm.decode(0, 0x1D, 0x1E, true, 32)) { + bm.printit(); + } + + + immN = 0; + for (int immr = 0; immr <= 0x3f; immr++) { + for (int imms = 0; imms <= 0x3f; imms++) { + valid = bm.decode(immN, imms, immr, true, 64); + if (valid) { + bm.printit(); + } + } + } + + immN = 1; + for (int immr = 0; immr <= 0x3f; immr++) { + for (int imms = 0; imms <= 0x3f; imms++) { + valid = bm.decode(immN, imms, immr, true, 64); + if (valid) { + bm.printit(); + } + } + } + + immN = 0; + for (int imms = 0; imms <= 0x3f; imms++) { + valid = bm.decode(immN, imms, 0, true, 64); + if (valid) { + bm.printit(); + } + } + + immN = 1; + for (int imms = 0; imms <= 0x3f; imms++) { + valid = bm.decode(immN, imms, 0, true, 64); + if (valid) { + bm.printit(); + } + } + } + +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/plugin/core/analysis/AARCH64PltThunkAnalyzer.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/plugin/core/analysis/AARCH64PltThunkAnalyzer.java new file mode 100644 index 00000000..0024f4f9 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/plugin/core/analysis/AARCH64PltThunkAnalyzer.java @@ -0,0 +1,275 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package ghidra.app.plugin.core.analysis;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.xml.sax.SAXException;
+
+import generic.jar.ResourceFile;
+import ghidra.app.cmd.disassemble.DisassembleCommand;
+import ghidra.app.cmd.function.CreateFunctionCmd;
+import ghidra.app.cmd.function.CreateThunkFunctionCmd;
+import ghidra.app.services.*;
+import ghidra.app.util.importer.MessageLog;
+import ghidra.framework.Application;
+import ghidra.program.model.address.*;
+import ghidra.program.model.lang.*;
+import ghidra.program.model.listing.*;
+import ghidra.program.model.mem.*;
+import ghidra.program.model.symbol.RefType;
+import ghidra.program.util.*;
+import ghidra.util.Msg;
+import ghidra.util.bytesearch.*;
+import ghidra.util.exception.AssertException;
+import ghidra.util.exception.CancelledException;
+import ghidra.util.task.TaskMonitor;
+
+public class AARCH64PltThunkAnalyzer extends AbstractAnalyzer {
+
+    private static final String NAME = "AARCH64 ELF PLT Thunks";
+    private static final String DESCRIPTION = "Create AARCH64 ELF PLT thunk functions";
+    private static final String PROCESSOR_NAME = "AARCH64";
+
+    private static final String PLT_THUNK_PATTERN_FILE = "aarch64-pltThunks.xml";
+
+    private static boolean patternLoadFailed;
+    private static ArrayList<Pattern> leThunkPatterns;
+    private static int maxPatternLength;
+
+    private Register x17Reg;
+
+    public AARCH64PltThunkAnalyzer() {
+        super(NAME, DESCRIPTION, AnalyzerType.BYTE_ANALYZER); // assumes ELF Loader disassembled PLT section
+        setDefaultEnablement(true);
+        setPriority(AnalysisPriority.FORMAT_ANALYSIS);
+    }
+
+    @Override
+    public boolean canAnalyze(Program program) {
+        Language language = program.getLanguage();
+        // TODO: what about 32/64 hybrid case?
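+        // The byte patterns loaded below match the usual AARCH64 lazy-binding
+        // PLT thunk shape, roughly (illustrative sketch; see the pattern file
+        // AARCH64_LE_patterns.xml for the authoritative encodings):
+        //   adrp x16, <GOT page> ; ldr x17, [x16, #off] ; add x16, x16, #off ; br x17
+        // The indirect branch goes through x17, hence the register check below.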
if (PROCESSOR_NAME.equals(language.getProcessor().toString()) &&
+            patternsLoaded(language.isBigEndian())) {
+            x17Reg = program.getRegister("x17");
+            return x17Reg != null;
+        }
+        return false;
+    }
+
+    private static synchronized boolean patternsLoaded(boolean bigEndian) {
+        if (patternLoadFailed) {
+            return false;
+        }
+        if (leThunkPatterns != null) {
+            return true;
+        }
+
+        try {
+            ResourceFile patternFile = Application.getModuleDataFile(PLT_THUNK_PATTERN_FILE);
+
+            leThunkPatterns = new ArrayList<>();
+            Pattern.readPatterns(patternFile, leThunkPatterns, null);
+
+            maxPatternLength = 0;
+            for (Pattern pattern : leThunkPatterns) {
+                int len = pattern.getSize();
+                if ((len % 4) != 0) {
+                    throw new SAXException("pattern must contain multiple of 4-bytes");
+                }
+                if (len > maxPatternLength) {
+                    maxPatternLength = len;
+                }
+            }
+
+        } catch (FileNotFoundException e) {
+            Msg.error(AARCH64PltThunkAnalyzer.class, "AARCH64 resource file not found: " + PLT_THUNK_PATTERN_FILE);
+            patternLoadFailed = true;
+            return false;
+        } catch (SAXException | IOException e) {
+            Msg.error(AARCH64PltThunkAnalyzer.class, "Failed to parse byte pattern file: " + PLT_THUNK_PATTERN_FILE, e);
+            patternLoadFailed = true;
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean added(Program program, AddressSetView set, TaskMonitor monitor, MessageLog log)
+            throws CancelledException {
+
+        Memory memory = program.getMemory();
+        MemoryBlock block = memory.getBlock(".plt");
+        if (block == null) {
+            return true;
+        }
+
+        set = set.intersectRange(block.getStart(), block.getEnd());
+        set = removeFunctionBodies(program, set, monitor);
+        if (set.isEmpty()) {
+            return true;
+        }
+
+        SequenceSearchState sequenceSearchState = SequenceSearchState.buildStateMachine(
+            leThunkPatterns);
+
+        monitor.setIndeterminate(true);
+        monitor.setProgress(0);
+
+        ArrayList<Match> matches = new ArrayList<>();
+
+        try {
+            for (AddressRange range : set.getAddressRanges()) {
+
+                byte[] bytes = new byte[(int)range.getLength()];
+                if (block.getBytes(range.getMinAddress(), bytes, 0, bytes.length) != bytes.length) {
+                    log.appendMsg("Expected initialized .plt section block");
+                    return false;
+                }
+
+                matches.clear();
+                sequenceSearchState.apply(bytes, matches);
+
+                for (Match match : matches) {
+                    Address addr = range.getMinAddress().add(match.getMarkOffset());
+                    analyzePltThunk(program, addr, match.getSequenceSize(), monitor);
+                }
+
+            }
+        } catch (MemoryAccessException | AddressOutOfBoundsException e) {
+            log.appendMsg("Expected initialized .plt section block: " + e.getMessage());
+        }
+
+        return true;
+    }
+
+    private AddressSetView removeFunctionBodies(Program program, AddressSetView set, TaskMonitor monitor) throws CancelledException {
+        if (set.isEmpty()) {
+            return set;
+        }
+        // Only processing importer disassembly not yet claimed by function bodies
+        for (Function f : program.getFunctionManager().getFunctions(set, true)) {
+            monitor.checkCanceled();
+            set = set.subtract(f.getBody());
+        }
+        return set;
+    }
+
+    private void analyzePltThunk(Program program, Address entryAddr, int thunkSize, TaskMonitor monitor)
+            throws CancelledException {
+
+        SymbolicPropogator symEval = new SymbolicPropogator(program);
+        symEval.setParamRefCheck(false);
+        symEval.setReturnRefCheck(false);
+        symEval.setStoredRefCheck(false);
+
+        AddressSet thunkBody = new AddressSet(entryAddr, entryAddr.add(thunkSize - 1));
+
+        ContextEvaluator eval = new ContextEvaluatorAdapter() {
+
+            @Override
+            public boolean followFalseConditionalBranches() {
+                return false; // should never
happen - just in case + } + + @Override + public boolean evaluateReference(VarnodeContext context, Instruction instr, int pcodeop, Address address, + int size, RefType refType) { + return true; + } + + @Override + public boolean evaluateDestination(VarnodeContext context, Instruction instruction) { + + // We only handle indirect branch through x17 register + if (!"br".equals(instruction.getMnemonicString()) || !x17Reg.equals(instruction.getRegister(0))) { + return true; + } + + // Change br flow to call-return + instruction.setFlowOverride(FlowOverride.CALL_RETURN); + + RegisterValue x17Value = context.getRegisterValue(x17Reg); + if (x17Value != null && x17Value.hasValue()) { + Address destAddr = entryAddr.getNewAddress(x17Value.getUnsignedValue().longValue()); + Function thunkedFunction = createDestinationFunction(program, destAddr, instruction.getAddress(), + monitor); + if (thunkedFunction != null) { + CreateThunkFunctionCmd cmd = new CreateThunkFunctionCmd(entryAddr, thunkBody, + thunkedFunction.getEntryPoint()); + cmd.applyTo(program); + } + } + + return true; + } + + @Override + public boolean allowAccess(VarnodeContext context, Address address) { + return true; + } + }; + + symEval.flowConstants(entryAddr, thunkBody, eval, false, monitor); + } + + private Function createDestinationFunction(Program program, Address addr, Address flowFromAddr, TaskMonitor monitor) { + + Listing listing = program.getListing(); + BookmarkManager bookmarkMgr = program.getBookmarkManager(); + + if (!program.getMemory().contains(addr)) { + bookmarkMgr.setBookmark(flowFromAddr, BookmarkType.ERROR, "Bad Reference", "No memory for PLT Thunk destination at " + addr); + return null; + } + + Function function = listing.getFunctionAt(addr); + if (function != null) { + return function; + } + + CodeUnit cu = listing.getCodeUnitContaining(addr); + if (cu == null) { + throw new AssertException("expected code unit in memory"); + } + if (!addr.equals(cu.getMinAddress())) { + bookmarkMgr.setBookmark(cu.getMinAddress(), BookmarkType.ERROR, "Code Unit Conflict", + "Expected function entry at " + addr + " referenced by PLT Thunk at " + flowFromAddr); + return null; + } + if (cu instanceof Data) { + Data d = (Data)cu; + if (d.isDefined()) { + bookmarkMgr.setBookmark(addr, BookmarkType.ERROR, "Code Unit Conflict", "Expected function entry referenced by PLT Thunk at " + flowFromAddr); + return null; + } + DisassembleCommand cmd = new DisassembleCommand(addr, null, true); + if (!cmd.applyTo(program, monitor)) { + return null; + } + } + + CreateFunctionCmd cmd = new CreateFunctionCmd(addr); + if (cmd.applyTo(program, monitor)) { + return cmd.getFunction(); + } + return null; + } + +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/extend/AARCH64_ElfExtension.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/extend/AARCH64_ElfExtension.java new file mode 100644 index 00000000..741a3460 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/extend/AARCH64_ElfExtension.java @@ -0,0 +1,82 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ghidra.app.util.bin.format.elf.extend;
+
+import ghidra.app.util.bin.format.elf.*;
+import ghidra.program.model.address.Address;
+import ghidra.program.model.lang.Language;
+
+public class AARCH64_ElfExtension extends ElfExtension {
+
+    // Elf Program Header Extensions
+    public static final ElfProgramHeaderType PT_AARCH64_ARCHEXT =
+        new ElfProgramHeaderType(0x70000000, "PT_AARCH64_ARCHEXT", "AARCH64 extension");
+
+    // Elf Section Header Extensions
+    public static final ElfSectionHeaderType SHT_AARCH64_ATTRIBUTES =
+        new ElfSectionHeaderType(0x70000003, "SHT_AARCH64_ATTRIBUTES", "Attribute section");
+
+    // Section header flags
+    private static final int SHF_ENTRYSECT = 0x10000000; // section contains entry point
+    private static final int SHF_COMDEF = 0x80000000; // section may be multiply defined
+
+    @Override
+    public boolean canHandle(ElfHeader elf) {
+        return elf.e_machine() == ElfConstants.EM_AARCH64;
+    }
+
+    @Override
+    public boolean canHandle(ElfLoadHelper elfLoadHelper) {
+        Language language = elfLoadHelper.getProgram().getLanguage();
+        return canHandle(elfLoadHelper.getElfHeader()) &&
+            "AARCH64".equals(language.getProcessor().toString());
+    }
+
+    @Override
+    public String getDataTypeSuffix() {
+        return "_AARCH64";
+    }
+
+    @Override
+    public Address evaluateElfSymbol(ElfLoadHelper elfLoadHelper, ElfSymbol elfSymbol,
+            Address address, boolean isExternal) {
+
+        if (isExternal) {
+            return address;
+        }
+
+        String symName = elfSymbol.getNameAsString();
+
+        if ("$x".equals(symName) || symName.startsWith("$x.")) {
+            elfLoadHelper.markAsCode(address);
+
+            // do not retain $x symbols in program due to potential function/thunk naming interference
+            elfLoadHelper.setElfSymbolAddress(elfSymbol, address);
+            return null;
+        }
+        else if ("$d".equals(symName) || symName.startsWith("$d.")) {
+            // is data, need to protect as data
+            elfLoadHelper.createUndefinedData(address, (int) elfSymbol.getSize());
+
+            // do not retain $d symbols in program due to excessive duplicate symbols
+            elfLoadHelper.setElfSymbolAddress(elfSymbol, address);
+            return null;
+        }
+
+        return address;
+    }
+
+}
diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/relocation/AARCH64_ElfRelocationConstants.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/relocation/AARCH64_ElfRelocationConstants.java
new file mode 100644
index 00000000..7b01bfc1
--- /dev/null
+++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/relocation/AARCH64_ElfRelocationConstants.java
@@ -0,0 +1,349 @@
+/* ###
+ * IP: GHIDRA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.util.bin.format.elf.relocation; + +public class AARCH64_ElfRelocationConstants { + + public static final int R_AARCH64_NONE = 0; + + // .word: (S+A) + public static final int R_AARCH64_P32_ABS32 = 1; + + // .half: (S+A) + public static final int R_AARCH64_P32_ABS16 = 2; + + // .word: (S+A-P) + public static final int R_AARCH64_P32_PREL32 = 3; + + // .half: (S+A-P) + public static final int R_AARCH64_P32_PREL16 = 4; + + // MOV[ZK]: ((S+A) >> 0) & 0xffff + public static final int R_AARCH64_P32_MOVW_UABS_G0 = 5; + + // MOV[ZK]: ((S+A) >> 0) & 0xffff + public static final int R_AARCH64_P32_MOVW_UABS_G0_NC = 6; + + // MOV[ZK]: ((S+A) >> 16) & 0xffff + public static final int R_AARCH64_P32_MOVW_UABS_G1 = 7; + + // MOV[ZN]: ((S+A) >> 0) & 0xffff + public static final int R_AARCH64_P32_MOVW_SABS_G0 = 8; + + // LD-lit: ((S+A-P) >> 2) & 0x7ffff + public static final int R_AARCH64_P32_LD_PREL_LO19 = 9; + + // ADR: (S+A-P) & 0x1fffff + public static final int R_AARCH64_P32_ADR_PREL_LO21 = 10; + + // ADRH: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff + public static final int R_AARCH64_P32_ADR_PREL_PG_HI21 = 11; + + // ADD: (S+A) & 0xfff + public static final int R_AARCH64_P32_ADD_ABS_LO12_NC = 12; + + // LD/ST8: (S+A) & 0xfff + public static final int R_AARCH64_P32_LDST8_ABS_LO12_NC = 13; + + // LD/ST16: (S+A) & 0xffe + public static final int R_AARCH64_P32_LDST16_ABS_LO12_NC = 14; + + // LD/ST32: (S+A) & 0xffc + public static final int R_AARCH64_P32_LDST32_ABS_LO12_NC = 15; + + // LD/ST64: (S+A) & 0xff8 + public static final int R_AARCH64_P32_LDST64_ABS_LO12_NC = 16; + + // LD/ST128: (S+A) & 0xff0 + public static final int R_AARCH64_P32_LDST128_ABS_LO12_NC = 17; + + // TBZ/NZ: ((S+A-P) >> 2) & 0x3fff. + public static final int R_AARCH64_P32_TSTBR14 = 18; + + // B.cond: ((S+A-P) >> 2) & 0x7ffff. + public static final int R_AARCH64_P32_CONDBR19 = 19; + + // B: ((S+A-P) >> 2) & 0x3ffffff. + public static final int R_AARCH64_P32_JUMP26 = 20; + + // BL: ((S+A-P) >> 2) & 0x3ffffff. 
+ public static final int R_AARCH64_P32_CALL26 = 21; + + + public static final int R_AARCH64_P32_GOT_LD_PREL19 = 25; + public static final int R_AARCH64_P32_ADR_GOT_PAGE = 26; + public static final int R_AARCH64_P32_LD32_GOT_LO12_NC = 27; + public static final int R_AARCH64_P32_LD32_GOTPAGE_LO14 = 28; + + public static final int R_AARCH64_P32_TLSGD_ADR_PREL21 = 80; + public static final int R_AARCH64_P32_TLSGD_ADR_PAGE21 = 81; + public static final int R_AARCH64_P32_TLSGD_ADD_LO12_NC = 82; + public static final int R_AARCH64_P32_TLSLD_ADR_PREL21 = 83; + public static final int R_AARCH64_P32_TLSLD_ADR_PAGE21 = 84; + public static final int R_AARCH64_P32_TLSLD_ADD_LO12_NC = 85; + public static final int R_AARCH64_P32_TLSLD_MOVW_DTPREL_G1 = 87; + public static final int R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0 = 88; + public static final int R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0_NC = 89; + public static final int R_AARCH64_P32_TLSLD_ADD_DTPREL_HI12 = 90; + public static final int R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12 = 91; + public static final int R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12_NC = 92; + public static final int R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21 = 103; + public static final int R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC = 104; + public static final int R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19 = 105; + public static final int R_AARCH64_P32_TLSLE_MOVW_TPREL_G1 = 106; + public static final int R_AARCH64_P32_TLSLE_MOVW_TPREL_G0 = 107; + public static final int R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC = 108; + public static final int R_AARCH64_P32_TLSLE_ADD_TPREL_HI12 = 109; + public static final int R_AARCH64_P32_TLSLE_ADD_TPREL_LO12 = 110; + public static final int R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC = 111; + + public static final int R_AARCH64_P32_TLSDESC_LD_PREL19 = 122; + public static final int R_AARCH64_P32_TLSDESC_ADR_PREL21 = 123; + public static final int R_AARCH64_P32_TLSDESC_ADR_PAGE21 = 124; + public static final int R_AARCH64_P32_TLSDESC_LD32_LO12_NC = 125; + public static final int R_AARCH64_P32_TLSDESC_ADD_LO12_NC = 126; + public static final int R_AARCH64_P32_TLSDESC_CALL = 127; + + // Copy symbol at runtime. + public static final int R_AARCH64_P32_COPY = 180; + + // Create GOT entry. + public static final int R_AARCH64_P32_GLOB_DAT = 181; + + // Create PLT entry. + public static final int R_AARCH64_P32_JUMP_SLOT = 182; + + // Adjust by program base. + public static final int R_AARCH64_P32_RELATIVE = 183; + public static final int R_AARCH64_P32_TLS_DTPMOD = 184; + public static final int R_AARCH64_P32_TLS_DTPREL = 185; + public static final int R_AARCH64_P32_TLS_TPREL = 186; + public static final int R_AARCH64_P32_TLSDESC = 187; + public static final int R_AARCH64_P32_IRELATIVE = 188; + + public static final int R_AARCH64_NULL = 256; // No reloc + + // Basic data relocations. 
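+    // Notation in the comments below follows the standard ELF relocation
+    // conventions: S = value of the symbol, A = the addend, P = address of the
+    // place being relocated, and PG(x) = x & ~0xfff (the 4 KB page of x).
+    // Worked example (illustrative): R_AARCH64_PREL32 with S+A = 0x402000
+    // applied at P = 0x401000 stores the 32-bit value 0x402000 - 0x401000 = 0x1000.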
+ + // .xword: (S+A) + public static final int R_AARCH64_ABS64 = 257; + + // .word: (S+A) + public static final int R_AARCH64_ABS32 = 258; + + // .half: (S+A) + public static final int R_AARCH64_ABS16 = 259; + + // .xword: (S+A-P) + public static final int R_AARCH64_PREL64 = 260; + + // .word: (S+A-P) + public static final int R_AARCH64_PREL32 = 261; + + // .half: (S+A-P) + public static final int R_AARCH64_PREL16 = 262; + + // MOV[ZK]: ((S+A) >> 0) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G0 = 263; + + // MOV[ZK]: ((S+A) >> 0) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G0_NC = 264; + + // MOV[ZK]: ((S+A) >> 16) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G1 = 265; + + // MOV[ZK]: ((S+A) >> 16) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G1_NC = 266; + + // MOV[ZK]: ((S+A) >> 32) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G2 = 267; + + // MOV[ZK]: ((S+A) >> 32) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G2_NC = 268; + + // MOV[ZK]: ((S+A) >> 48) & 0xffff + public static final int R_AARCH64_MOVW_UABS_G3 = 269; + + // MOV[ZN]: ((S+A) >> 0) & 0xffff + public static final int R_AARCH64_MOVW_SABS_G0 = 270; + + // MOV[ZN]: ((S+A) >> 16) & 0xffff + public static final int R_AARCH64_MOVW_SABS_G1 = 271; + + // MOV[ZN]: ((S+A) >> 32) & 0xffff + public static final int R_AARCH64_MOVW_SABS_G2 = 272; + + // LD-lit: ((S+A-P) >> 2) & 0x7ffff + public static final int R_AARCH64_LD_PREL_LO19 = 273; + + // ADR: (S+A-P) & 0x1fffff + public static final int R_AARCH64_ADR_PREL_LO21 = 274; + + // ADRH: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff + public static final int R_AARCH64_ADR_PREL_PG_HI21 = 275; + + // ADRH: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff + public static final int R_AARCH64_ADR_PREL_PG_HI21_NC = 276; + + // ADD: (S+A) & 0xfff + public static final int R_AARCH64_ADD_ABS_LO12_NC = 277; + + // LD/ST8: (S+A) & 0xfff + public static final int R_AARCH64_LDST8_ABS_LO12_NC = 278; + + // TBZ/NZ: ((S+A-P) >> 2) & 0x3fff. + public static final int R_AARCH64_TSTBR14 = 279; + + // B.cond: ((S+A-P) >> 2) & 0x7ffff. + public static final int R_AARCH64_CONDBR19 = 280; + + // B: ((S+A-P) >> 2) & 0x3ffffff. + public static final int R_AARCH64_JUMP26 = 282; + + // BL: ((S+A-P) >> 2) & 0x3ffffff. 
+ public static final int R_AARCH64_CALL26 = 283; + + // LD/ST16: (S+A) & 0xffe + public static final int R_AARCH64_LDST16_ABS_LO12_NC = 284; + + // LD/ST32: (S+A) & 0xffc + public static final int R_AARCH64_LDST32_ABS_LO12_NC = 285; + + // LD/ST64: (S+A) & 0xff8 + public static final int R_AARCH64_LDST64_ABS_LO12_NC = 286; + + public static final int R_AARCH64_MOVW_PREL_G0 = 287; + public static final int R_AARCH64_MOVW_PREL_G0_NC = 288; + public static final int R_AARCH64_MOVW_PREL_G1 = 289; + public static final int R_AARCH64_MOVW_PREL_G1_NC = 290; + public static final int R_AARCH64_MOVW_PREL_G2 = 291; + public static final int R_AARCH64_MOVW_PREL_G2_NC = 292; + public static final int R_AARCH64_MOVW_PREL_G3 = 293; + + // LD/ST128: (S+A) & 0xff0 + public static final int R_AARCH64_LDST128_ABS_LO12_NC = 299; + + public static final int R_AARCH64_MOVW_GOTOFF_G0 = 300; + public static final int R_AARCH64_MOVW_GOTOFF_G0_NC = 301; + public static final int R_AARCH64_MOVW_GOTOFF_G1 = 302; + public static final int R_AARCH64_MOVW_GOTOFF_G1_NC = 303; + public static final int R_AARCH64_MOVW_GOTOFF_G2 = 304; + public static final int R_AARCH64_MOVW_GOTOFF_G2_NC = 305; + public static final int R_AARCH64_MOVW_GOTOFF_G3 = 306; + + public static final int R_AARCH64_GOTREL64 = 307; + public static final int R_AARCH64_GOTREL32 = 308; + + public static final int R_AARCH64_GOT_LD_PREL19 = 309; + public static final int R_AARCH64_LD64_GOTOFF_LO15 = 310; + public static final int R_AARCH64_ADR_GOT_PAGE = 311; + public static final int R_AARCH64_LD64_GOT_LO12_NC = 312; + public static final int R_AARCH64_LD64_GOTPAGE_LO15 = 313; + + public static final int R_AARCH64_TLSGD_ADR_PREL21 = 512; + public static final int R_AARCH64_TLSGD_ADR_PAGE21 = 513; + public static final int R_AARCH64_TLSGD_ADD_LO12_NC = 514; + public static final int R_AARCH64_TLSGD_MOVW_G1 = 515; + public static final int R_AARCH64_TLSGD_MOVW_G0_NC = 516; + + public static final int R_AARCH64_TLSLD_ADR_PREL21 = 517; + public static final int R_AARCH64_TLSLD_ADR_PAGE21 = 518; + public static final int R_AARCH64_TLSLD_ADD_LO12_NC = 519; + public static final int R_AARCH64_TLSLD_MOVW_G1 = 520; + public static final int R_AARCH64_TLSLD_MOVW_G0_NC = 521; + public static final int R_AARCH64_TLSLD_LD_PREL19 = 522; + public static final int R_AARCH64_TLSLD_MOVW_DTPREL_G2 = 523; + public static final int R_AARCH64_TLSLD_MOVW_DTPREL_G1 = 524; + public static final int R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC = 525; + public static final int R_AARCH64_TLSLD_MOVW_DTPREL_G0 = 526; + public static final int R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC = 527; + public static final int R_AARCH64_TLSLD_ADD_DTPREL_HI12 = 528; + public static final int R_AARCH64_TLSLD_ADD_DTPREL_LO12 = 529; + public static final int R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC = 530; + public static final int R_AARCH64_TLSLD_LDST8_DTPREL_LO12 = 531; + public static final int R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC = 532; + public static final int R_AARCH64_TLSLD_LDST16_DTPREL_LO12 = 533; + public static final int R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC = 534; + public static final int R_AARCH64_TLSLD_LDST32_DTPREL_LO12 = 535; + public static final int R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC = 536; + public static final int R_AARCH64_TLSLD_LDST64_DTPREL_LO12 = 537; + public static final int R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC = 538; + + public static final int R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 = 539; + public static final int R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC = 540; + public static final int 
R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 = 541; + public static final int R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC = 542; + public static final int R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 = 543; + + public static final int R_AARCH64_TLSLE_MOVW_TPREL_G2 = 544; + public static final int R_AARCH64_TLSLE_MOVW_TPREL_G1 = 545; + public static final int R_AARCH64_TLSLE_MOVW_TPREL_G1_NC = 546; + public static final int R_AARCH64_TLSLE_MOVW_TPREL_G0 = 547; + public static final int R_AARCH64_TLSLE_MOVW_TPREL_G0_NC = 548; + public static final int R_AARCH64_TLSLE_ADD_TPREL_HI12 = 549; + public static final int R_AARCH64_TLSLE_ADD_TPREL_LO12 = 550; + public static final int R_AARCH64_TLSLE_ADD_TPREL_LO12_NC = 551; + public static final int R_AARCH64_TLSLE_LDST8_TPREL_LO12 = 552; + public static final int R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC = 553; + public static final int R_AARCH64_TLSLE_LDST16_TPREL_LO12 = 554; + public static final int R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC = 555; + public static final int R_AARCH64_TLSLE_LDST32_TPREL_LO12 = 556; + public static final int R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC = 557; + public static final int R_AARCH64_TLSLE_LDST64_TPREL_LO12 = 558; + public static final int R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC = 559; + + public static final int R_AARCH64_TLSDESC_LD_PREL19 = 560; + public static final int R_AARCH64_TLSDESC_ADR_PREL21 = 561; + public static final int R_AARCH64_TLSDESC_ADR_PAGE21 = 562; + public static final int R_AARCH64_TLSDESC_LD64_LO12_NC = 563; + public static final int R_AARCH64_TLSDESC_ADD_LO12_NC = 564; + public static final int R_AARCH64_TLSDESC_OFF_G1 = 565; + public static final int R_AARCH64_TLSDESC_OFF_G0_NC = 566; + public static final int R_AARCH64_TLSDESC_LDR = 567; + public static final int R_AARCH64_TLSDESC_ADD = 568; + public static final int R_AARCH64_TLSDESC_CALL = 569; + + public static final int R_AARCH64_TLSLE_LDST128_TPREL_LO12 = 570; + public static final int R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC = 571; + public static final int R_AARCH64_TLSLD_LDST128_DTPREL_LO12 = 572; + public static final int R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC = 573; + + // Copy symbol at runtime. + public static final int R_AARCH64_COPY = 1024; + + // Create GOT entry. + public static final int R_AARCH64_GLOB_DAT = 1025; + + // Create PLT entry. + public static final int R_AARCH64_JUMP_SLOT = 1026; + + // Adjust by program base. 
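+    // (word64 = base delta + A; the handler below implements this as
+    // getImageBaseWordAdjustmentOffset() + addend. Illustrative sketch: an image
+    // linked at 0x400000 but loaded at 0x500000 adds 0x100000 to each such word.)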
+    public static final int R_AARCH64_RELATIVE = 1027;
+    public static final int R_AARCH64_TLS_DTPMOD64 = 1028;
+    public static final int R_AARCH64_TLS_DTPREL64 = 1029;
+    public static final int R_AARCH64_TLS_TPREL64 = 1030;
+
+    public static final int R_AARCH64_TLS_DTPMOD = 1028;
+    public static final int R_AARCH64_TLS_DTPREL = 1029;
+    public static final int R_AARCH64_TLS_TPREL = 1030;
+
+    public static final int R_AARCH64_TLSDESC = 1031;
+    public static final int R_AARCH64_IRELATIVE = 1032;
+
+    private AARCH64_ElfRelocationConstants() {
+        // no construct
+    }
+}
diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/relocation/AARCH64_ElfRelocationHandler.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/relocation/AARCH64_ElfRelocationHandler.java
new file mode 100644
index 00000000..9d5d2a57
--- /dev/null
+++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/elf/relocation/AARCH64_ElfRelocationHandler.java
@@ -0,0 +1,278 @@
+/* ###
+ * IP: GHIDRA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ghidra.app.util.bin.format.elf.relocation;
+
+import ghidra.app.util.bin.format.elf.*;
+import ghidra.program.model.address.Address;
+import ghidra.program.model.listing.Function;
+import ghidra.program.model.listing.Program;
+import ghidra.program.model.mem.*;
+import ghidra.util.exception.NotFoundException;
+
+public class AARCH64_ElfRelocationHandler extends ElfRelocationHandler {
+
+    @Override
+    public boolean canRelocate(ElfHeader elf) {
+        return elf.e_machine() == ElfConstants.EM_AARCH64;
+    }
+
+    @Override
+    public int getRelrRelocationType() {
+        return AARCH64_ElfRelocationConstants.R_AARCH64_RELATIVE;
+    }
+
+    @Override
+    public void relocate(ElfRelocationContext elfRelocationContext, ElfRelocation relocation,
+            Address relocationAddress) throws MemoryAccessException, NotFoundException {
+
+        ElfHeader elf = elfRelocationContext.getElfHeader();
+        if (elf.e_machine() != ElfConstants.EM_AARCH64) {
+            return;
+        }
+
+        Program program = elfRelocationContext.getProgram();
+        Memory memory = program.getMemory();
+
+        int type = relocation.getType();
+        if (type == AARCH64_ElfRelocationConstants.R_AARCH64_NONE) {
+            return;
+        }
+        int symbolIndex = relocation.getSymbolIndex();
+
+        long addend = relocation.getAddend(); // will be 0 for REL case
+
+        ElfSymbol sym = elfRelocationContext.getSymbol(symbolIndex);
+        String symbolName = sym.getNameAsString();
+
+        // keep the full 64-bit offset; truncating it to int would break
+        // PC-relative fixups for images loaded above the 32-bit range
+        long offset = relocationAddress.getOffset();
+
+        boolean isBigEndianInstructions =
+            program.getLanguage().getLanguageDescription().getInstructionEndian().isBigEndian();
+
+        Address symbolAddr = elfRelocationContext.getSymbolAddress(sym);
+        long symbolValue = elfRelocationContext.getSymbolValue(sym);
+        long newValue = 0;
+
+        switch (type) {
+            // .xword: (S+A)
+            case AARCH64_ElfRelocationConstants.R_AARCH64_ABS64: {
+                if (addend != 0 &&
isUnsupportedExternalRelocation(program, relocationAddress, + symbolAddr, symbolName, addend, elfRelocationContext.getLog())) { + addend = 0; // prefer bad fixup for EXTERNAL over really-bad fixup + } + newValue = (symbolValue + addend); + memory.setLong(relocationAddress, newValue); + break; + } + + // .word: (S+A) + case AARCH64_ElfRelocationConstants.R_AARCH64_ABS32: { + newValue = (symbolValue + addend); + memory.setInt(relocationAddress, (int) (newValue & 0xffffffff)); + break; + } + + // .half: (S+A) + case AARCH64_ElfRelocationConstants.R_AARCH64_ABS16: { + newValue = (symbolValue + addend); + memory.setShort(relocationAddress, (short) (newValue & 0xffff)); + break; + } + + // .xword: (S+A-P) + case AARCH64_ElfRelocationConstants.R_AARCH64_PREL64: { + newValue = (symbolValue + addend); + newValue -= (offset); // PC relative + memory.setLong(relocationAddress, newValue); + break; + } + + // .word: (S+A-P) + case AARCH64_ElfRelocationConstants.R_AARCH64_PREL32: { + newValue = (symbolValue + addend); + newValue -= (offset); // PC relative + memory.setInt(relocationAddress, (int) (newValue & 0xffffffff)); + break; + } + + // .half: (S+A-P) + case AARCH64_ElfRelocationConstants.R_AARCH64_PREL16: { + newValue = (symbolValue + addend); + newValue -= (offset); // PC relative + memory.setShort(relocationAddress, (short) (newValue & 0xffff)); + break; + } + + // ADRH: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff + case AARCH64_ElfRelocationConstants.R_AARCH64_ADR_PREL_PG_HI21: { + int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions); + newValue = ((PG(symbolValue + addend) - PG(offset)) >> 12) & 0x1fffff; + + newValue = (oldValue & 0x9f00001f) | ((newValue << 3) & 0xffffe0) | + ((newValue & 0x3) << 29); + + memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions); + break; + } + + // ADD: (S+A) & 0xfff + case AARCH64_ElfRelocationConstants.R_AARCH64_ADD_ABS_LO12_NC: { + int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions); + newValue = (int) (symbolValue + addend) & 0xfff; + + newValue = oldValue | (newValue << 10); + + memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions); + break; + } + + // LD/ST8: (S+A) & 0xfff + case AARCH64_ElfRelocationConstants.R_AARCH64_LDST8_ABS_LO12_NC: { + int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions); + newValue = (int) (symbolValue + addend) & 0xfff; + + newValue = oldValue | (newValue << 10); + + memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions); + break; + } + + // B: ((S+A-P) >> 2) & 0x3ffffff. 
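+            // (Worked example for the two branch relocations below, illustrative:
+            // a B/BL at P = 0x401000 targeting S+A = 0x402000 encodes
+            // (0x402000 - 0x401000) >> 2 = 0x400 in the low 26 bits of the word.)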
+            // BL: ((S+A-P) >> 2) & 0x3ffffff
+            case AARCH64_ElfRelocationConstants.R_AARCH64_JUMP26:
+            case AARCH64_ElfRelocationConstants.R_AARCH64_CALL26: {
+                int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions);
+                newValue = (symbolValue + addend);
+
+                newValue -= (offset); // PC relative
+
+                newValue = oldValue | ((newValue >> 2) & 0x03ffffff);
+
+                memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions);
+                break;
+            }
+
+            // LD/ST16: (S+A) & 0xffe
+            case AARCH64_ElfRelocationConstants.R_AARCH64_LDST16_ABS_LO12_NC: {
+                int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions);
+                newValue = (int) ((symbolValue + addend) & 0xffe) >> 1;
+
+                newValue = oldValue | (newValue << 10);
+
+                memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions);
+                break;
+            }
+
+            // LD/ST32: (S+A) & 0xffc
+            case AARCH64_ElfRelocationConstants.R_AARCH64_LDST32_ABS_LO12_NC: {
+                int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions);
+                newValue = (int) ((symbolValue + addend) & 0xffc) >> 2;
+
+                newValue = oldValue | (newValue << 10);
+
+                memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions);
+                break;
+            }
+
+            // LD/ST64: (S+A) & 0xff8
+            case AARCH64_ElfRelocationConstants.R_AARCH64_LDST64_ABS_LO12_NC:
+            case AARCH64_ElfRelocationConstants.R_AARCH64_LD64_GOT_LO12_NC: {
+                int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions);
+                newValue = (int) ((symbolValue + addend) & 0xff8) >> 3;
+
+                newValue = oldValue | (newValue << 10);
+
+                memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions);
+                break;
+            }
+
+            // LD/ST128: (S+A) & 0xff0
+            case AARCH64_ElfRelocationConstants.R_AARCH64_LDST128_ABS_LO12_NC: {
+                int oldValue = memory.getInt(relocationAddress, isBigEndianInstructions);
+                newValue = (int) ((symbolValue + addend) & 0xff0) >> 4;
+
+                newValue = oldValue | (newValue << 10);
+
+                memory.setInt(relocationAddress, (int) newValue, isBigEndianInstructions);
+                break;
+            }
+
+            case AARCH64_ElfRelocationConstants.R_AARCH64_GLOB_DAT: {
+                // Corresponds to resolved local/EXTERNAL symbols within GOT
+                if (elfRelocationContext.extractAddend()) {
+                    addend = memory.getLong(relocationAddress);
+                }
+                newValue = symbolValue + addend;
+                memory.setLong(relocationAddress, newValue);
+                break;
+            }
+
+            case AARCH64_ElfRelocationConstants.R_AARCH64_JUMP_SLOT: {
+                // Corresponds to lazy dynamically linked external symbols within
+                // GOT/PLT. symbolValue corresponds to PLT entry for which we need to
+                // create an external function location.
+                // Don't bother changing GOT entry bytes if it refers to .plt block
+                Address symAddress = elfRelocationContext.getSymbolAddress(sym);
+                MemoryBlock block = memory.getBlock(symAddress);
+                boolean isPltSym = block != null && block.getName().startsWith(".plt");
+                boolean isExternalSym =
+                    block != null && MemoryBlock.EXTERNAL_BLOCK_NAME.equals(block.getName());
+                if (!isPltSym) {
+                    memory.setLong(relocationAddress, symAddress.getOffset());
+                }
+                if (isPltSym || isExternalSym) {
+                    Function extFunction =
+                        elfRelocationContext.getLoadHelper().createExternalFunctionLinkage(
+                            symbolName, symAddress, null);
+                    if (extFunction == null) {
+                        markAsError(program, relocationAddress, "R_AARCH64_JUMP_SLOT", symbolName,
+                            "Failed to create R_AARCH64_JUMP_SLOT external function",
+                            elfRelocationContext.getLog());
+                        return;
+                    }
+                }
+                break;
+            }
+
+            case AARCH64_ElfRelocationConstants.R_AARCH64_RELATIVE: {
+                if (elfRelocationContext.extractAddend()) {
+                    addend = memory.getLong(relocationAddress);
+                }
+                newValue = elfRelocationContext.getImageBaseWordAdjustmentOffset() + addend;
+                memory.setLong(relocationAddress, newValue);
+                break;
+            }
+
+            case AARCH64_ElfRelocationConstants.R_AARCH64_COPY: {
+                markAsWarning(program, relocationAddress, "R_AARCH64_COPY", symbolName, symbolIndex,
+                    "Runtime copy not supported", elfRelocationContext.getLog());
+                break; // don't fall through to the unhandled-relocation marker
+            }
+
+            default: {
+                markAsUnhandled(program, relocationAddress, type, symbolIndex, symbolName,
+                    elfRelocationContext.getLog());
+                break;
+            }
+        }
+    }
+
+    long PG(long addr) {
+        return addr & (~0xfff);
+    }
+
+}
diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/macho/relocation/AARCH64_MachoRelocationConstants.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/macho/relocation/AARCH64_MachoRelocationConstants.java
new file mode 100644
index 00000000..5726b880
--- /dev/null
+++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/macho/relocation/AARCH64_MachoRelocationConstants.java
@@ -0,0 +1,84 @@
+/* ###
+ * IP: GHIDRA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package ghidra.app.util.bin.format.macho.relocation; + +/** + * {@link AARCH64_MachoRelocationHandler} constants + * + * @see mach-o/arm64/reloc.h + */ +public class AARCH64_MachoRelocationConstants { + + /** + * For pointers + */ + public final static int ARM64_RELOC_UNSIGNED = 0; + + /** + * Must be followed by a ARM64_RELOC_UNSIGNED + */ + public final static int ARM64_RELOC_SUBTRACTOR = 1; + + /** + * A B/BL instruction with 26-bit displacement + */ + public final static int ARM64_RELOC_BRANCH26 = 2; + + /** + * PC-rel distance to page of target + */ + public final static int ARM64_RELOC_PAGE21 = 3; + + /** + * Offset within page, scaled by r_length + */ + public final static int ARM64_RELOC_PAGEOFF12 = 4; + + /** + * PC-rel distance to page of GOT slot + */ + public final static int ARM64_RELOC_GOT_LOAD_PAGE21 = 5; + + /** + * Offset within page of GOT slot, scaled by r_length + */ + public final static int ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6; + + /** + * For pointers to GOT slots + */ + public final static int ARM64_RELOC_POINTER_TO_GOT = 7; + + /** + * PC-rel distance to page of TLVP slot + */ + public final static int ARM64_RELOC_TLVP_LOAD_PAGE21 = 8; + + /** + * Offset within page of TLVP slot, scaled by r_length + */ + public final static int ARM64_RELOC_TLVP_LOAD_PAGEOFF12 = 9; + + /** + * Must be followed by PAGE21 or PAGEOFF12 + */ + public final static int ARM64_RELOC_ADDEND = 10; + + /** + * Like ARM64_RELOC_UNSIGNED, but addend in lower 32-bits + */ + public final static int ARM64_RELOC_AUTHENTICATED_POINTER = 11; +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/macho/relocation/AARCH64_MachoRelocationHandler.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/macho/relocation/AARCH64_MachoRelocationHandler.java new file mode 100644 index 00000000..d0489dc6 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/app/util/bin/format/macho/relocation/AARCH64_MachoRelocationHandler.java @@ -0,0 +1,156 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.macho.relocation; + +import static ghidra.app.util.bin.format.macho.relocation.AARCH64_MachoRelocationConstants.*; + +import ghidra.app.util.bin.format.macho.*; +import ghidra.program.model.address.Address; +import ghidra.program.model.mem.MemoryAccessException; +import ghidra.util.Conv; +import ghidra.util.exception.NotFoundException; + +/** + * A {@link MachoRelocationHandler} for AARCH64 + * + * @see mach-o/arm64/reloc.h + */ +public class AARCH64_MachoRelocationHandler extends MachoRelocationHandler { + + @Override + public boolean canRelocate(MachHeader header) { + return header.getCpuType() == CpuTypes.CPU_TYPE_ARM_64; + } + + @Override + public boolean isPairedRelocation(RelocationInfo relocation) { + return relocation.getType() == ARM64_RELOC_SUBTRACTOR || + relocation.getType() == ARM64_RELOC_ADDEND; + } + + @Override + public void relocate(MachoRelocation relocation) + throws MemoryAccessException, NotFoundException { + + if (!relocation.requiresRelocation()) { + return; + } + + RelocationInfo relocationInfo = relocation.getRelocationInfo(); + Address relocAddr = relocation.getRelocationAddress(); + Address targetAddr; + long addendFromReloc; + if (relocationInfo.getType() == ARM64_RELOC_ADDEND) { + // ARM64_RELOC_ADDEND is a paired relocation, but it's a bit unique because it doesn't + // define its own relocation target...simply an addend value to be applied to the 2nd + // part of the relocation. We'll just save off the addend value and proceed as if the + // "extra" part of the relocation pair is a normal unpaired relocation. + targetAddr = relocation.getTargetAddressExtra(); + addendFromReloc = relocationInfo.getValue(); + relocationInfo = relocation.getRelocationInfoExtra(); + } + else { + targetAddr = relocation.getTargetAddress(); + addendFromReloc = 0; + + } + long orig = read(relocation); + + switch (relocationInfo.getType()) { + case ARM64_RELOC_UNSIGNED: + case ARM64_RELOC_POINTER_TO_GOT: { + long addend = orig; + long value = targetAddr.getOffset() + addend; + write(relocation, value); + break; + } + case ARM64_RELOC_SUBTRACTOR: { + Address targetAddrExtra = relocation.getTargetAddressExtra(); + if (orig > 0) { + write(relocation, targetAddrExtra.add(orig).subtract(targetAddr)); + } + else { + write(relocation, targetAddr.add(orig).subtract(targetAddrExtra)); + } + break; + } + case ARM64_RELOC_BRANCH26: { + long addend = orig & 0x3ffffff; + long value = (targetAddr.subtract(relocAddr) >> 2) + addend; + long instr = orig | (value & 0x3ffffff); + write(relocation, instr); + break; + } + case ARM64_RELOC_PAGE21: + case ARM64_RELOC_GOT_LOAD_PAGE21: { + // ADRP + long immlo = (orig >> 29) & 0x3; + long immhi = (orig >> 5) & 0x7ffff; + long addend = ((immhi << 2) | immlo) << 12; + addend += addendFromReloc; + long pageTarget = PG(targetAddr.getOffset() + addend); + long pageReloc = PG(relocAddr.getOffset()); + long value = ((pageTarget - pageReloc) >> 12) & 0x1fffff; + long instr = + (orig & 0x9f00001f) | ((value << 3) & 0x7ffffe0) | ((value & 0x3) << 29); + write(relocation, instr); + break; + } + case ARM64_RELOC_PAGEOFF12: + case ARM64_RELOC_GOT_LOAD_PAGEOFF12: { + long instr; + long addend = addendFromReloc; + if ((orig & 0x08000000) > 0) { + // LDR/STR + long size = (orig >> 30) & 0x3; + addend += (orig >> 10) & 0xfff; + long value = ((targetAddr.getOffset() + addend) & 0xfff) >> size; + instr = orig | (value << 10); + } + else { + // ADD + addend += (orig >> 10) & 0xfff; + long value = (targetAddr.getOffset() + addend) & 
0xfff; + instr = orig | (value << 10); + } + write(relocation, instr); + break; + } + case ARM64_RELOC_AUTHENTICATED_POINTER: { + long addend = orig & Conv.INT_MASK; + long value = targetAddr.getOffset() + addend; + write(relocation, value); + break; + } + + case ARM64_RELOC_TLVP_LOAD_PAGE21: // not seen yet + case ARM64_RELOC_TLVP_LOAD_PAGEOFF12: // not seen yet + case ARM64_RELOC_ADDEND: // should never see on its own here + default: + throw new NotFoundException("Unimplemented relocation"); + } + } + + /** + * Returns the page address of the given address (assumes 4KB page) + * + * @param addr The address to get the page of + * @return The page address of the given address + */ + private long PG(long addr) { + return addr & (~0xfff); + } +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java new file mode 100644 index 00000000..f2f9a551 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java @@ -0,0 +1,1672 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.program.emulation; + +import java.math.BigInteger; + +import ghidra.pcode.emulate.Emulate; +import ghidra.pcode.emulate.EmulateInstructionStateModifier; +import ghidra.pcode.emulate.callother.OpBehaviorOther; +import ghidra.pcode.error.LowlevelError; +import ghidra.pcode.memstate.MemoryState; +import ghidra.program.model.pcode.Varnode; +//import ghidra.pcode.emulate.callother.SignalingNaNOpBehavior; + +public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionStateModifier { + + public AARCH64EmulateInstructionStateModifier(Emulate emu) { + super(emu); + + // The following SIMD and MP versions of SLEIGH + // primitives are implemented in java for AARCH64 + + // BLANK: + // COPY: +// registerPcodeOpBehavior("SIMD_COPY", new SIMD_COPY()); + // LOAD: + // STORE: + // BRANCH: + // CBRANCH: + // BRANCHIND: + // CALL: + // CALLIND: + // CALLOTHER: + // RETURN: + // INT_EQUAL: + // registerPcodeOpBehavior("MP_INT_EQUAL", new MP_INT_EQUAL()); + // INT_NOTEQUAL: + // INT_SLESS: + // INT_SLESSEQUAL: + // INT_LESS: + // INT_LESSEQUAL: + // INT_ZEXT: +// registerPcodeOpBehavior("SIMD_INT_ZEXT", new SIMD_INT_ZEXT()); + // INT_SEXT: +// registerPcodeOpBehavior("SIMD_INT_SEXT", new SIMD_INT_SEXT()); + // INT_ABS (no equivalent SLEIGH primitive): + registerPcodeOpBehavior("MP_INT_ABS", new MP_INT_ABS()); +// registerPcodeOpBehavior("SIMD_INT_ABS", new SIMD_INT_ABS()); + // INT_ADD: + // registerPcodeOpBehavior("SIMD_INT_ADD", new SIMD_INT_ADD()); + // registerPcodeOpBehavior("SIPD_INT_ADD", new SIPD_INT_ADD()); + // INT_SUB: +// registerPcodeOpBehavior("SIMD_INT_SUB", new SIMD_INT_SUB()); + // INT_CARRY: + // INT_SCARRY: + // INT_SBORROW: + // INT_2COMP: +// registerPcodeOpBehavior("SIMD_INT_2COMP", new SIMD_INT_2COMP()); + // INT_NEGATE: + // registerPcodeOpBehavior("MP_INT_NEGATE", new MP_INT_NEGATE()); +// registerPcodeOpBehavior("SIMD_INT_NEGATE", new SIMD_INT_NEGATE()); + // INT_XOR: +// registerPcodeOpBehavior("SIMD_INT_XOR", new SIMD_INT_XOR()); + // INT_AND: + // registerPcodeOpBehavior("MP_INT_AND", new MP_INT_AND()); +// registerPcodeOpBehavior("SIMD_INT_AND", new SIMD_INT_AND()); + // INT_OR: +// registerPcodeOpBehavior("SIMD_INT_OR", new SIMD_INT_OR()); + // INT_LEFT: +// registerPcodeOpBehavior("SIMD_INT_LEFT", new SIMD_INT_LEFT()); + // INT_RIGHT: +// registerPcodeOpBehavior("SIMD_INT_RIGHT", new SIMD_INT_RIGHT()); + registerPcodeOpBehavior("MP_INT_RIGHT", new MP_INT_RIGHT()); + // INT_SRIGHT: +// registerPcodeOpBehavior("SIMD_INT_SRIGHT", new SIMD_INT_SRIGHT()); + // INT_MULT: +// registerPcodeOpBehavior("SIMD_INT_MULT", new SIMD_INT_MULT()); + registerPcodeOpBehavior("MP_INT_MULT", new MP_INT_MULT()); + registerPcodeOpBehavior("MP_INT_UMULT", new MP_INT_UMULT()); + // INT_DIV: + // INT_SDIV: + // INT_REM: + // INT_SREM: + // BOOL_NEGATE: + // BOOL_XOR: + // BOOL_AND: + // BOOL_OR: + // FLOAT_EQUAL: + // FLOAT_NOTEQUAL: + // FLOAT_LESS: + // FLOAT_LESSEQUAL: + // UNUSED1: + // FLOAT_NAN: + // FLOAT_ADD: +// registerPcodeOpBehavior("SIMD_FLOAT_ADD", new SIMD_FLOAT_ADD()); + // registerPcodeOpBehavior("SIPD_FLOAT_ADD", new SIPD_FLOAT_ADD()); + // FLOAT_DIV: +// registerPcodeOpBehavior("SIMD_FLOAT_DIV", new SIMD_FLOAT_DIV()); + // FLOAT_MULT: +// registerPcodeOpBehavior("SIMD_FLOAT_MULT", new SIMD_FLOAT_MULT()); + // FLOAT_SUB: +// registerPcodeOpBehavior("SIMD_FLOAT_SUB", new SIMD_FLOAT_SUB()); + // FLOAT_NEG: +// registerPcodeOpBehavior("SIMD_FLOAT_NEG", new SIMD_FLOAT_NEG()); + // FLOAT_ABS: +// registerPcodeOpBehavior("SIMD_FLOAT_ABS", new 
SIMD_FLOAT_ABS());
+        // FLOAT_SQRT:
+        // INT2FLOAT:
+        // FLOAT2FLOAT:
+//        registerPcodeOpBehavior("SIMD_FLOAT2FLOAT", new SIMD_FLOAT2FLOAT());
+        // TRUNC:
+//        registerPcodeOpBehavior("SIMD_TRUNC", new SIMD_TRUNC());
+        // CEIL:
+        // FLOOR:
+        // ROUND:
+        // registerPcodeOpBehavior("SIMD_FLOAT_ROUND", new SIMD_FLOAT_ROUND());
+        // BUILD:
+        // DELAY_SLOT:
+        // PIECE:
+        registerPcodeOpBehavior("SIMD_PIECE", new SIMD_PIECE());
+        // SUBPIECE:
+        // CAST:
+        // LABEL:
+        // CROSSBUILD:
+        // SEGMENTOP:
+        // CPOOLREF:
+        // NEW:
+
+        // CONCAT: no sleigh equivalent
+        // registerPcodeOpBehavior("a64_CONCAT", new a64_CONCAT());
+
+        // The following AARCH64 instructions are implemented
+        // in java as a pcodeop
+
+        // TBL/TBX:
+        registerPcodeOpBehavior("a64_TBL", new a64_TBL());
+    }
+
+    // Helper functions
+
+    private long getmask(long esize) {
+        long mask = -1;
+        if (esize < 8) {
+            mask = mask >>> ((8 - esize) * 8);
+        }
+        return mask;
+    }
+
+    // Simple versions of half precision.
+    // This is for demonstration purposes only;
+    // NaN, rounding, and normalization are ignored.
+
+    private float shortBitsToFloat(long x) {
+        long sign = (x >>> 15) & 0x1;
+        // parenthesized: '&' binds more loosely than '-'/'+' in Java, so the
+        // exponent bits must be masked before rebiasing
+        long exp = ((x >>> 10) & 0x1f) - 15 + 127;
+        long mant = (x & 0x3ff) << 13;
+        return Float.intBitsToFloat((int) (sign << 31 | exp << 23 | mant));
+    }
+
+    private long floatToShortBits(float x) {
+        long fbits = Float.floatToIntBits(x);
+        long sign = (fbits >>> 31) & 0x1;
+        long exp = ((fbits >>> 23) & 0xff) - 127 + 15;
+        long mant = (fbits & 0x7fffff) >>> 13;
+        return (long) (sign << 15 | exp << 10 | mant);
+    }
+
+    // Convert a byte array to a long
+    // assume that lsb is the least significant byte
+    // and there are at most esize bytes.
+    // the byte array is in big endian order
+
+    protected long bytes_to_long(byte[] bytes, int lsb, int esize) {
+        if (lsb <= 0) {
+            return 0;
+        }
+
+        int i = lsb - esize;
+        if (i < 0) {
+            i = 0;
+        }
+
+        long result = bytes[i];
+        i += 1;
+        while (i < lsb) {
+            result = result << 8;
+            result = result | (bytes[i] & 0xff);
+            i += 1;
+        }
+        return result;
+    }
+
+    // Insert size bytes from the long value into the byte
+    // array.
+
+    protected void insert_long(long value, byte[] outBytes, int lsb, int esize) {
+        if (lsb - esize < 0) {
+            throw new LowlevelError("insert_long: byte array too small");
+        }
+        for (int j = 0; j < esize; j++) {
+            outBytes[lsb - j - 1] = (byte) (value & 0xff);
+            value = value >> 8;
+        }
+    }
+
+    // Allocate a byte array of the correct size to hold
+    // the output, initialize it to all zeros, and copy any
+    // value in the init varnode (with sign extension to a
+    // size boundary)
+
+    protected byte[] varnode_to_bytes(Varnode outputVarnode, byte[] initBytes, int esize) {
+
+        byte[] outBytes = new byte[outputVarnode.getSize()];
+        if (initBytes == null) {
+            return outBytes;
+        }
+
+        byte ext = 0;
+
+        for (int i = outBytes.length, j = initBytes.length; i > 0; i--, j--) {
+            if (j > 0) {
+                outBytes[i - 1] = initBytes[j - 1];
+                ext = (byte) ((initBytes[j - 1] >= 0) ? 0 : 0xff);
+            } else {
+                outBytes[i - 1] = ext;
+                if (((i - 1) % esize) == 0) {
+                    break;
+                }
+            }
+        }
+
+        return outBytes;
+    }
+
+    // Abstract classes for unary and binary operations
+
+    // Generic simd unary operation
+    //
+    // Must be extended with op method
+    //
+    // Vd = SIMD_OP1(Vm, size);
+    //
+    // Vd: output variable
+    // Vm: value to operate on
+    // size: size of lanes to operate on
+
+    private abstract class SIMD_OP1 implements OpBehaviorOther {
+
+        protected abstract long op1(long x, int esize);
+
+        void check_args(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            // Requires 2 inputs: the operand varnode and the lane size
+
+            int numArgs = inputs.length - 1;
+            if (numArgs != 2) {
+                throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (op, size), got " + numArgs);
+            }
+
+            if (outputVarnode == null) {
+                throw new LowlevelError(this.getClass().getName() + ": missing required output");
+            }
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            // Get the simd variable to output, the value to copy, and the offset
+
+            Varnode simdVarnode = inputs[1];
+            int esize = (int) memoryState.getValue(inputs[2]);
+
+            if (outputVarnode.getSize() < simdVarnode.getSize()) {
+                throw new LowlevelError(this.getClass().getName() + ": input size (" + simdVarnode.getSize() +
+                    ") exceeds output size (" + outputVarnode.getSize() + ")");
+            }
+
+            if (esize != 1 && esize != 2 && esize != 4 && esize != 8) {
+                throw new LowlevelError(this.getClass().getName() + ": operand must be 1, 2, 4, or 8 bytes: got " + esize);
+            }
+
+            if ((outputVarnode.getSize() % esize) != 0) {
+                throw new LowlevelError(this.getClass().getName() + ": output size (" + outputVarnode.getSize() +
+                    ") must be a multiple of operand size (" + esize + ")");
+            }
+        }
+
+    }
+
+    // Signed SIMD_OP1
+
+    private abstract class SIMD_SOP1 extends SIMD_OP1 {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            Varnode simdVarnode = inputs[1];
+            int esize = (int) memoryState.getValue(inputs[2]);
+
+            byte[] simdBytes = memoryState.getBigInteger(simdVarnode, true).toByteArray();
+            byte[] outBytes = varnode_to_bytes(outputVarnode, null, esize);
+
+            for (int outLSB = outBytes.length, simdLSB = simdBytes.length;
+                    outLSB > 0;
+                    outLSB -= esize, simdLSB -= esize) {
+
+                long simdLane = bytes_to_long(simdBytes, simdLSB, esize);
+
+                // Perform the operation
+
+                simdLane = op1(simdLane, esize);
+
+                // Put the result back into the output
+
+                insert_long(simdLane, outBytes, outLSB, esize);
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
+
+    // Unsigned SIMD_OP1
+
+    private abstract class SIMD_UOP1 extends SIMD_OP1 {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            Varnode simdVarnode = inputs[1];
+            int esize = (int) memoryState.getValue(inputs[2]);
+
+            byte[] simdBytes = memoryState.getBigInteger(simdVarnode, false).toByteArray();
+            byte[] outBytes = varnode_to_bytes(outputVarnode, null, esize);
+
+            for (int outLSB = outBytes.length, simdLSB = simdBytes.length;
+                    outLSB > 0;
+                    outLSB -= esize, simdLSB -= esize) {
+
+                long simdLane = bytes_to_long(simdBytes, simdLSB, esize);
+
+                // Perform the operation
+
+                simdLane = op1(simdLane, esize);
+
+                // Put the result back into the output
+
+                insert_long(simdLane, outBytes, outLSB, esize);
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
+
+    // Generic simd unary operation with extension
+    //
+    // Vd = SIMD_OP1E(Vm, size);
+    //
+    // Vd: output variable
+    // Vm: value to operate on
+    // size: size of lanes to operate on
+    //
+    // Output lane size is scaled from the input lane size by the ratio of
+    // output to input varnode sizes, but lanes can't be larger than 8 bytes.
+    //
+    // For extension,
+    //
+    // output:16 = SIMD_OP1E(input:8, 2:1)
+    //
+    // extends the 2-byte lanes in the input to 4-byte lanes in
+    // the output.
+    //
+    // For contraction,
+    //
+    // output:8 = SIMD_OP1E(input:16, 2:1)
+    //
+    // contracts the 2-byte lanes in the input to 1-byte lanes in
+    // the output.
+    //
+    // The class must be extended with an op1e method
+
+    private abstract class SIMD_OP1E implements OpBehaviorOther {
+
+        protected abstract long op1e(long x, int s_size, int d_size);
+
+        void check_args(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            // Requires 2 inputs
+
+            int numArgs = inputs.length - 1;
+            if (numArgs != 2) throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (op, size), got " + numArgs);
+
+            if (outputVarnode == null) throw new LowlevelError(this.getClass().getName() + ": missing required output");
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            // Get the simd variable to output, the value to copy, and the offset
+
+            Varnode simdVarnode = inputs[1];
+            int s_size = (int) memoryState.getValue(inputs[2]);
+
+            if (outputVarnode.getSize() != 2 * simdVarnode.getSize())
+                throw new LowlevelError(this.getClass().getName() + ": input size (" + simdVarnode.getSize() +
+                    ") must be exactly half of the output size (" + outputVarnode.getSize() + ")");
+
+            if (s_size != 1 && s_size != 2 && s_size != 4 && s_size != 8)
+                throw new LowlevelError(this.getClass().getName() + ": input elements must be 1, 2, 4, or 8 bytes: got " + s_size);
+
+            int d_size = (s_size * outputVarnode.getSize()) / simdVarnode.getSize();
+            if (d_size != 1 && d_size != 2 && d_size != 4 && d_size != 8)
+                throw new LowlevelError(this.getClass().getName() + ": the output elements must be 1, 2, 4, or 8 bytes: got " + d_size);
+
+            if ((simdVarnode.getSize() % s_size) != 0)
+                throw new LowlevelError(this.getClass().getName() + ": input size (" + simdVarnode.getSize() +
+                    ") must be a multiple of input element size (" + s_size + ")");
+
+            if ((outputVarnode.getSize() % d_size) != 0)
+                throw new LowlevelError(this.getClass().getName() + ": output size (" + outputVarnode.getSize() +
+                    ") must be a multiple of output element size (" + d_size + ")");
+        }
+    }
+
+    // Signed OP1E
+
+    private abstract class SIMD_SOP1E extends SIMD_OP1E {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            Varnode simdVarnode = inputs[1];
+            int s_size = (int) memoryState.getValue(inputs[2]);
+            int d_size = (s_size * outputVarnode.getSize()) / simdVarnode.getSize();
+
+            byte[] simdBytes = memoryState.getBigInteger(simdVarnode, true).toByteArray();
+            byte[] outBytes = varnode_to_bytes(outputVarnode, null, d_size);
+
+            for (int outLSB = outBytes.length, simdLSB = simdBytes.length;
+                    outLSB > 0;
+                    outLSB -= d_size, simdLSB -= s_size) {
+
+                long simdLane = bytes_to_long(simdBytes, simdLSB, s_size);
+
+                // Perform the operation
+
+                simdLane = op1e(simdLane, s_size, d_size);
+
+                // Put the result back into the output
+
+                insert_long(simdLane, outBytes, outLSB, d_size);
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+
+    // Unsigned OP1E
+
+    private abstract class SIMD_UOP1E extends SIMD_OP1E {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            Varnode simdVarnode = inputs[1];
+            int s_size = (int) memoryState.getValue(inputs[2]);
+            int d_size = (s_size * outputVarnode.getSize()) / simdVarnode.getSize();
+
+            byte[] simdBytes = memoryState.getBigInteger(simdVarnode, false).toByteArray();
+            byte[] outBytes = varnode_to_bytes(outputVarnode, null, d_size);
+
+            for (int outLSB = outBytes.length, simdLSB = simdBytes.length;
+                outLSB > 0;
+                outLSB -= d_size, simdLSB -= s_size) {
+
+                long simdLane = bytes_to_long(simdBytes, simdLSB, s_size);
+
+                // Perform the operation
+
+                simdLane = op1e(simdLane, s_size, d_size);
+
+                // Put the result back into the output
+
+                insert_long(simdLane, outBytes, outLSB, d_size);
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
+
+
+    // Generic simd binary operation
+    //
+    // Must be extended with an op2 method
+    //
+    // Vd = SIMD_OP2(Vm, Vn, esize);
+    //
+    // Vd: output variable
+    // Vm, Vn: op1 and op2
+    // esize: optional size of lanes, in bytes; if omitted, Vn is a
+    // constant operand applied to each lane of Vm (of that size)
+
+    private abstract class SIMD_OP2 implements OpBehaviorOther {
+
+        protected abstract long op2(long x, long y, int esize);
+
+        void check_args(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            // Requires 2 or 3 inputs
+
+            int numArgs = inputs.length - 1;
+            if (numArgs != 2 && numArgs != 3) {
+                throw new LowlevelError(this.getClass().getName() + ": requires 2 or 3 inputs (simd, op[, esize]), got " + numArgs);
+            }
+
+            if (outputVarnode == null) {
+                throw new LowlevelError(this.getClass().getName() + ": missing required output");
+            }
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            // Get the simd variable to operate on, the operand, and the lane size
+
+            Varnode simdVarnode = inputs[1];
+            Varnode opVarnode = inputs[2];
+
+            int esize = opVarnode.getSize();
+            boolean opConstant = (numArgs == 2);
+            if (! opConstant) {
+                esize = (int) memoryState.getValue(inputs[3]);
+            }
+
+            if (outputVarnode.getSize() < simdVarnode.getSize()) {
+                throw new LowlevelError(this.getClass().getName() + ": input size (" + simdVarnode.getSize() +
+                    ") exceeds output size (" + outputVarnode.getSize() + ")");
+            }
+
+            if (esize != 1 && esize != 2 && esize != 4 && esize != 8) {
+                throw new LowlevelError(this.getClass().getName() + ": operand must be 1, 2, 4, or 8 bytes: got " + esize);
+            }
+
+            if ((outputVarnode.getSize() % esize) != 0) {
+                throw new LowlevelError(this.getClass().getName() + ": output size (" + outputVarnode.getSize() +
+                    ") must be a multiple of operand size (" + esize + ")");
+            }
+
+            if (! opConstant && simdVarnode.getSize() != opVarnode.getSize()) {
+                throw new LowlevelError(this.getClass().getName() + ": simd size (" + simdVarnode.getSize() +
+                    ") and operand size (" + opVarnode.getSize() + ") must be the same for simd operation");
+            }
+
+        }
+    }
+
+    // Signed SIMD_OP2
+
+    private abstract class SIMD_SOP2 extends SIMD_OP2 {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            Varnode simdVarnode = inputs[1];
+            Varnode opVarnode = inputs[2];
+            boolean opConstant = (inputs.length == 3);
+            int esize = opVarnode.getSize();
+            if (! 
opConstant) { + esize = (int) memoryState.getValue(inputs[3]); + } + int opstep = (opConstant ? 0 : esize); + + byte[] simdBytes = memoryState.getBigInteger(simdVarnode, true).toByteArray(); + byte[] opBytes = memoryState.getBigInteger(opVarnode, true).toByteArray(); + byte[] outBytes = varnode_to_bytes(outputVarnode, null, esize); + + for (int outLSB = outBytes.length, simdLSB = simdBytes.length, opLSB = opBytes.length; + outLSB > 0; + outLSB -= esize, simdLSB -= esize, opLSB -= opstep) { + + long simdLane = bytes_to_long(simdBytes, simdLSB, esize); + long opLane = bytes_to_long(opBytes, opLSB, esize); + + // Perform the operation + + simdLane = op2(simdLane, opLane, esize); + + // Put the result back into the output + + insert_long(simdLane, outBytes, outLSB, esize); + } + + memoryState.setValue(outputVarnode, new BigInteger(outBytes)); + } + } + + // Unsigned SIMD_OP2 + + private abstract class SIMD_UOP2 extends SIMD_OP2 { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + + check_args(emu, outputVarnode, inputs); + + MemoryState memoryState = emu.getMemoryState(); + + Varnode simdVarnode = inputs[1]; + Varnode opVarnode = inputs[2]; + boolean opConstant = (inputs.length == 3); + int esize = opVarnode.getSize(); + if (! opConstant) { + esize = (int) memoryState.getValue(inputs[3]); + } + int opstep = (opConstant ? 0 : esize); + + byte[] simdBytes = memoryState.getBigInteger(simdVarnode, false).toByteArray(); + byte[] opBytes = memoryState.getBigInteger(opVarnode, false).toByteArray(); + byte[] outBytes = varnode_to_bytes(outputVarnode, null, esize); + + for (int outLSB = outBytes.length, simdLSB = simdBytes.length, opLSB = opBytes.length; + outLSB > 0; + outLSB -= esize, simdLSB -= esize, opLSB -= opstep) { + + long simdLane = bytes_to_long(simdBytes, simdLSB, esize); + long opLane = bytes_to_long(opBytes, opLSB, esize); + + // Perform the operation + + simdLane = op2(simdLane, opLane, esize); + + // Put the result back into the output + + insert_long(simdLane, outBytes, outLSB, esize); + } + + memoryState.setValue(outputVarnode, new BigInteger(outBytes)); + } + } + + // Generic sipd (paired data) binary operation + // + // Must be extended with op method + // + // Vd = SIPD_OP2(Vn, esize); + // + // Vd: output variable + // Vn1, Vn2: op (optional Vn2 to concatenate) + // iesize: size of input lanes to add + + private abstract class SIPD_OP2 implements OpBehaviorOther { + + protected abstract long op2(long x, long y, int iesize, int oesize); + + void check_args(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + + // Requires 2 inputs + + int numArgs = inputs.length - 1; + if (numArgs != 2 && numArgs != 3) { + throw new LowlevelError(this.getClass().getName() + ": requires 2 or 3 inputs (pairData*, esize), got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError(this.getClass().getName() + ": missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + + // Get the paired variables and the offset + + Varnode p1Varnode = inputs[1]; + int isize = p1Varnode.getSize(); + Varnode p2Varnode = null; + if (numArgs == 3) { + p2Varnode = inputs[2]; + isize += p2Varnode.getSize(); + } + + int iesize = (int) memoryState.getValue(inputs[numArgs]); + int osize = outputVarnode.getSize(); + int oesize = (iesize * osize) / isize; + + if (iesize != 1 && iesize != 2 && iesize != 4 && iesize != 8) { + throw new LowlevelError(this.getClass().getName() + ": operand lanes must be 1, 2, 4, or 8 bytes: got " + 
iesize);
+            }
+
+            if (oesize != 1 && oesize != 2 && oesize != 4 && oesize != 8) {
+                throw new LowlevelError(this.getClass().getName() + ": output lanes must be 1, 2, 4, or 8 bytes: got " + oesize);
+            }
+        }
+
+    }
+
+    // Signed SIPD_OP2
+
+    private abstract class SIPD_SOP2 extends SIPD_OP2 {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            int numArgs = inputs.length - 1;
+            Varnode p1Varnode = inputs[1];
+            int isize = p1Varnode.getSize();
+            Varnode p2Varnode = null;
+            if (numArgs == 3) {
+                p2Varnode = inputs[2];
+                isize += p2Varnode.getSize();
+            }
+
+            int iesize = (int) memoryState.getValue(inputs[numArgs]);
+            int osize = outputVarnode.getSize();
+            int oesize = (iesize * osize) / isize;
+
+            // create pairBytes, concatenating if
+            // necessary. If there were 2 arguments, the
+            // second one is least significant
+
+            byte[] pairBytes = new byte[isize];
+
+            if (p2Varnode != null) {
+                byte[] p2Bytes = memoryState.getBigInteger(p2Varnode, true).toByteArray();
+                for (int i = p2Bytes.length, pi = pairBytes.length;
+                    i > 0 && pi > 0;
+                    i -= 1, pi -= 1) {
+
+                    pairBytes[pi - 1] = p2Bytes[i - 1];
+                }
+            }
+            byte[] p1Bytes = memoryState.getBigInteger(p1Varnode, true).toByteArray();
+            for (int i = p1Bytes.length, pi = p1Varnode.getSize();
+                i > 0 && pi > 0;
+                i -= 1, pi -= 1) {
+
+                pairBytes[pi - 1] = p1Bytes[i - 1];
+            }
+
+            byte[] outBytes = varnode_to_bytes(outputVarnode, null, osize);
+
+            for (int outLSB = outBytes.length, opLSB = pairBytes.length;
+                outLSB > 0 && opLSB > 0;
+                outLSB -= oesize, opLSB -= iesize) {
+
+                long arg1Lane = bytes_to_long(pairBytes, opLSB, iesize);
+                long arg2Lane = bytes_to_long(pairBytes, opLSB - iesize, iesize);
+
+                // Perform the operation
+
+                arg1Lane = op2(arg1Lane, arg2Lane, iesize, oesize);
+
+                // Put the result back into the output
+
+                insert_long(arg1Lane, outBytes, outLSB, oesize);
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
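+
+    // Note on the lane walk above: op2 is handed the lane at opLSB together
+    // with the lane immediately below it, and one oesize-byte result lane
+    // is written per step. A concrete subclass such as SIPD_INT_ADD
+    // (defined further below) therefore only has to return x + y.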
+
+    // Unsigned SIPD_OP2
+
+    private abstract class SIPD_UOP2 extends SIPD_OP2 {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            check_args(emu, outputVarnode, inputs);
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            int numArgs = inputs.length - 1;
+            Varnode p1Varnode = inputs[1];
+            int isize = p1Varnode.getSize();
+            Varnode p2Varnode = null;
+            if (numArgs == 3) {
+                p2Varnode = inputs[2];
+                isize += p2Varnode.getSize();
+            }
+
+            int iesize = (int) memoryState.getValue(inputs[numArgs]);
+            int osize = outputVarnode.getSize();
+            int oesize = (iesize * osize) / isize;
+
+            // create pairBytes, concatenating if
+            // necessary. If there were 2 arguments, the
+            // second one is least significant
+
+            byte[] pairBytes = new byte[isize];
+
+            if (p2Varnode != null) {
+                byte[] p2Bytes = memoryState.getBigInteger(p2Varnode, false).toByteArray();
+                for (int i = p2Bytes.length, pi = pairBytes.length;
+                    i > 0 && pi > 0;
+                    i -= 1, pi -= 1) {
+
+                    pairBytes[pi - 1] = p2Bytes[i - 1];
+                }
+            }
+            byte[] p1Bytes = memoryState.getBigInteger(p1Varnode, false).toByteArray();
+            for (int i = p1Bytes.length, pi = p1Varnode.getSize();
+                i > 0 && pi > 0;
+                i -= 1, pi -= 1) {
+
+                pairBytes[pi - 1] = p1Bytes[i - 1];
+            }
+
+            byte[] outBytes = varnode_to_bytes(outputVarnode, null, osize);
+
+            for (int outLSB = outBytes.length, opLSB = pairBytes.length;
+                outLSB > 0 && opLSB > 0;
+                outLSB -= oesize, opLSB -= iesize) {
+
+                long arg1Lane = bytes_to_long(pairBytes, opLSB, iesize);
+                long arg2Lane = bytes_to_long(pairBytes, opLSB - iesize, iesize);
+
+                // Perform the operation
+
+                arg1Lane = op2(arg1Lane, arg2Lane, iesize, oesize);
+
+                // Put the result back into the output
+
+                insert_long(arg1Lane, outBytes, outLSB, oesize);
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
+
+    // Implementations of pcodeops
+
+    // Copy the input value to the given output lane
+    //
+    // Vd = SIMD_COPY(Vinit, Vn, offset);
+    //
+    // Vd = destination varnode
+    // Vinit = default values for non-affected lanes, usually just Vd
+    // Vn = value to copy
+    // offset = optional integer to specify the lane (0 = least significant)
+    // to copy the value. If not specified, then all lanes are copied
+
+    private class SIMD_COPY implements OpBehaviorOther {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            // Requires 2 or 3 inputs
+
+            int numArgs = inputs.length - 1;
+            if (numArgs != 2 && numArgs != 3) throw new LowlevelError("SIMD_COPY: requires 2 or 3 inputs, got " + numArgs);
+
+            if (outputVarnode == null) throw new LowlevelError("SIMD_COPY: missing required output");
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            // Get the init variable to output, the value to copy, and the offset
+
+            Varnode initVarnode = inputs[1];
+            Varnode valueVarnode = inputs[2];
+            int offset = -1;
+            if (numArgs == 3) offset = (int) memoryState.getValue(inputs[3]);
+
+            if (outputVarnode.getSize() < initVarnode.getSize())
+                throw new LowlevelError("SIMD_COPY: output size (" + outputVarnode.getSize() +
+                    ") is smaller than the init size (" + initVarnode.getSize() + ")");
+
+            if (offset >= 0) {
+                if (outputVarnode.getSize() < (offset + 1) * valueVarnode.getSize())
+                    throw new LowlevelError("SIMD_COPY: output size (" + outputVarnode.getSize() +
+                        ") too small to copy input size (" + valueVarnode.getSize() + ") to offset (" + offset + ")");
+            } else {
+                if (outputVarnode.getSize() < valueVarnode.getSize() || (outputVarnode.getSize() % valueVarnode.getSize()) != 0)
+                    throw new LowlevelError("SIMD_COPY: output size (" + outputVarnode.getSize() +
+                        ") must be multiple of input size (" + valueVarnode.getSize() + ")");
+            }
+
+            // Allocate a byte array of the correct size to hold the output
+            // initialized to all zeros
+
+            int outSize = outputVarnode.getSize();
+            byte[] outBytes = new byte[outSize];
+            byte[] initBytes = memoryState.getBigInteger(initVarnode, false).toByteArray();
+            for (int i = 0; i < initBytes.length && i < outSize; i++)
+                outBytes[outSize - 1 - i] = initBytes[initBytes.length - 1 - i];
+
+            // Get the bytes to copy (treat them as unsigned)
+            // The byte arrays are always in big endian order, and may be truncated,
+            // and if it's a negative value, then it needs to be sign extended.
+
+            int valueSize = valueVarnode.getSize();
+            byte[] copyBytes = new byte[valueSize];
+            {
+                byte[] valueBytes = memoryState.getBigInteger(valueVarnode, false).toByteArray();
+                byte ext = 0;
+                for (int i = valueSize - 1, j = valueBytes.length - 1; i >= 0; i--, j--) {
+                    if (j >= 0) {
+                        copyBytes[i] = valueBytes[j];
+                        ext = (byte) ((valueBytes[j] < 0) ? -1 : 0);
+                    } else {
+                        copyBytes[i] = ext;
+                    }
+                }
+            }
+
+            for (int i = 0; i < valueSize; i++) {
+                if (offset >= 0) {
+                    outBytes[outSize - offset * valueSize - valueSize + i] = copyBytes[i];
+                } else {
+                    for (int offs = 0; offs * valueSize < outSize; offs += 1)
+                        outBytes[outSize - offs * valueSize - valueSize + i] = copyBytes[i];
+                }
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
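+
+    // Worked example: with a 16-byte Vd, SIMD_COPY(Vd, Vn:4, 2:1) rewrites
+    // only byte lanes 8..11 (lane 2 of four 4-byte lanes, counting from the
+    // least-significant end); SIMD_COPY(Vd, Vn:4) with no offset broadcasts
+    // Vn into all four lanes, which is what a DUP-style splat needs.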
+
+    // Multiprecision compare
+    //
+    // Vd = MP_INT_EQUAL(Vn, Vm);
+    //
+    // Vd: destination varnode (will be 0 or 1)
+    // Vn, Vm: values to compare
+
+    @SuppressWarnings("unused")
+    private class MP_INT_EQUAL implements OpBehaviorOther {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+            int numArgs = inputs.length - 1;
+            if (numArgs != 2) {
+                throw new LowlevelError("MP_INT_EQUAL: requires 2 (Vm, Vn), got " + numArgs);
+            }
+
+            if (outputVarnode == null) {
+                throw new LowlevelError("MP_INT_EQUAL: missing required output");
+            }
+
+            MemoryState memoryState = emu.getMemoryState();
+            BigInteger cmp1 = memoryState.getBigInteger(inputs[1], false);
+            BigInteger cmp2 = memoryState.getBigInteger(inputs[2], false);
+
+            BigInteger result = (cmp1.compareTo(cmp2) == 0) ? BigInteger.ONE : BigInteger.ZERO;
+
+            memoryState.setValue(outputVarnode, result);
+        }
+    }
+
+    // Convert element from s_size to d_size by truncation
+    // The input may be longer than s_size, e.g. if it was sign
+    // extended, so truncate to the smaller of s_size and d_size
+
+    private class SIMD_INT_ZEXT extends SIMD_UOP1E {
+        protected long op1e(long x, int s_size, int d_size) { return x & getmask(s_size) & getmask(d_size); }
+    }
+
+    // Convert element from s_size to d_size with sign extend
+    // The input is, or should be, sign extended, so it can simply
+    // be truncated to the output size
+
+    private class SIMD_INT_SEXT extends SIMD_SOP1E {
+        protected long op1e(long x, int s_size, int d_size) { return x & getmask(d_size); }
+    }
+
+    private class MP_INT_ABS implements OpBehaviorOther {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+            int numArgs = inputs.length - 1;
+            if (numArgs != 1) {
+                throw new LowlevelError("MP_INT_ABS: requires 1 (Vn), got " + numArgs);
+            }
+
+            if (outputVarnode == null) {
+                throw new LowlevelError("MP_INT_ABS: missing required output");
+            }
+
+            MemoryState memoryState = emu.getMemoryState();
+            BigInteger op = memoryState.getBigInteger(inputs[1], true);
+
+            BigInteger result = op.abs();
+            // System.out.print(String.format("MP_INT_ABS %s to %s (%x)\n", op.toString(), result.toString(), result.longValue()));
+
+            memoryState.setValue(outputVarnode, result);
+        }
+    }
+
+    private class SIMD_INT_ABS extends SIMD_SOP1 {
+        protected long op1(long x, int esize) { return (x < 0) ? 
-x : x; } + } + + @SuppressWarnings("unused") + private class SIMD_INT_ADD extends SIMD_SOP2 { + protected long op2(long x, long y, int esize) { return x + y; } + } + + @SuppressWarnings("unused") + private class SIPD_INT_ADD extends SIPD_SOP2 { + @Override + protected long op2(long x, long y, int iesize, int oesize) { return x + y; } + } + + private class SIMD_INT_SUB extends SIMD_SOP2 { + protected long op2(long x, long y, int esize) { return x - y; } + } + + private class SIMD_INT_2COMP extends SIMD_SOP1 { + protected long op1(long x, int esize) { return -x; } + } + + // Multiprecision NOT + // + // Vd = MP_INT_NEGATE(Vn); + // + // Vd: destination varnode + // Vn: value to bitwise negate + + @SuppressWarnings("unused") + private class MP_INT_NEGATE implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + int numArgs = inputs.length - 1; + if (numArgs != 1) { + throw new LowlevelError("MP_INT_NEGATE: requires 1 (Vn), got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_NEGATE: missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + byte[] value = memoryState.getBigInteger(inputs[1], true).toByteArray(); + + // Need to perform bitwise negation manually + // to get the right size + + int outSize = outputVarnode.getSize(); + byte[] result = new byte[outSize]; + + for (int i = outSize - 1, j = value.length - 1; i >= 0; i--, j--) { + if (j >= 0) { + result[i] = (byte) (~value[j] & 0xff); + } else { + result[i] = (byte) 0xff; + } + } + + memoryState.setValue(outputVarnode, new BigInteger(result)); + } + } + + private class SIMD_INT_NEGATE extends SIMD_UOP1 { + protected long op1(long x, int esize) { return ~x; } + } + + // Multiprecision AND + // + // Vd = MP_INT_AND(Vn, Vm); + // + // Vd: destination varnode + // Vn, Vm: values to bitwise and together + + @SuppressWarnings("unused") + private class MP_INT_AND implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + int numArgs = inputs.length - 1; + if (numArgs != 2) { + throw new LowlevelError("MP_INT_AND: requires 2 (Vm, Vn), got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_AND: missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + BigInteger value = memoryState.getBigInteger(inputs[1], false); + BigInteger mask = memoryState.getBigInteger(inputs[2], false); + + BigInteger result = value.and(mask); + + memoryState.setValue(outputVarnode, result); + } + } + + private class SIMD_INT_XOR extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { return x ^ y; } + } + + private class SIMD_INT_AND extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { return x & y; } + } + + private class SIMD_INT_OR extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { return x | y; } + } + + private class SIMD_INT_LEFT extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { return x << y; } + } + + private class SIMD_INT_RIGHT extends SIMD_SOP2 { + protected long op2(long x, long y, int esize) { return x >>> y; } + } + + // Multiprecision (logical) right shift + // + // Vd = MP_INT_RIGHT(Vn, shift); + // + // Vd: destination varnode + // Vn: value to shift + // shift: amount to shift + + private class MP_INT_RIGHT implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + int 
numArgs = inputs.length - 1; + if (numArgs != 2) { + throw new LowlevelError("MP_INT_RIGHT: requires 2 (Vn, shift), got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_RIGHT: missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + + // By extracting an unsigned value, the right shift is logical and not sign extended + + BigInteger value = memoryState.getBigInteger(inputs[1], false); + int shift = (int) memoryState.getValue(inputs[2]); + + BigInteger result = value.shiftRight(shift); + + memoryState.setValue(outputVarnode, result); + } + } + + private class SIMD_INT_SRIGHT extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { return x >> y; } + } + + private class SIMD_INT_MULT extends SIMD_SOP2 { + protected long op2(long x, long y, int esize) { return x * y; } + } + + // Multiprecision Multiply. + // + // Vd = MP_INT_MULT(Vn, Vm); + // + // Vd: destination varnode + // Vn, Vm: multiplicands + + private class MP_INT_MULT implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + int numArgs = inputs.length - 1; + if (numArgs != 2) { + throw new LowlevelError("MP_INT_MULT: requires 2 (Vm, Vn), got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_MULT: missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + BigInteger value = memoryState.getBigInteger(inputs[1], true); + BigInteger mult = memoryState.getBigInteger(inputs[2], true); + + BigInteger result = value.multiply(mult); + + memoryState.setValue(outputVarnode, result); + } + } + + // Multiprecision *Unsigned* Multiply. + // + // Vd = MP_INT_UMULT(Vn, Vm); + // + // Vd: destination varnode + // Vn, Vm: multiplicands + + private class MP_INT_UMULT implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + int numArgs = inputs.length - 1; + if (numArgs != 2) { + throw new LowlevelError("MP_INT_UMULT: requires 2 (Vm, Vn), got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_UMULT: missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + BigInteger value = memoryState.getBigInteger(inputs[1], false); + BigInteger mult = memoryState.getBigInteger(inputs[2], false); + + BigInteger result = value.multiply(mult); + + memoryState.setValue(outputVarnode, result); + } + } + + private class SIMD_FLOAT_ADD extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { + if (esize == 2) { + float fx = shortBitsToFloat(x); + float fy = shortBitsToFloat(y); + float fz = fx + fy; + return floatToShortBits(fz); + } else if (esize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fy = Float.intBitsToFloat((int) y); + float fz = fx + fy; + return (long) Float.floatToIntBits(fz); + } else if (esize == 8) { + double fx = Double.longBitsToDouble(x); + double fy = Double.longBitsToDouble(y); + double fz = fx + fy; + return Double.doubleToLongBits(fz); + } + return 0; + } + } + + @SuppressWarnings("unused") + private class SIPD_FLOAT_ADD extends SIPD_UOP2 { + @Override + protected long op2(long x, long y, int iesize, int oesize) { + if (iesize == 2) { + float fx = shortBitsToFloat(x); + float fy = shortBitsToFloat(y); + float fz = fx + fy; + if (oesize == 2) { + return floatToShortBits(fz); + } + if (oesize == 4) { + return (long) Float.floatToIntBits(fz); + } + if (oesize == 8) { + return 
Double.doubleToLongBits((double) fz); + } + } else if (iesize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fy = Float.intBitsToFloat((int) y); + float fz = fx + fy; + if (oesize == 2) { + return floatToShortBits(fz); + } + if (oesize == 4) { + return (long) Float.floatToIntBits(fz); + } + if (oesize == 8) { + return Double.doubleToLongBits((double) fz); + } + } else if (iesize == 8) { + double fx = Double.longBitsToDouble(x); + double fy = Double.longBitsToDouble(y); + double fz = fx + fy; + if (oesize == 2) { + return floatToShortBits((float) fz); + } + if (oesize == 4) { + return (long) Float.floatToIntBits((float) fz); + } + if (oesize == 8) { + return Double.doubleToLongBits(fz); + } + } + return 0; + } + } + + private class SIMD_FLOAT_DIV extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { + if (esize == 2) { + float fx = shortBitsToFloat(x); + float fy = shortBitsToFloat(y); + float fz = fx / fy; + return floatToShortBits(fz); + } else if (esize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fy = Float.intBitsToFloat((int) y); + float fz = fx / fy; + return (long) Float.floatToIntBits(fz); + } else if (esize == 8) { + double fx = Double.longBitsToDouble(x); + double fy = Double.longBitsToDouble(y); + double fz = fx / fy; + return Double.doubleToLongBits(fz); + } + return 0; + } + } + + private class SIMD_FLOAT_MULT extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { + if (esize == 2) { + float fx = shortBitsToFloat(x); + float fy = shortBitsToFloat(y); + float fz = fx * fy; + return floatToShortBits(fz); + } else if (esize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fy = Float.intBitsToFloat((int) y); + float fz = fx * fy; + return (long) Float.floatToIntBits(fz); + } else if (esize == 8) { + double fx = Double.longBitsToDouble(x); + double fy = Double.longBitsToDouble(y); + double fz = fx * fy; + return Double.doubleToLongBits(fz); + } + return 0; + } + } + + private class SIMD_FLOAT_SUB extends SIMD_UOP2 { + protected long op2(long x, long y, int esize) { + if (esize == 2) { + float fx = shortBitsToFloat(x); + float fy = shortBitsToFloat(y); + float fz = fx - fy; + return floatToShortBits(fz); + } else if (esize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fy = Float.intBitsToFloat((int) y); + float fz = fx - fy; + return (long) Float.floatToIntBits(fz); + } else if (esize == 8) { + double fx = Double.longBitsToDouble(x); + double fy = Double.longBitsToDouble(y); + double fz = fx - fy; + return Double.doubleToLongBits(fz); + } + return 0; + } + } + + private class SIMD_FLOAT_NEG extends SIMD_UOP1 { + protected long op1(long x, int esize) { + if (esize == 2) { + float fx = shortBitsToFloat(x); + float fz = - fx; + return floatToShortBits(fz); + } else if (esize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fz = - fx; + return (long) Float.floatToIntBits(fz); + } else if (esize == 8) { + double fx = Double.longBitsToDouble(x); + double fz = - fx; + return Double.doubleToLongBits(fz); + } + return 0; + } + } + + private class SIMD_FLOAT_ABS extends SIMD_UOP1 { + protected long op1(long x, int esize) { + if (esize == 2) { + float fx = shortBitsToFloat(x); + float fz = (fx < 0.0F) ? (0.0F - fx) : fx; + return floatToShortBits(fz); + } else if (esize == 4) { + float fx = Float.intBitsToFloat((int) x); + float fz = (fx < 0.0F) ? 
(0.0F - fx) : fx; + return (long) Float.floatToIntBits(fz); + } else if (esize == 8) { + double fx = Double.longBitsToDouble(x); + double fz = (fx < 0.0D) ? (0.0F - fx) : fx; + return Double.doubleToLongBits(fz); + } + return 0; + } + } + + private class SIMD_FLOAT2FLOAT extends SIMD_UOP1E { + protected long op1e(long x, int s_size, int d_size) { + if (s_size == d_size) return x; + if (s_size == 2) { + float fx = shortBitsToFloat(x); + if (d_size == 4) return (long) Float.floatToIntBits(fx); + else if (d_size == 8) return Double.doubleToLongBits((double) fx); + } else if (s_size == 4) { + float fx = Float.intBitsToFloat((int) x); + if (d_size == 2) return floatToShortBits(fx); + else if (d_size == 8) return Double.doubleToLongBits((double) fx); + } else if (s_size == 8) { + double fx = Double.longBitsToDouble(x); + if (d_size == 2) return floatToShortBits((float) fx); + else if (d_size == 4) return Float.floatToIntBits((float) fx); + } + return x; + } + } + + private class SIMD_TRUNC extends SIMD_UOP1E { + protected long op1e(long x, int s_size, int d_size) { + if (s_size == d_size) return x; + if (s_size == 2) { + float fx = shortBitsToFloat(x); + if (d_size == 4) return (long) ((int) fx); + else if (d_size == 8) return (long) fx; + } else if (s_size == 4) { + float fx = Float.intBitsToFloat((int) x); + if (d_size == 2) return (long) ((short) fx); + else if (d_size == 8) return (long) fx; + } else if (s_size == 8) { + double fx = Double.longBitsToDouble(x); + if (d_size == 2) return (long) ((short) fx); + else if (d_size == 4) return (long) ((int) fx); + } + return x; + } + } + + @SuppressWarnings("unused") + private class SIMD_FLOAT_ROUND extends SIMD_UOP1E { + protected long op1e(long x, int s_size, int d_size) { + if (s_size == 2) { + float fx = shortBitsToFloat(x); + return (long) fx; + } else if (s_size == 4) { + float fx = Float.intBitsToFloat((int) x); + return (long) fx; + } else if (s_size == 8) { + double fx = Double.longBitsToDouble(x); + return (long) fx; + } + return 0; + } + } + + // Extract a lane from a simd register + // + // Vd = SIMD_PIECE(Vn, offset); + // + // Vd = destination varnode + // Vn = simd register + // offset = the element to extract (0 = least significant) + + private class SIMD_PIECE implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { + + // Requires 2 inputs + + int numArgs = inputs.length - 1; + if (numArgs != 2) { + throw new LowlevelError("SIMD_PIECE: requires 2 inputs, got " + numArgs); + } + + if (outputVarnode == null) { + throw new LowlevelError("SIMD_PIECE: missing required output"); + } + + MemoryState memoryState = emu.getMemoryState(); + + // Get the init variable to output, the value to copy, and the offset + + Varnode simdVarnode = inputs[1]; + int offset = (int) memoryState.getValue(inputs[2]); + + if (simdVarnode.getSize() < (offset + 1) * outputVarnode.getSize()) { + throw new LowlevelError("SIMD_PIECE: input size (" + simdVarnode.getSize() + + ") too small to extract output size (" + outputVarnode.getSize() + ") from offset (" + offset + ")"); + } + + // Allocate a byte array of the correct size to hold the output + // initialized to all zeros + + int outSize = outputVarnode.getSize(); + byte[] outBytes = new byte[outSize]; + + // Copy the bytes from the simd, in big endian + // order, and maybe truncated and need sign + // extension + + byte[] simdBytes = memoryState.getBigInteger(simdVarnode, false).toByteArray(); + byte ext = 0; + + for (int i = outSize - 1, j = 
simdBytes.length - 1 - outSize * offset; i >= 0; i--, j--) {
+                if (j >= 0) {
+                    outBytes[i] = simdBytes[j];
+                    ext = (byte) ((simdBytes[j] < 0) ? -1 : 0);
+                } else {
+                    outBytes[i] = ext;
+                }
+            }
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
+
+    // Concatenate varnodes into a larger varnode
+    //
+    // Vd = a64_CONCAT(Vn, Vm)
+    //
+    // Vd = destination varnode
+    // Vn, Vm = source varnodes
+
+    @SuppressWarnings("unused")
+    private class a64_CONCAT implements OpBehaviorOther {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+            int numArgs = inputs.length - 1;
+            if (numArgs != 2) {
+                throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (Vn, Vm), got " + numArgs);
+            }
+            if (outputVarnode == null) {
+                throw new LowlevelError(this.getClass().getName() + ": missing required output");
+            }
+
+            MemoryState memoryState = emu.getMemoryState();
+
+            int outSize = outputVarnode.getSize();
+
+            Varnode VnVarnode = inputs[1];
+            Varnode VmVarnode = inputs[2];
+
+            if (outSize != VnVarnode.getSize() + VmVarnode.getSize()) {
+                throw new LowlevelError(this.getClass().getName() + ": output size (" + outSize +
+                    ") must equal the sum of input sizes (" + VnVarnode.getSize() + "," + VmVarnode.getSize() + ")");
+            }
+
+            byte[] outBytes = new byte[outSize];
+
+            byte[] VnBytes = memoryState.getBigInteger(VnVarnode, false).toByteArray();
+            byte[] VmBytes = memoryState.getBigInteger(VmVarnode, false).toByteArray();
+
+            for (int i = outSize - 1, j = VnBytes.length - 1; i >= 0 && j >= 0; i--, j--) {
+                outBytes[i] = VnBytes[j];
+            }
+            for (int i = outSize - VnVarnode.getSize() - 1, j = VmBytes.length - 1; i >= 0 && j >= 0; i--, j--) {
+                outBytes[i] = VmBytes[j];
+            }
+
+            // Write the concatenated result back to the output
+
+            memoryState.setValue(outputVarnode, new BigInteger(outBytes));
+        }
+    }
+
+    // Implementations of AARCH64 instructions
+
+    // Implement the TBL/TBX instructions
+    //
+    // Vd = a64_TBL(Vinit, Vn1, [Vn2, Vn3, Vn4,] Vm)
+    //
+    // Vd: destination varnode (8 or 16 bytes)
+    // Vinit: varnode to update (e.g. 0 or Vd)
+    // Vn1: table varnode (must be 16 bytes)
+    // Vn2, Vn3, Vn4: additional table varnodes
+    // Vm: index varnode (8 or 16 bytes, same as Vd)
+
+    private class a64_TBL implements OpBehaviorOther {
+
+        @Override
+        public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) {
+
+            int numArgs = inputs.length - 1;
+            if (numArgs < 3 || numArgs > 6) {
+                throw new LowlevelError("a64_TBL: requires 3 to 6 inputs (Vinit, Vn1-Vn4, Vm), got " + numArgs);
+            }
+
+            if (outputVarnode == null) {
+                throw new LowlevelError("a64_TBL: missing required output");
+            }
+
+            MemoryState memoryState = emu.getMemoryState();
+            Varnode updateVarnode = inputs[1];
+            Varnode indexVarnode = inputs[numArgs];
+
+            // The index size must match the output size
+            if (outputVarnode.getSize() != indexVarnode.getSize()) {
+                throw new LowlevelError("a64_TBL: the output size (" + outputVarnode.getSize() +
+                    ") must match the index size (" + indexVarnode.getSize() + ")");
+            }
+
+            int regs = numArgs - 2;
+            int elements = outputVarnode.getSize();
+
+            // The indices are converted to little endian order
+            byte[] indices = new byte[elements];
+            byte[] vx = memoryState.getBigInteger(indexVarnode, false).toByteArray();
+            for (int j = 0; j < vx.length && j < elements; j++) {
+                indices[j] = vx[vx.length - j - 1];
+            }
+
+            // Create table from registers
+            // It consists of 16, 32, 48, or 64 bytes from Vn1-Vn4
+            // but these are indexed in little endian order.
+ // The varnodes are in big endian order, so + // they need to be reversed + + byte[] table = new byte[64]; + for (int i = 0; i < regs; i++) { + byte[] vn = memoryState.getBigInteger(inputs[2 + i], false).toByteArray(); + for (int j = 0; j < vn.length && i * 16 + j < 64; j++) { + table[i*16 + j] = vn[vn.length - j - 1]; + } + } + + // The result is pre-initialized to Vi + // and it is also converted to little endian + // order just to make it easier to follow + + byte[] result = new byte[elements]; + byte[] vi = memoryState.getBigInteger(updateVarnode, false).toByteArray(); + for (int j = 0; j < vi.length && j < elements; j++) { + result[j] = vi[vi.length - j - 1]; + } + + // Since the indices, table, and result + // are all in little endian order + // and since the byte arrays are all the right + // size, it's just a simple lookup now + + for (int i = 0; i < elements; i++) { + int index = (int) (indices[i] & 0xff); + if (index < 16 * regs) { + result[i] = table[index]; + } + } + + // reverse the endianness of the result, in place + // so the output can be updated + + for (int i = 0; i < elements / 2; i++) { + byte tmp = result[i]; + result[i] = result[elements - i - 1]; + result[elements - i - 1] = tmp; + } + + memoryState.setValue(outputVarnode, new BigInteger(result)); + } + } +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_BE_O0_EmulatorTest.java b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_BE_O0_EmulatorTest.java new file mode 100644 index 00000000..e2975743 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_BE_O0_EmulatorTest.java @@ -0,0 +1,41 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class AARCH64_BE_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "AARCH64:BE:64:v8A"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public AARCH64_BE_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "AARCH64_BE_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite( + AARCH64_BE_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_BE_O3_EmulatorTest.java b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_BE_O3_EmulatorTest.java new file mode 100644 index 00000000..9ead7fc1 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_BE_O3_EmulatorTest.java @@ -0,0 +1,41 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class AARCH64_BE_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "AARCH64:BE:64:v8A"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public AARCH64_BE_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "AARCH64_BE_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite( + AARCH64_BE_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_O0_EmulatorTest.java b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_O0_EmulatorTest.java new file mode 100644 index 00000000..f916d9dd --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class AARCH64_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "AARCH64:LE:64:v8A"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public AARCH64_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "AARCH64_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(AARCH64_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_O3_EmulatorTest.java b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_O3_EmulatorTest.java new file mode 100644 index 00000000..73fe8a29 --- /dev/null +++ b/src/third-party/sleigh/processors/AARCH64/src/test.processors/java/ghidra/test/processors/AARCH64_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package ghidra.test.processors;
+
+import ghidra.test.processors.support.ProcessorEmulatorTestAdapter;
+import junit.framework.Test;
+
+public class AARCH64_O3_EmulatorTest extends ProcessorEmulatorTestAdapter {
+
+    private static final String LANGUAGE_ID = "AARCH64:LE:64:v8A";
+    private static final String COMPILER_SPEC_ID = "default";
+
+    private static final String[] REG_DUMP_SET = new String[] {};
+
+    public AARCH64_O3_EmulatorTest(String name) throws Exception {
+        super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET);
+    }
+
+    @Override
+    protected String getProcessorDesignator() {
+        return "AARCH64_GCC_O3";
+    }
+
+    public static Test suite() {
+        return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(AARCH64_O3_EmulatorTest.class);
+    }
+}
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM.cspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM.cspec
new file mode 100644
index 00000000..4137ca1d
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM.cspec
@@ -0,0 +1,247 @@
[247 lines of compiler-spec XML; the markup did not survive this capture. The recoverable content is the Thumb switch-table pcode fragment below; the goto labels were also lost.]
+    ;
+    offset = *:1 (lr + r3);
+    r3 = zext(offset);
+    if (inbounds) goto ;
+    offset = *:1 (lr + r12);
+    r3 = zext(offset);
+    r3 = r3 * 2;
+    r12 = lr + r3;
+    ISAModeSwitch = (r12 & 1) != 1;
+    TB = ISAModeSwitch;
+    pc = r12 & 0xfffffffe;
+    goto [pc];
+    ]]>
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM.dwarf b/src/third-party/sleigh/processors/ARM/data/languages/ARM.dwarf
new file mode 100644
index 00000000..0444463e
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM.dwarf
@@ -0,0 +1,12 @@
[12 lines of DWARF register-mapping XML; the markup did not survive this capture.]
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM.gdis b/src/third-party/sleigh/processors/ARM/data/languages/ARM.gdis
new file mode 100644
index 00000000..33429f85
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM.gdis
@@ -0,0 +1,41 @@
[41 lines of disassembly-options XML; only the context-register name "TMode" survives this capture.]
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM.ldefs b/src/third-party/sleigh/processors/ARM/data/languages/ARM.ldefs
new file mode 100644
index 00000000..ee1a245b
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM.ldefs
@@ -0,0 +1,377 @@
[377 lines of language-definition XML; the markup did not survive this capture. The surviving language descriptions are: Generic ARM/Thumb v8 little endian; Generic ARM/Thumb v8 little endian (Thumb is default); Generic ARM/Thumb v8 little endian instructions and big endian data; Generic ARM/Thumb v8 big endian; Generic ARM/Thumb v8 big endian (Thumb is default); Generic ARM/Thumb v7 little endian; Generic ARM/Thumb v7 little endian instructions and big endian data; Generic ARM/Thumb v7 big endian; ARM Cortex / Thumb little endian; ARM Cortex / Thumb big endian; Generic ARM/Thumb v6 little endian; Generic ARM/Thumb v6 big endian; Generic ARM/Thumb v5 little endian (T-variant); Generic ARM/Thumb v5 big endian (T-variant); Generic ARM v5 little endian; Generic ARM v5 big endian; Generic ARM/Thumb v4 little endian (T-variant); Generic ARM/Thumb v4 big endian (T-variant); Generic ARM v4 little endian; Generic ARM v4 big endian.]
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM.opinion b/src/third-party/sleigh/processors/ARM/data/languages/ARM.opinion
new file mode 100644
index 00000000..64634f11
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM.opinion
@@ -0,0 +1,59 @@
[59 lines of loader-opinion XML; the markup did not survive this capture.]
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM.sinc b/src/third-party/sleigh/processors/ARM/data/languages/ARM.sinc
new file mode 100644
index 00000000..5869bfe2
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM.sinc
@@ -0,0 +1,303 @@
+# Specification for the ARM Version 4, 4T, 5, 5T, 5E
+# The following boolean defines control specific support: T_VARIANT, VERSION_5, VERSION_5E
+
+define endian=$(ENDIAN);
+define alignment=2;
+
+define space ram type=ram_space size=4 default;
+define space register type=register_space size=4;
+
+define register offset=0x0020 size=4 [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ];
+define register offset=0x0060 size=1 [ NG ZR CY OV tmpNG tmpZR tmpCY tmpOV shift_carry TB Q GE1 GE2 GE3 GE4 ];
+define register offset=0x0070 size=4 [ cpsr spsr ];
+define register offset=0x0080 size=4 [ mult_addr ]; # Special internal register for dealing with multiple stores/loads
+define register offset=0x0084 size=4 [ r14_svc r13_svc spsr_svc ];
+define register offset=0x0090 size=8 [ mult_dat8 ]; # Special internal register for dealing with multiple stores/loads
+define register offset=0x0090 size=16 [ mult_dat16 ]; # Special internal register for dealing with multiple stores/loads
+define register offset=0x00A0 size=4 [ fpsr ]; # floating point state register (for FPA10 floating-point accelerator)
+define register offset=0x00B0 size=1 [ ISAModeSwitch ]; # generic name for TB ThumbBit - set same as TB
+
+@define FPSCR_N "fpscr[31,1]"
+@define FPSCR_Z "fpscr[30,1]"
+@define FPSCR_C "fpscr[29,1]"
+@define FPSCR_V "fpscr[28,1]"
+
+@if defined(VFPv2) || defined(VFPv3) || defined(SIMD)
+ define register offset=0x00B0 size=4 [ fpsid fpscr fpexc mvfr0 mvfr1 ];
+@endif
+define register offset=0x0100 size=10 [ fp0 fp1 fp2 fp3 fp4 fp5 fp6 fp7 ]; # eight 80-bit floating registers
+
+# pseudo-registers for coprocessor calculations
+define register offset=0x0200 size=4 [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 cr11 cr12 cr13 cr14 cr15 ];
+
+# Advanced SIMD and VFP extension registers
+@if defined(VFPv2) || defined(VFPv3)
+
+@if ENDIAN == "little"
+ define register offset=0x0300 size=4 [ s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+ s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 ];
+@else # ENDIAN == "big"
+ define register offset=0x0300 size=4 [ s31 s30 s29 s28 s27 s26 s25 s24 s23 s22 s21 s20 s19 s18 s17 s16
+ s15 s14 s13 s12 s11 s10 s9 s8 s7 s6 s5 s4 s3 s2 s1 s0 ];
+@endif # ENDIAN = "big"
+
+@endif # VFPv2 || VFPv3
+
+@if defined(VFPv2)
+
+@if ENDIAN == "little"
+ define register offset=0x0300 size=8 [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ];
+@else # ENDIAN == "big"
+ define register offset=0x0300 size=8 [ d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0 ];
+@endif # ENDIAN = "big"
+
+@endif # VFPv2
+
+@if defined(SIMD) || defined(VFPv3)
+
+@if ENDIAN == "little"
+ define register 
offset=0x0300 size=8 [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 ]; +@else # ENDIAN == "big" + define register offset=0x0300 size=8 [ d31 d30 d29 d28 d27 d26 d25 d24 d23 d22 d21 d20 d19 d18 d17 d16 + d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0 ]; +@endif # ENDIAN = "big" + +@endif # SIMD || VFPv3 + +@if defined(SIMD) + +@if ENDIAN == "little" + define register offset=0x0300 size=16 [ q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 ]; +@else # ENDIAN == "big" + define register offset=0x0300 size=16 [ q15 q14 q13 q12 q11 q10 q9 q8 q7 q6 q5 q4 q3 q2 q1 q0 ]; +@endif # ENDIAN = "big" + +@endif # SIMD + +# Define context bits +# WARNING: when adjusting context keep compiler packing in mind +# and make sure fields do not span a 32-bit boundary before or +# after context packing +define register offset=0x00 size=8 contextreg; +define context contextreg +@ifdef T_VARIANT + TMode = (0,0) # 1 if in Thumb instruction decode mode + T = (0,0) # exact copy (alias!) of TMode + LowBitCodeMode = (0,0) # 1 if low bit of instruction address is set on a branch + ISA_MODE = (0,0) # 1 for Thumb instruction decode mode +@endif + LRset = (1,1) noflow # 1 if the instruction right before was a mov lr,pc + REToverride = (2,2) noflow # 1 if the instruction should be a branch not a return + CALLoverride = (3,3) noflow # 1 if the call should actually be a jump +@if defined(VERSION_6T2) || defined(VERSION_7) + TEEMode = (4,4) # 1 if in ThumbEE mode, changes some instruction behavior, and makes some instructions invalid + condit = (5,13) noflow # both base and shift + cond_mask = (10,13) # base condition + cond_full = (6,9) # full condition + cond_true = (9,9) # true if this condition should be tested for true + cond_base = (6,8) # shift mask for controlling shift + cond_shft = (9,13) # mask and lower bit of it condition field + itmode = (5,5) # true if in ITBlock mode + +@endif + + # Transient context bits + counter = (14,18) # 0 to 7 counter (for building variable length register lists) +# dreg = (17,21) # D register (attached, for building register lists) +# sreg = (17,21) # S register (attached, for building register lists) + regNum = (19,23) # D register number (see dreg) + counter2 = (24,26) # 0 to 7 counter (for building variable length register lists) +# dreg2 = (25,29) # 2nd D register (attached, for building register lists) +# sreg2 = (25,29) # 2nd S register (attached, for building register lists) + reg2Num = (27,31) # 2nd D register number (see dreg2) +# --- do not allow any field to span 32-bit boundary --- + regInc = (32,33) # Pair register increment + ARMcond = (34,34) # ARM conditional instruction + ARMcondCk = (35,35) # Finished ARM condition check phase +; + +define pcodeop count_leading_zeroes; +define pcodeop coprocessor_function; +define pcodeop coprocessor_function2; +define pcodeop coprocessor_load; +define pcodeop coprocessor_load2; +define pcodeop coprocessor_loadlong; +define pcodeop coprocessor_loadlong2; +define pcodeop coprocessor_moveto; +define pcodeop coprocessor_moveto2; +define pcodeop coprocessor_movefromRt; +define pcodeop coprocessor_movefromRt2; +define pcodeop coprocessor_movefrom2; +define pcodeop coprocessor_store; +define pcodeop coprocessor_store2; +define pcodeop coprocessor_storelong; +define pcodeop coprocessor_storelong2; +define pcodeop software_interrupt; +define pcodeop software_bkpt; +define pcodeop software_udf; +define pcodeop software_hlt; +define pcodeop software_hvc; 
+define pcodeop software_smc; + +# CPS methods (Version 6) +define pcodeop setUserMode; +define pcodeop setFIQMode; +define pcodeop setIRQMode; +define pcodeop setSupervisorMode; +define pcodeop setMonitorMode; +define pcodeop setAbortMode; +define pcodeop setUndefinedMode; +define pcodeop setSystemMode; +define pcodeop enableIRQinterrupts; +define pcodeop enableFIQinterrupts; +define pcodeop enableDataAbortInterrupts; +define pcodeop disableIRQinterrupts; +define pcodeop disableFIQinterrupts; +define pcodeop isFIQinterruptsEnabled; +define pcodeop isIRQinterruptsEnabled; +define pcodeop disableDataAbortInterrupts; +define pcodeop hasExclusiveAccess; +define pcodeop isCurrentModePrivileged; +define pcodeop setThreadModePrivileged; +define pcodeop isThreadMode; + +define pcodeop jazelle_branch; +define pcodeop ClearExclusiveLocal; +define pcodeop HintDebug; + +define pcodeop DataMemoryBarrier; +define pcodeop DataSynchronizationBarrier; + +define pcodeop secureMonitorCall; + +define pcodeop WaitForEvent; +define pcodeop WaitForInterrupt; + +define pcodeop HintYield; +define pcodeop InstructionSynchronizationBarrier; + +define pcodeop HintPreloadData; +define pcodeop HintPreloadDataForWrite; +define pcodeop HintPreloadInstruction; + +define pcodeop SignedSaturate; +define pcodeop SignedDoesSaturate; +define pcodeop UnsignedSaturate; +define pcodeop UnsignedDoesSaturate; +define pcodeop Absolute; +define pcodeop ReverseBitOrder; +define pcodeop SendEvent; +define pcodeop setEndianState; + +macro affectflags() { + CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; +} + +macro affect_resflags() { + ZR = tmpZR; NG = tmpNG; +} + +macro SetThumbMode(value) { + ISAModeSwitch = value; + TB = ISAModeSwitch; +} + +# +# simple branch, not inter-working +macro BranchWritePC(addr) { + pc = addr; +} + +# +# Interworking branch, ARM<->Thumb +macro BXWritePC(addr) { + SetThumbMode((addr & 0x1) != 0); + local tmp = addr & 0xfffffffe; + pc = tmp; +} + +# +# Branch depends on version +macro LoadWritePC(addr) { +@if defined(VERSION_5) + BXWritePC(addr); +@else + BranchWritePC(addr); +@endif +} + +# Branch depends on version +macro ALUWritePC(addr) { +@if defined(VERSION_7) + BXWritePC(addr); +@else + BranchWritePC(addr); +@endif +} + +@if defined(T_VARIANT) + +ItCond: is TMode=1 { } +CheckInIT_CZNO: is TMode=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } # in older, arms always affect flags +CheckInIT_CZN: is TMode=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; } # in older, arms always affect flags +CheckInIT_ZN: is TMode=1 { ZR = tmpZR; NG = tmpNG; } # in older, arms always affect flags + +@endif + +@if defined(VERSION_6T2) || defined(VERSION_7) + +# conditionals for instruction following IT Block +thfcc: "eq" is cond_full=0 { local tmp:1 = (ZR!=0); export tmp; } +thfcc: "ne" is cond_full=1 { local tmp:1 = (ZR==0); export tmp; } +thfcc: "cs" is cond_full=2 { local tmp:1 = (CY!=0); export tmp; } +thfcc: "cc" is cond_full=3 { local tmp:1 = (CY==0); export tmp; } +thfcc: "mi" is cond_full=4 { local tmp:1 = (NG!=0); export tmp; } +thfcc: "pl" is cond_full=5 { local tmp:1 = (NG==0); export tmp; } +thfcc: "vs" is cond_full=6 { local tmp:1 = (OV!=0); export tmp; } +thfcc: "vc" is cond_full=7 { local tmp:1 = (OV==0); export tmp; } +thfcc: "hi" is cond_full=8 { local tmp:1 = CY && !ZR; export tmp; } +thfcc: "ls" is cond_full=9 { local tmp:1 = !CY || ZR; export tmp; } +thfcc: "ge" is cond_full=10 { local tmp:1 = (NG == OV); export tmp; } +thfcc: "lt" is cond_full=11 { local tmp:1 = (NG != OV); export tmp; } +thfcc: "gt" 
is cond_full=12 { local tmp:1 = !ZR && (NG == OV); export tmp; } +thfcc: "le" is cond_full=13 { local tmp:1 = ZR || (NG != OV); export tmp; } +thfcc: "al" is cond_full=14 { local tmp:1 = 1; export tmp; } #can happen +#thfcc: "nv" is cond_full=15 { local tmp:1 = 0; export tmp; } #unpredictable, shouldn't happen + + +# no ITcondition +ItCond: is TMode=1 & itmode=0 & cond_mask=0 {} +# ITBlock then/else case - the condition being tested is modified by the shift below +ItCond: "."thfcc is TMode=1 & itmode=0 & cond_mask & thfcc [ itmode=1; globalset(inst_next,condit);] + { if (!thfcc) goto inst_next; } + +# last ITBlock then/else case - the condition being tested is modified by the shift below +ItCond: "."thfcc is TMode=1 & itmode=0 & cond_mask=8 & thfcc + { if (!thfcc) goto inst_next; } + +# certain Thumb instructions don't affect all flags in the IT block +CheckInIT_CZNO: is TMode=1 & itmode=1 & cond_mask { } # Do nothing to the flag bits +CheckInIT_CZNO: is TMode=1 & itmode=0 & cond_mask { } # Do nothing to the flag bits +CheckInIT_CZNO: "s" is TMode=1 & itmode=0 & cond_mask=0 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } + +CheckInIT_CZN: is TMode=1 & itmode=1 & cond_mask { } # Do nothing to the flag bits +CheckInIT_CZN: is TMode=1 & itmode=0 & cond_mask { } # Do nothing to the flag bits +CheckInIT_CZN: "s" is TMode=1 & itmode=0 & cond_mask=0 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; } + +CheckInIT_ZN: is TMode=1 & itmode=1 & cond_mask { } # Do nothing to the flag bits +CheckInIT_ZN: is TMode=1 & itmode=0 & cond_mask { } # Do nothing to the flag bits +CheckInIT_ZN: "s" is TMode=1 & itmode=0 & cond_mask=0 { ZR = tmpZR; NG = tmpNG; } + + +:^instruction is itmode=1 & cond_mask=8 & instruction [ condit=0; ] {} +:^instruction is itmode=1 & cond_mask & instruction [ cond_shft=cond_shft << 1; itmode=0; ]{} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +@include "ARMinstructions.sinc" + +# THUMB instructions +@ifdef T_VARIANT +@include "ARMTHUMBinstructions.sinc" +@endif diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM4_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM4_be.slaspec new file mode 100644 index 00000000..ef190b18 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM4_be.slaspec @@ -0,0 +1,5 @@ + +@define ENDIAN "big" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM4_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM4_le.slaspec new file mode 100644 index 00000000..ee66cfc6 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM4_le.slaspec @@ -0,0 +1,5 @@ + +@define ENDIAN "little" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM4t_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM4t_be.slaspec new file mode 100644 index 00000000..1e56f79d --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM4t_be.slaspec @@ -0,0 +1,6 @@ + +@define ENDIAN "big" +@define T_VARIANT "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM4t_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM4t_le.slaspec new file mode 100644 index 00000000..712b6367 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM4t_le.slaspec @@ -0,0 +1,6 @@ + +@define ENDIAN "little" +@define T_VARIANT "" + +@include "ARM.sinc" + diff --git 
a/src/third-party/sleigh/processors/ARM/data/languages/ARM5_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM5_be.slaspec new file mode 100644 index 00000000..c490b017 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM5_be.slaspec @@ -0,0 +1,7 @@ + +@define ENDIAN "big" +@define VERSION_5 "" +@define VERSION_5E "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM5_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM5_le.slaspec new file mode 100644 index 00000000..48f9cf7a --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM5_le.slaspec @@ -0,0 +1,7 @@ + +@define ENDIAN "little" +@define VERSION_5 "" +@define VERSION_5E "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM5t_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM5t_be.slaspec new file mode 100644 index 00000000..dbd9cc1c --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM5t_be.slaspec @@ -0,0 +1,9 @@ + +@define ENDIAN "big" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" + +@include "ARM.sinc" + + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM5t_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM5t_le.slaspec new file mode 100644 index 00000000..ae5bc1d0 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM5t_le.slaspec @@ -0,0 +1,8 @@ + +@define ENDIAN "little" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM6_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM6_be.slaspec new file mode 100644 index 00000000..02983a4c --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM6_be.slaspec @@ -0,0 +1,12 @@ + +@define ENDIAN "big" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" +@define VERSION_6 "" +@define VERSION_6K "" +@define VERSION_6T2 "" +@define VFPv2 "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM6_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM6_le.slaspec new file mode 100644 index 00000000..f45a5101 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM6_le.slaspec @@ -0,0 +1,12 @@ + +@define ENDIAN "little" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" +@define VERSION_6 "" +@define VERSION_6K "" +@define VERSION_6T2 "" +@define VFPv2 "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM7_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM7_be.slaspec new file mode 100644 index 00000000..896066e7 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM7_be.slaspec @@ -0,0 +1,15 @@ + +@define ENDIAN "big" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" +@define VERSION_6 "" +@define VERSION_6K "" +@define VERSION_6T2 "" +@define VERSION_7 "" +@define VERSION_7M "" +@define SIMD "" +@define VFPv3 "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM7_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM7_le.slaspec new file mode 100644 index 00000000..e2df6c39 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM7_le.slaspec @@ -0,0 +1,15 @@ + +@define ENDIAN 
"little" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" +@define VERSION_6 "" +@define VERSION_6K "" +@define VERSION_6T2 "" +@define VERSION_7 "" +@define VERSION_7M "" +@define SIMD "" +@define VFPv3 "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM8_be.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM8_be.slaspec new file mode 100644 index 00000000..71536634 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM8_be.slaspec @@ -0,0 +1,16 @@ + +@define ENDIAN "big" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" +@define VERSION_6 "" +@define VERSION_6K "" +@define VERSION_6T2 "" +@define VERSION_7 "" +@define VERSION_7M "" +@define VERSION_8 "" +@define SIMD "" +@define VFPv3 "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM8_le.slaspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM8_le.slaspec new file mode 100644 index 00000000..6eda33d9 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM8_le.slaspec @@ -0,0 +1,16 @@ + +@define ENDIAN "little" +@define T_VARIANT "" +@define VERSION_5 "" +@define VERSION_5E "" +@define VERSION_6 "" +@define VERSION_6K "" +@define VERSION_6T2 "" +@define VERSION_7 "" +@define VERSION_7M "" +@define VERSION_8 "" +@define SIMD "" +@define VFPv3 "" + +@include "ARM.sinc" + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMCortex.pspec b/src/third-party/sleigh/processors/ARM/data/languages/ARMCortex.pspec new file mode 100644 index 00000000..64222b6c --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMCortex.pspec @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMTHUMBinstructions.sinc b/src/third-party/sleigh/processors/ARM/data/languages/ARMTHUMBinstructions.sinc new file mode 100644 index 00000000..daf70573 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMTHUMBinstructions.sinc @@ -0,0 +1,5294 @@ +# Specification for the THUMB Version 2 +# This closely follows +# "Architecture Reference Manual" Second Edition Edited by David Seal + +# +# WARNING NOTE: Be very careful taking a subpiece or truncating a register with :# or (#) +# The LEBE hybrid language causes endian issues if you do not assign the register to a temp +# variable and then take a subpiece or truncate. 
+# + +define token instr2 (16) + part2op=(11,15) # this second instruction token is needed for the + part2J1=(13,13) + part2J2=(11,11) + part2cond=(6,9) + part2imm6=(0,5) + part2S=(10,10) + part2imm11=(0,10) + part2imm10=(0,9) + part2off=(0,10) # bl and blx instructions which use 2 16-bit instructions + part2off_10=(1,10) # blx instruction which switches to ARM mode + part2c1415=(14,15) + part2c1212=(12,12) + part2c0615=(6,15) + part2Rt=(12,15) + part2c0011=(0,11) + part2c0909=(9,9) + part2c0808=(8,8) + part2c0707=(7,7) + part2c0505=(5,5) + part2c0404=(4,4) + part2Rd0003=(0,3) +; + +define token instrThumb (16) + op4=(4,15) + op6=(6,15) + op7=(7,15) + op8=(8,15) + op9=(9,15) + op11=(11,15) + op12=(12,15) + op0=(0,15) + sop0407=(4,7) + sop0507=(5,7) + sop0508=(5,8) + sop0003=(0,3) + sop0608=(6,8) + sop0610=(6,10) + + sopit=(0,7) + + Ra1215=(12,15) + Rd0002=(0,2) + Rd0003=(0,3) + Rd0810=(8,10) + Rd0811=(8,11) + Rn0002=(0,2) + Rn0003=(0,3) + Rn0305=(3,5) + Rn0810=(8,10) + Rm0305=(3,5) + Rm0306=(3,6) + Rm0608=(6,8) + Rm0003=(0,3) + Rs0305=(3,5) + Rt1215=(12,15) + Rt0811=(8,11) + + thI9=(9,9) + thP8=(8,8) + thH8=(8,8) + thL8=(8,8) + thU7=(7,7) + thB6=(6,6) + thN6=(6,6) + thS6=(6,6) + thW5=(5,5) + thL4=(4,4) + + thCRd=(12,15) + thCRn=(0,3) + thCRm=(0,3) + + hrn0002=(0,2) + hrm0305=(3,5) + rm0306=(3,6) + hrd0002=(0,2) + + immed3=(6,8) + immed5=(6,10) + immed6=(0,5) + immed7=(0,6) + immed8=(0,7) + + immed12_i=(10,10) + immed12_imm3=(12,14) + immed12_imm8=(0,7) + + soffset8=(0,7) signed + offset10=(0,9) + offset10S=(10,10) + offset11=(0,10) + soffset11=(0,10) signed + offset12=(0,11) + + thcond=(8,11) + thcpn=(8,11) + thopcode1=(4,7) + thopcode2=(5,7) + l07=(7,7) + l11=(11,11) + h1=(7,7) + h2=(6,6) + R=(8,8) + sbz=(0,2) + thwbit=(5,5) + + th_psrmask=(8,11) + + addr_pbit=(10,10) + addr_ubit=(9,9) + addr_wbit=(8,8) + addr_puw =(8,10) + addr_puw1 =(5,8) + + thsrsMode=(0,4) + + fcond=(4,7) + + throt=(4,6) + + imm3_12=(12,14) + + imm3_shft=(12,14) + imm2_shft=(6,7) + + imm5=(3,7) + + sysm=(0,7) + sysm37=(3,7) + sysm02=(0,2) + + + thc0001=(0,1) + thc0002=(0,2) + thc0003=(0,3) + thc0004=(0,4) + thc0005=(0,5) + thc0006=(0,6) + thc0007=(0,7) + thc0011=(0,11) + thc0107=(1,7) + thc0207=(2,7) + thc0307=(3,7) + thc0407=(4,7) + thc0405=(4,5) + thc0409=(4,9) + thc0506=(5,6) + thc0507=(5,7) + thc0607=(6,7) + thc0810=(8,10) + thc0811=(8,11) + thc0910=(9,10) + thc1414=(14,14) + thc1313=(13,13) + thc1212=(12,12) + thc1214=(12,14) + thc1111=(11,11) + thc1010=(10,10) + thc0909=(9,9) + thc0808=(8,8) + thc0707=(7,7) + thc0606=(6,6) + thc0505=(5,5) + thc0404=(4,4) + thc0303=(3,3) + thc0202=(2,2) + thc0101=(1,1) + thc0000=(0,0) + thc0115=(1,15) + thc0215=(2,15) + thc0315=(3,15) + thc0415=(4,15) + thc0515=(5,15) + thc0615=(6,15) + thc0715=(7,15) + thc0815=(8,15) + thc0915=(9,15) + thc1015=(10,15) + thc1112=(11,12) + thc1115=(11,15) + thc1215=(12,15) + thc1315=(13,15) + thc1415=(14,15) + thc1515=(15,15) +; + +attach variables [ Rd0002 Rd0810 Rn0002 Rn0305 Rn0810 Rm0305 Rm0608 Rs0305 ] + [ r0 r1 r2 r3 r4 r5 r6 r7 ]; + +attach variables [ Rm0003 Rm0306 Rd0811 Rn0003 Rt1215 Rt0811 Ra1215 Rd0003 part2Rt part2Rd0003 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; + +attach variables [ thCRn thCRd thCRm ] [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 cr11 cr12 cr13 cr14 cr15 ]; + +attach names [ thcpn ] [ p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 ]; + +attach variables [ hrn0002 hrm0305 hrd0002 ] + [ r8 r9 r10 r11 r12 sp lr pc ]; + +macro th_addflags(op1,op2) { + tmpCY = carry(op1,op2); + tmpOV = 
scarry(op1,op2); +} + +#See ARM Architecture reference section "Pseudocode details of addition and subtraction" +macro th_add_with_carry_flags(op1,op2){ + local CYz = zext(CY); + local result = op1 + op2; + tmpCY = carry( op1, op2 ) || carry( result, CYz ); + tmpOV = scarry( op1, op2) ^^ scarry( result, CYz ); +} + +#Note: used for subtraction op1 - (op2 + !CY) +#sets tmpCY if there is NO borrow +macro th_sub_with_carry_flags(op1, op2){ + local result = op1 - op2; + tmpCY = (op1 > op2) || (result < zext(CY)); + tmpOV = sborrow(op1,op2) ^^ sborrow(result,zext(!CY)); +} + + +macro th_test_flags(result){ + ZR = (result == 0); + NG = (result s< 0); + CY = shift_carry; +} + +# Note (unlike x86) carry flag is SET if there is NO borrow +macro th_subflags(op1,op2) { + tmpCY = op2 <= op1; + tmpOV = sborrow(op1,op2); +} +macro th_subflags0(op2) { + tmpCY = op2 == 0; + tmpOV = sborrow(0,op2); +} + + + +macro resflags(result) { + tmpNG = result s< 0; + tmpZR = result == 0; +} + +macro th_logicflags() { + tmpCY = shift_carry; + tmpOV = OV; +} + +macro th_affectflags() { + CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; +} + +############################################################################### + +# conditionals for the branch instruction + +thcc: "eq" is thcond=0 { tmp:1 = (ZR!=0); export tmp; } +thcc: "ne" is thcond=1 { tmp:1 = (ZR==0); export tmp; } +thcc: "cs" is thcond=2 { tmp:1 = (CY!=0); export tmp; } +thcc: "cc" is thcond=3 { tmp:1 = (CY==0); export tmp; } +thcc: "mi" is thcond=4 { tmp:1 = (NG!=0); export tmp; } +thcc: "pl" is thcond=5 { tmp:1 = (NG==0); export tmp; } +thcc: "vs" is thcond=6 { tmp:1 = (OV!=0); export tmp; } +thcc: "vc" is thcond=7 { tmp:1 = (OV==0); export tmp; } +thcc: "hi" is thcond=8 { tmp:1 = CY && !ZR; export tmp; } +thcc: "ls" is thcond=9 { tmp:1 = !CY || ZR; export tmp; } +thcc: "ge" is thcond=10 { tmp:1 = (NG == OV); export tmp; } +thcc: "lt" is thcond=11 { tmp:1 = (NG != OV); export tmp; } +thcc: "gt" is thcond=12 { tmp:1 = !ZR && (NG == OV); export tmp; } +thcc: "le" is thcond=13 { tmp:1 = ZR || (NG != OV); export tmp; } +# thcc: "AL" is thcond=14 { tmp = 1; export tmp; } +# thcc: "NV" is thcond=15 { tmp = 0; export tmp; } + +@define THCC "thcc & (thc1515=0 | thc1414=0 | thc1313=0)" + + +@if defined(VERSION_6T2) || defined(VERSION_7) + +part2thcc: "eq" is part2cond=0 { tmp:1 = (ZR!=0); export tmp; } +part2thcc: "ne" is part2cond=1 { tmp:1 = (ZR==0); export tmp; } +part2thcc: "cs" is part2cond=2 { tmp:1 = (CY!=0); export tmp; } +part2thcc: "cc" is part2cond=3 { tmp:1 = (CY==0); export tmp; } +part2thcc: "mi" is part2cond=4 { tmp:1 = (NG!=0); export tmp; } +part2thcc: "pl" is part2cond=5 { tmp:1 = (NG==0); export tmp; } +part2thcc: "vs" is part2cond=6 { tmp:1 = (OV!=0); export tmp; } +part2thcc: "vc" is part2cond=7 { tmp:1 = (OV==0); export tmp; } +part2thcc: "hi" is part2cond=8 { tmp:1 = CY && !ZR; export tmp; } +part2thcc: "ls" is part2cond=9 { tmp:1 = !CY || ZR; export tmp; } +part2thcc: "ge" is part2cond=10 { tmp:1 = (NG == OV); export tmp; } +part2thcc: "lt" is part2cond=11 { tmp:1 = (NG != OV); export tmp; } +part2thcc: "gt" is part2cond=12 { tmp:1 = !ZR && (NG == OV); export tmp; } +part2thcc: "le" is part2cond=13 { tmp:1 = ZR || (NG != OV); export tmp; } +# part2thcc: "AL" is part2cond=14 { tmp = 1; export tmp; } +# part2thcc: "NV" is part2cond=15 { tmp = 0; export tmp; } + +@define PART2THCC "part2thcc & (part2c0909=0 | part2c0808=0 | part2c0707=0)" + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + + +@if defined(VERSION_6T2) || 
defined(VERSION_7) + +# conditionals for IT Block +# Marvel at the UGLINESS: the p-code for pairs (eq,ne) (cs,cc) (mi,pl), etc. are the same +# The IT block decoding fills in the complement (if necessary) based on the IT mask bit for the instruction +it_thfcc: "eq" is fcond=0 { tmp:1 = (ZR!=0); export tmp; } +it_thfcc: "ne" is fcond=1 { tmp:1 = (ZR!=0); export tmp; } +it_thfcc: "cs" is fcond=2 { tmp:1 = (CY!=0); export tmp; } +it_thfcc: "cc" is fcond=3 { tmp:1 = (CY!=0); export tmp; } +it_thfcc: "mi" is fcond=4 { tmp:1 = (NG!=0); export tmp; } +it_thfcc: "pl" is fcond=5 { tmp:1 = (NG!=0); export tmp; } +it_thfcc: "vs" is fcond=6 { tmp:1 = (OV!=0); export tmp; } +it_thfcc: "vc" is fcond=7 { tmp:1 = (OV!=0); export tmp; } +it_thfcc: "hi" is fcond=8 { tmp:1 = CY && !ZR; export tmp; } +it_thfcc: "ls" is fcond=9 { tmp:1 = CY && !ZR; export tmp; } +it_thfcc: "ge" is fcond=10 { tmp:1 = (NG == OV); export tmp; } +it_thfcc: "lt" is fcond=11 { tmp:1 = (NG == OV); export tmp; } +it_thfcc: "gt" is fcond=12 { tmp:1 = !ZR && (NG == OV); export tmp; } +it_thfcc: "le" is fcond=13 { tmp:1 = !ZR && (NG == OV); export tmp; } +it_thfcc: "al" is fcond=14 { tmp:1 = 1; export tmp; } + +@define IT_THFCC "it_thfcc & (thc0707=0 | thc0606=0 | thc0505=0 | thc0404=0)" + +ByteRotate: "#"^rot is throt [rot = throt << 3; ] { export *[const]:1 rot; } + +thSBIT_CZNO: is thc0404=0 { } # Do nothing to the flag bits +thSBIT_CZNO: "s" is thc0404=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } +thSBIT_CZN: is thc0404=0 { } # Do nothing to the flags bits +thSBIT_CZN: "s" is thc0404=1 {CY = tmpCY; ZR = tmpZR; NG = tmpNG;} +thSBIT_ZN: is thc0404=0 { } # Do nothing to the flag bits +thSBIT_ZN: "s" is thc0404=1 { ZR = tmpZR; NG = tmpNG; } + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + + +# Addressing modes +# The capitalized fields are raw register addressing modes + +Hrd0002: Rd0002 is Rd0002 & h1=0 { export Rd0002; } +Hrd0002: hrd0002 is hrd0002 & h1=1 { export hrd0002; } +Hrd0002: pc is pc & hrd0002=7 & h1=1 { tmp:4 = inst_start + 4; export tmp; } + +Hrn0002: Rn0002 is Rn0002 & h1=0 { export Rn0002; } +Hrn0002: hrn0002 is hrn0002 & h1=1 { export hrn0002; } +Hrn0002: pc is pc & hrn0002=7 & h1=1 { tmp:4 = inst_start + 4; export tmp; } + +Hrm0305: Rm0305 is Rm0305 & h2=0 { export Rm0305; } +Hrm0305: hrm0305 is hrm0305 & h2=1 { export hrm0305; } +Hrm0305: pc is pc & hrm0305=7 & h2=1 { tmp:4 = inst_start + 4; export tmp; } + +@if defined(VERSION_6T2) || defined(VERSION_7) +Immed8_4: "#"^immval is immed8 [ immval = immed8 * 4; ] { export *[const]:4 immval; } +Immed4: "#"^thc0003 is thc0003 { export *[const]:4 thc0003; } +@endif + +Immed8: "#"^immed8 is immed8 { export *[const]:4 immed8; } +Immed3: "#"^immed3 is immed3 { export *[const]:4 immed3; } + +Pcrel8: [reloc] is immed8 + [ reloc = ((inst_start+4) $and 0xfffffffc) + 4*immed8; ] +{ + # don't export as an address, may be PIC code, and would add spurious symbols. 
+ export *[const]:4 reloc; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +Pcrel8_s8: [reloc] is immed8 + [ reloc = ((inst_start+4) $and 0xfffffffc) + 4*immed8; ] +{ + export *:8 reloc; +} +@endif # defined(VERSION_6T2) || defined(VERSION_7) + + +Sprel8: sp,"#"^immval is sp & immed8 [ immval = immed8 * 4; ] { local tmp = sp + immval; export tmp; } +Immed7_4: "#"^immval is immed7 [ immval = immed7 * 4; ] { tmp:4 = immval; export tmp; } +Immed5: "#"^immed5 is immed5 { export *[const]:4 immed5; } + + +@if defined(VERSION_6T2) || defined(VERSION_7) + +Immed12: "#"^immed12 is immed12_i; immed12_imm3 & immed12_imm8 + [ immed12=(immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8); ] +{ + export *[const]:4 immed12; +} + +Immed16: "#"^immed16 is immed12_i & sop0003; immed12_imm3 & immed12_imm8 + [ immed16 = (sop0003 << 12) | (immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8); ] +{ + export *[const]:2 immed16; +} + +PcrelImmed12Addr: reloc is immed12_i; immed12_imm3 & immed12_imm8 + [ reloc = ((inst_start+4) $and 0xfffffffc) + ((immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8)); ] +{ + # don't export as an address, may be PIC code, and would add spurious symbols. + export *[const]:4 reloc; +} + +NegPcrelImmed12Addr: reloc is immed12_i; immed12_imm3 & immed12_imm8 + [ reloc = ((inst_start+4) $and 0xfffffffc) - ((immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8)); ] +{ + # don't export as an address, may be PIC code, and would add spurious symbols. + export *[const]:4 reloc; +} + +PcrelOffset12: [reloc] is thc0707=1; offset12 + [ reloc = ((inst_start+4) $and 0xfffffffc) + offset12; ] +{ + export *:4 reloc; +} +PcrelOffset12: [reloc] is thc0707=0; offset12 + [ reloc = ((inst_start+4) $and 0xfffffffc) - offset12; ] +{ + export *:4 reloc; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + + +# decode thumb immediate12 encoded value + +@if defined(VERSION_6T2) || defined(VERSION_7) + +ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=0 & immed12_imm8 + [ imm32=immed12_imm8 $and 0xff; ] +{ + tmp:4 = imm32; shift_carry = CY; export tmp; +} +ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=1 & immed12_imm8 + [ imm32=(immed12_imm8<<16) | (immed12_imm8); ] +{ + tmp:4 = imm32; shift_carry = CY; export tmp; +} +ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=2 & immed12_imm8 + [ imm32=(immed12_imm8<<24) | (immed12_imm8<<8); ] +{ + tmp:4 = imm32; shift_carry = CY; export tmp; +} +ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=3 & immed12_imm8 + [ imm32=(immed12_imm8<<24) | (immed12_imm8<<16) | (immed12_imm8<<8) | (immed12_imm8); ] +{ + tmp:4 = imm32; shift_carry = CY; export tmp; +} +ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; immed12_imm3 & thc0707 & immed7 + [ imm32=(((0x80+immed7)<<(32-((immed12_imm3<<1)|thc0707)))|((0x80+immed7)>>(((immed12_imm3<<1)|thc0707)))) $and 0xffffffff; ] +{ + tmp:4 = imm32; local tmp1 = (tmp >> 31); shift_carry = tmp1(0); export tmp; +} +ThumbExpandImm12: "#"^imm32 is immed12_i=1 ; immed12_imm3 & thc0707 & immed7 + [ imm32=(((0x80+immed7)<<(32-(16+((immed12_imm3<<1)|thc0707))))|((0x80+immed7)>>((16+((immed12_imm3<<1)|thc0707))))) $and 0xffffffff; ] +{ + tmp:4 = imm32; local tmp1 = (tmp >> 31); shift_carry = tmp1(0); export tmp; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +@if defined(VERSION_6T2) || defined(VERSION_7) + +thLsbImm: "#"^lsb is imm3_shft & imm2_shft [ lsb= (imm3_shft<<2) | imm2_shft; ] { tmp:4 = lsb; export tmp; } +thMsbImm: "#"^thc0004 is 
thc0004 { tmp:4 = thc0004; export tmp; } +thWidthMinus1: "#"^width is thc0004 [ width = thc0004 + 1; ] { tmp:4 = thc0004; export tmp; } +thBitWidth: "#"^w is imm3_shft & imm2_shft & thc0004 [ w = thc0004 - ((imm3_shft<<2) | imm2_shft) + 1; ] { tmp:4 = w; export tmp; } + +@endif # VERSION_6T2 || VERSION_7 + + +##################### +###### thshift2 ###### +##################### + +@if defined(VERSION_6T2) || defined(VERSION_7) + +thshift2: Rm0003 is imm3_shft=0 & imm2_shft=0 & thc0405=0 & Rm0003 +{ + shift_carry = CY; export Rm0003; +} + +thshift2: Rm0003, "lsl #"^shftval is imm3_shft & imm2_shft & thc0405=0 & Rm0003 + [ shftval=(imm3_shft<<2) | (imm2_shft); ] +{ + local tmp1=(Rm0003>>(32-shftval))&1; shift_carry=tmp1(0); local tmp2=Rm0003<<shftval; export tmp2; +} + +thshift2: Rm0003, "lsr #32" is imm3_shft=0 & imm2_shft=0 & thc0405=1 & Rm0003 +{ + local tmp1=(Rm0003>>31); shift_carry=tmp1(0); tmp2:4=0; export tmp2; +} + +thshift2: Rm0003, "lsr #"^shftval is imm3_shft & imm2_shft & thc0405=1 & Rm0003 + [ shftval=(imm3_shft<<2) | (imm2_shft); ] +{ + local tmp1=(Rm0003>>(shftval-1))&1; shift_carry=tmp1(0); local tmp2=Rm0003>>shftval; export tmp2; +} + +thshift2: Rm0003, "asr #32" is imm3_shft=0 & imm2_shft=0 & thc0405=2 & Rm0003 +{ + local tmp1=(Rm0003>>31); shift_carry=tmp1(0); local tmp2 = Rm0003 s>> 32; export tmp2; +} + +thshift2: Rm0003, "asr #"^shftval is imm3_shft & imm2_shft & thc0405=2 & Rm0003 + [ shftval=(imm3_shft<<2) | (imm2_shft); ] +{ + local tmp1=(Rm0003>>(shftval-1))&1; shift_carry=tmp1(0); local tmp2=Rm0003 s>> shftval; export tmp2; +} + +thshift2: Rm0003, "rrx" is imm3_shft=0 & imm2_shft=0 & thc0405=3 & Rm0003 +{ + local tmp1=Rm0003&1; shift_carry=tmp1(0); local tmp2 = (zext(CY)<<31)|(Rm0003>>1); export tmp2; +} + +thshift2: Rm0003, "ror #"^shftval is imm3_shft & imm2_shft & thc0405=3 & Rm0003 + [ shftval=(imm3_shft<<2) | (imm2_shft); ] +{ + local tmp1=(Rm0003>>shftval)|(Rm0003<<(32-shftval)); local tmp2=tmp1 >> 31; shift_carry=tmp2(0); export tmp1; +} + +@endif # VERSION_6T2 || VERSION_7 + +Addr5: reloc is imm5 & thc0909 + [ reloc = inst_start + 4 + ((thc0909 << 6) | (imm5 << 1)); ] +{ + export *:4 reloc; +} + +Addr8: reloc is soffset8 + [ reloc = (inst_start+4) + 2*soffset8; ] +{ + export *:4 reloc; +} + +Addr11: reloc is soffset11 + [ reloc = (inst_start+4) + 2*soffset11; ] +{ + export *:4 reloc; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +ThAddr20: reloc is part2S=1 & part2imm6; part2J1 & part2J2 & part2imm11 + [ reloc = inst_start + 4 + ((-1 << 20) $or (part2J2 << 19) $or (part2J1 << 18) $or (part2imm6 << 12) $or (part2imm11 << 1)); ] +{ + export *:4 reloc; +} + +ThAddr20: reloc is part2S=0 & part2imm6; part2J1 & part2J2 & part2imm11 + [ reloc = inst_start + 4 + ((part2J2 << 19) $or (part2J1 << 18) $or (part2imm6 << 12) $or (part2imm11 << 1)); ] +{ + export *:4 reloc; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +ThAddr24: reloc is offset10S=0 & offset10; part2J1 & part2J2 & part2off + [ reloc = inst_start + 4 + (((part2J1 $xor 1) << 23) $or ((part2J2 $xor 1) << 22) $or (offset10 << 12) $or (part2off << 1)); ] +{ + export *:4 reloc; +} + +ThAddr24: reloc is offset10S=1 & offset10; part2J1 & part2J2 & part2off + [ reloc = inst_start + 4 + ((-1 << 24) $or (part2J1 << 23) $or (part2J2 << 22) $or (offset10 << 12) $or (part2off << 1)); ] +{ + export *:4 reloc; +} + +@if defined(VERSION_5) + +ThArmAddr23: reloc is offset10S=0 & offset10; part2J1 & part2J2 & part2off_10 + [ reloc = ((inst_start + 4) $and 0xfffffffc) + (((part2J1 $xor 1) << 23) $or ((part2J2 $xor 1) << 22) $or (offset10 << 12) $or (part2off_10 << 2)); ] +{ + export *:4 reloc; +} + +ThArmAddr23: reloc is 
offset10S=1 & offset10; part2J1 & part2J2 & part2off_10 + [ reloc = ((inst_start + 4) $and 0xfffffffc) + ((-1 << 24) $or (part2J1 << 23) $or (part2J2 << 22) $or (offset10 << 12) $or (part2off_10 << 2)); ] +{ + export *:4 reloc; +} + +@endif # VERSION_5 + + +Rn_exclaim: Rn0810 is Rn0810 & thc0810=0 & thc0000=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=1 & thc0101=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=2 & thc0202=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=3 & thc0303=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=4 & thc0404=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=5 & thc0505=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=6 & thc0606=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810 is Rn0810 & thc0810=7 & thc0707=1 { mult_addr = Rn0810; export Rn0810; } +Rn_exclaim: Rn0810! is Rn0810 & thc0810 { mult_addr = Rn0810; export Rn0810; } + +Rn_exclaim_WB: is Rn0810 & thc0810=0 & thc0000=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=1 & thc0101=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=2 & thc0202=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=3 & thc0303=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=4 & thc0404=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=5 & thc0505=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=6 & thc0606=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810=7 & thc0707=1 { } +Rn_exclaim_WB: is Rn0810 & thc0810 { Rn0810 = mult_addr; } + +# ldlist is the list of registers to be loaded or popped +LdRtype0: " "^r0 is thc0000=1 & r0 & thc0107=0 { r0 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype0: " "^r0^"," is thc0000=1 & r0 { r0 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype0: is thc0000=0 { } +LdRtype1: LdRtype0 r1 is LdRtype0 & thc0101=1 & r1 & thc0207=0 { r1 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype1: LdRtype0 r1^"," is LdRtype0 & thc0101=1 & r1 { r1 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype1: LdRtype0 is LdRtype0 & thc0101=0 { } +LdRtype2: LdRtype1 r2 is LdRtype1 & thc0202=1 & r2 & thc0307=0 { r2 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype2: LdRtype1 r2^"," is LdRtype1 & thc0202=1 & r2 { r2 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype2: LdRtype1 is LdRtype1 & thc0202=0 { } +LdRtype3: LdRtype2 r3 is LdRtype2 & thc0303=1 & r3 & thc0407=0 { r3 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype3: LdRtype2 r3^"," is LdRtype2 & thc0303=1 & r3 { r3 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype3: LdRtype2 is LdRtype2 & thc0303=0 { } +LdRtype4: LdRtype3 r4 is LdRtype3 & thc0404=1 & r4 & thc0507=0 { r4 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype4: LdRtype3 r4^"," is LdRtype3 & thc0404=1 & r4 { r4 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype4: LdRtype3 is LdRtype3 & thc0404=0 { } +LdRtype5: LdRtype4 r5 is LdRtype4 & thc0505=1 & r5 & thc0607=0 { r5 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype5: LdRtype4 r5^"," is LdRtype4 & thc0505=1 & r5 { r5 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype5: LdRtype4 is LdRtype4 & thc0505=0 { } +LdRtype6: LdRtype5 r6 is LdRtype5 & thc0606=1 & r6 & thc0707=0 { r6 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype6: LdRtype5 r6^"," is LdRtype5 & thc0606=1 & r6 { r6 = *mult_addr; mult_addr = mult_addr + 4; } +LdRtype6: LdRtype5 is LdRtype5 & thc0606=0 { } +ldlist: LdRtype6 r7 is LdRtype6 & thc0707=1 & r7 { r7 = *mult_addr; mult_addr = mult_addr + 4; } +ldlist: LdRtype6 is 
LdRtype6 & thc0707=0 { } + +#strlist is the list of registers to be stored +StrType0: " "^r0 is thc0000=1 & r0 & thc0107=0 { *mult_addr = r0; mult_addr = mult_addr + 4; } +StrType0: " "^r0^"," is thc0000=1 & r0 { *mult_addr = r0; mult_addr = mult_addr + 4; } +StrType0: is thc0000=0 { } +StrType1: StrType0 r1 is StrType0 & thc0101=1 & r1 & thc0207=0 { *mult_addr = r1; mult_addr = mult_addr + 4; } +StrType1: StrType0 r1^"," is StrType0 & thc0101=1 & r1 { *mult_addr = r1; mult_addr = mult_addr + 4; } +StrType1: StrType0 is StrType0 & thc0101=0 { } +StrType2: StrType1 r2 is StrType1 & thc0202=1 & r2 & thc0307=0 { *mult_addr = r2; mult_addr = mult_addr + 4; } +StrType2: StrType1 r2^"," is StrType1 & thc0202=1 & r2 { *mult_addr = r2; mult_addr = mult_addr + 4; } +StrType2: StrType1 is StrType1 & thc0202=0 { } +StrType3: StrType2 r3 is StrType2 & thc0303=1 & r3 & thc0407=0 { *mult_addr = r3; mult_addr = mult_addr + 4; } +StrType3: StrType2 r3^"," is StrType2 & thc0303=1 & r3 { *mult_addr = r3; mult_addr = mult_addr + 4; } +StrType3: StrType2 is StrType2 & thc0303=0 { } +StrType4: StrType3 r4 is StrType3 & thc0404=1 & r4 & thc0507=0 { *mult_addr = r4; mult_addr = mult_addr + 4; } +StrType4: StrType3 r4^"," is StrType3 & thc0404=1 & r4 { *mult_addr = r4; mult_addr = mult_addr + 4; } +StrType4: StrType3 is StrType3 & thc0404=0 { } +StrType5: StrType4 r5 is StrType4 & thc0505=1 & r5 & thc0607=0 { *mult_addr = r5; mult_addr = mult_addr + 4; } +StrType5: StrType4 r5^"," is StrType4 & thc0505=1 & r5 { *mult_addr = r5; mult_addr = mult_addr + 4; } +StrType5: StrType4 is StrType4 & thc0505=0 { } +StrType6: StrType5 r6 is StrType5 & thc0606=1 & r6 & thc0707=0 { *mult_addr = r6; mult_addr = mult_addr + 4; } +StrType6: StrType5 r6^"," is StrType5 & thc0606=1 & r6 { *mult_addr = r6; mult_addr = mult_addr + 4; } +StrType6: StrType5 is StrType5 & thc0606=0 { } +StrType7: StrType6 r7 is StrType6 & thc0707=1 & r7 { *mult_addr = r7; mult_addr = mult_addr + 4; } +StrType7: StrType6 is StrType6 & thc0707=0 { } +strlist: StrType7 is StrType7 { } + +# pshlist is the list of registers to be pushed to memory +# SCR 10921, fix the order in which the regs appear in the disassembled insn, to be in line with objdump +# Also add commas between regs +# +PshType7: "" is thc0707=0 { } +PshType7: r7 is thc0707=1 & r7 { mult_addr = mult_addr - 4; *mult_addr = r7; } +PshType6: PshType7 is PshType7 & thc0606=0 { } +PshType6: r6 is PshType7 & thc0606=1 & r6 & thc0707=0 { mult_addr = mult_addr - 4; *mult_addr = r6; } +PshType6: r6^"," PshType7 is PshType7 & thc0606=1 & r6 { mult_addr = mult_addr - 4; *mult_addr = r6; } +PshType5: PshType6 is PshType6 & thc0505=0 { } +PshType5: r5 is PshType6 & thc0505=1 & r5 & thc0607=0 { mult_addr = mult_addr - 4; *mult_addr = r5; } +PshType5: r5^"," PshType6 is PshType6 & thc0505=1 & r5 { mult_addr = mult_addr - 4; *mult_addr = r5; } +PshType4: PshType5 is PshType5 & thc0404=0 { } +PshType4: r4 is PshType5 & thc0404=1 & r4 & thc0507=0 { mult_addr = mult_addr - 4; *mult_addr = r4; } +PshType4: r4^"," PshType5 is PshType5 & thc0404=1 & r4 { mult_addr = mult_addr - 4; *mult_addr = r4; } +PshType3: PshType4 is PshType4 & thc0303=0 { } +PshType3: r3 is PshType4 & thc0303=1 & r3 & thc0407=0 { mult_addr = mult_addr - 4; *mult_addr = r3; } +PshType3: r3^"," PshType4 is PshType4 & thc0303=1 & r3 { mult_addr = mult_addr - 4; *mult_addr = r3; } +PshType2: PshType3 is PshType3 & thc0202=0 { } +PshType2: r2 is PshType3 & thc0202=1 & r2 & thc0307=0 { mult_addr = mult_addr - 4; *mult_addr = r2; } +PshType2: r2^"," 
PshType3 is PshType3 & thc0202=1 & r2 { mult_addr = mult_addr - 4; *mult_addr = r2; } +PshType1: PshType2 is PshType2 & thc0101=0 { } +PshType1: r1 is PshType2 & thc0101=1 & r1 & thc0207=0 { mult_addr = mult_addr - 4; *mult_addr = r1; } +PshType1: r1^"," PshType2 is PshType2 & thc0101=1 & r1 { mult_addr = mult_addr - 4; *mult_addr = r1; } +pshlist: PshType1 is PshType1 & thc0000=0 { } +pshlist: r0 is PshType1 & thc0000=1 & r0 & thc0107=0 { mult_addr = mult_addr - 4; *mult_addr = r0; } +pshlist: r0^"," PshType1 is PshType1 & thc0000=1 & r0 { mult_addr = mult_addr - 4; *mult_addr = r0; } + +# ldlist_inc is the list of registers to be loaded for pop instructions +thrlist15: r0 is thc0000=1 & r0 & thc0115=0 { r0 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist15: r0^"," is thc0000=1 & r0 { r0 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist15: is thc0000=0 { } +thrlist14: thrlist15 r1 is thc0101=1 & thrlist15 & r1 & thc0215=0 { r1 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist14: thrlist15 r1^"," is thc0101=1 & thrlist15 & r1 { r1 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist14: thrlist15 is thc0101=0 & thrlist15 { } +thrlist13: thrlist14 r2 is thc0202=1 & thrlist14 & r2 & thc0315=0 { r2 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist13: thrlist14 r2^"," is thc0202=1 & thrlist14 & r2 { r2 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist13: thrlist14 is thc0202=0 & thrlist14 { } +thrlist12: thrlist13 r3 is thc0303=1 & thrlist13 & r3 & thc0415=0 { r3 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist12: thrlist13 r3^"," is thc0303=1 & thrlist13 & r3 { r3 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist12: thrlist13 is thc0303=0 & thrlist13 { } +thrlist11: thrlist12 r4 is thc0404=1 & thrlist12 & r4 & thc0515=0 { r4 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist11: thrlist12 r4^"," is thc0404=1 & thrlist12 & r4 { r4 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist11: thrlist12 is thc0404=0 & thrlist12 { } +thrlist10: thrlist11 r5 is thc0505=1 & thrlist11 & r5 & thc0615=0 { r5 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist10: thrlist11 r5^"," is thc0505=1 & thrlist11 & r5 { r5 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist10: thrlist11 is thc0505=0 & thrlist11 { } +thrlist9: thrlist10 r6 is thc0606=1 & thrlist10 & r6 & thc0715=0 { r6 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist9: thrlist10 r6^"," is thc0606=1 & thrlist10 & r6 { r6 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist9: thrlist10 is thc0606=0 & thrlist10 { } +thrlist8: thrlist9 r7 is thc0707=1 & thrlist9 & r7 & thc0815=0 { r7 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist8: thrlist9 r7^"," is thc0707=1 & thrlist9 & r7 { r7 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist8: thrlist9 is thc0707=0 & thrlist9 { } +thrlist7: thrlist8 r8 is thc0808=1 & thrlist8 & r8 & thc0915=0 { r8 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist7: thrlist8 r8^"," is thc0808=1 & thrlist8 & r8 { r8 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist7: thrlist8 is thc0808=0 & thrlist8 { } +thrlist6: thrlist7 r9 is thc0909=1 & thrlist7 & r9 & thc1015=0 { r9 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist6: thrlist7 r9^"," is thc0909=1 & thrlist7 & r9 { r9 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist6: thrlist7 is thc0909=0 & thrlist7 { } +thrlist5: thrlist6 r10 is thc1010=1 & thrlist6 & r10 & thc1115=0 { r10 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist5: thrlist6 r10^"," is thc1010=1 & thrlist6 & r10 { r10 = * mult_addr; mult_addr = mult_addr + 4; } 
+thrlist5: thrlist6 is thc1010=0 & thrlist6 { } +thrlist4: thrlist5 r11 is thc1111=1 & thrlist5 & r11 & thc1215=0 { r11 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist4: thrlist5 r11^"," is thc1111=1 & thrlist5 & r11 { r11 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist4: thrlist5 is thc1111=0 & thrlist5 { } +thrlist3: thrlist4 r12 is thc1212=1 & thrlist4 & r12 & thc1315=0 { r12 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist3: thrlist4 r12^"," is thc1212=1 & thrlist4 & r12 { r12 = * mult_addr; mult_addr = mult_addr + 4; } +thrlist3: thrlist4 is thc1212=0 & thrlist4 { } +thrlist2: thrlist3 sp is thc1313=1 & thrlist3 & sp & thc1415=0 { sp = * mult_addr; mult_addr = mult_addr + 4; } +thrlist2: thrlist3 sp^"," is thc1313=1 & thrlist3 & sp { sp = * mult_addr; mult_addr = mult_addr + 4; } +thrlist2: thrlist3 is thc1313=0 & thrlist3 { } +thrlist1: thrlist2 lr is thc1414=1 & thrlist2 & lr & thc1515=0 { lr = * mult_addr; mult_addr = mult_addr + 4; } +thrlist1: thrlist2 lr^"," is thc1414=1 & thrlist2 & lr { lr = * mult_addr; mult_addr = mult_addr + 4; } +thrlist1: thrlist2 is thc1414=0 & thrlist2 { } +thldrlist_inc: {thrlist1 pc } is thc1515=1 & thrlist1 & pc { pc = * mult_addr; mult_addr = mult_addr + 4; } +thldrlist_inc: {thrlist1 } is thc1515=0 & thrlist1 { } + +@if defined(VERSION_6T2) || defined(VERSION_7) + +# thstrlist_inc is the list of registers to be stored using IA or IB in Addressing Mode 4 +thsinc15: r0 is thc0000=1 & r0 { * mult_addr = r0; mult_addr = mult_addr + 4; } +thsinc15: is thc0000=0 { } +thsinc14: thsinc15 r1 is thc0101=1 & thsinc15 & r1 & thc0215=0 { * mult_addr = r1; mult_addr = mult_addr + 4; } +thsinc14: thsinc15 r1^"," is thc0101=1 & thsinc15 & r1 { * mult_addr = r1; mult_addr = mult_addr + 4; } +thsinc14: thsinc15 is thc0101=0 & thsinc15 { } +thsinc13: thsinc14 r2 is thc0202=1 & thsinc14 & r2 & thc0315=0 { * mult_addr = r2; mult_addr = mult_addr + 4; } +thsinc13: thsinc14 r2^"," is thc0202=1 & thsinc14 & r2 { * mult_addr = r2; mult_addr = mult_addr + 4; } +thsinc13: thsinc14 is thc0202=0 & thsinc14 { } +thsinc12: thsinc13 r3 is thc0303=1 & thsinc13 & r3 & thc0415=0 { * mult_addr = r3; mult_addr = mult_addr + 4; } +thsinc12: thsinc13 r3^"," is thc0303=1 & thsinc13 & r3 { * mult_addr = r3; mult_addr = mult_addr + 4; } +thsinc12: thsinc13 is thc0303=0 & thsinc13 { } +thsinc11: thsinc12 r4 is thc0404=1 & thsinc12 & r4 & thc0515=0 { * mult_addr = r4; mult_addr = mult_addr + 4; } +thsinc11: thsinc12 r4^"," is thc0404=1 & thsinc12 & r4 { * mult_addr = r4; mult_addr = mult_addr + 4; } +thsinc11: thsinc12 is thc0404=0 & thsinc12 { } +thsinc10: thsinc11 r5 is thc0505=1 & thsinc11 & r5 & thc0615=0 { * mult_addr = r5; mult_addr = mult_addr + 4; } +thsinc10: thsinc11 r5^"," is thc0505=1 & thsinc11 & r5 { * mult_addr = r5; mult_addr = mult_addr + 4; } +thsinc10: thsinc11 is thc0505=0 & thsinc11 { } +thsinc9: thsinc10 r6 is thc0606=1 & thsinc10 & r6 & thc0715=0 { * mult_addr = r6; mult_addr = mult_addr + 4; } +thsinc9: thsinc10 r6^"," is thc0606=1 & thsinc10 & r6 { * mult_addr = r6; mult_addr = mult_addr + 4; } +thsinc9: thsinc10 is thc0606=0 & thsinc10 { } +thsinc8: thsinc9 r7 is thc0707=1 & thsinc9 & r7 & thc0815=0 { * mult_addr = r7; mult_addr = mult_addr + 4; } +thsinc8: thsinc9 r7^"," is thc0707=1 & thsinc9 & r7 { * mult_addr = r7; mult_addr = mult_addr + 4; } +thsinc8: thsinc9 is thc0707=0 & thsinc9 { } +thsinc7: thsinc8 r8 is thc0808=1 & thsinc8 & r8 & thc0915=0 { * mult_addr = r8; mult_addr = mult_addr + 4; } +thsinc7: thsinc8 r8^"," is thc0808=1 & thsinc8 & r8 
{ * mult_addr = r8; mult_addr = mult_addr + 4; } +thsinc7: thsinc8 is thc0808=0 & thsinc8 { } +thsinc6: thsinc7 r9 is thc0909=1 & thsinc7 & r9 & thc1015=0 { * mult_addr = r9; mult_addr = mult_addr + 4; } +thsinc6: thsinc7 r9^"," is thc0909=1 & thsinc7 & r9 { * mult_addr = r9; mult_addr = mult_addr + 4; } +thsinc6: thsinc7 is thc0909=0 & thsinc7 { } +thsinc5: thsinc6 r10 is thc1010=1 & thsinc6 & r10 & thc1115=0 { * mult_addr = r10; mult_addr = mult_addr + 4; } +thsinc5: thsinc6 r10^"," is thc1010=1 & thsinc6 & r10 { * mult_addr = r10; mult_addr = mult_addr + 4; } +thsinc5: thsinc6 is thc1010=0 & thsinc6 { } +thsinc4: thsinc5 r11 is thc1111=1 & thsinc5 & r11 & thc1215=0 { * mult_addr = r11; mult_addr = mult_addr + 4; } +thsinc4: thsinc5 r11^"," is thc1111=1 & thsinc5 & r11 { * mult_addr = r11; mult_addr = mult_addr + 4; } +thsinc4: thsinc5 is thc1111=0 & thsinc5 { } +thsinc3: thsinc4 r12 is thc1212=1 & thsinc4 & r12 & thc1315=0 { * mult_addr = r12; mult_addr = mult_addr + 4; } +thsinc3: thsinc4 r12^"," is thc1212=1 & thsinc4 & r12 { * mult_addr = r12; mult_addr = mult_addr + 4; } +thsinc3: thsinc4 is thc1212=0 & thsinc4 { } +thsinc2: thsinc3 sp is thc1313=1 & thsinc3 & sp & thc1415=0 { * mult_addr = sp; mult_addr = mult_addr + 4; } +thsinc2: thsinc3 sp^"," is thc1313=1 & thsinc3 & sp { * mult_addr = sp; mult_addr = mult_addr + 4; } +thsinc2: thsinc3 is thc1313=0 & thsinc3 { } +thsinc1: thsinc2 lr is thc1414=1 & thsinc2 & lr & thc1515=0 { * mult_addr = lr; mult_addr = mult_addr + 4; } +thsinc1: thsinc2 lr^"," is thc1414=1 & thsinc2 & lr { * mult_addr = lr; mult_addr = mult_addr + 4; } +thsinc1: thsinc2 is thc1414=0 & thsinc2 { } +thstrlist_inc: {thsinc1 pc } is thc1515=1 & thsinc1 & pc { *:4 mult_addr = inst_start+4; mult_addr = mult_addr + 4; } +thstrlist_inc: {thsinc1 } is thc1515=0 & thsinc1 { } + +# thldrlist_dec is the list of registers to be loaded using DA or DB in Addressing Mode 4 +thrldec15: pc is thc1515=1 & pc { pc = * mult_addr; mult_addr = mult_addr - 4; } +thrldec15: is thc1515=0 { } +thrldec14: lr thrldec15 is thc1414=1 & thrldec15 & lr & thc1515=0 { lr = * mult_addr; mult_addr = mult_addr - 4; } +thrldec14: lr^"," thrldec15 is thc1414=1 & thrldec15 & lr { lr = * mult_addr; mult_addr = mult_addr - 4; } +thrldec14: thrldec15 is thc1414=0 & thrldec15 { } +thrldec13: sp thrldec14 is thc1313=1 & thrldec14 & sp & thc1415=0 { sp = * mult_addr; mult_addr = mult_addr - 4; } +thrldec13: sp^"," thrldec14 is thc1313=1 & thrldec14 & sp { sp = * mult_addr; mult_addr = mult_addr - 4; } +thrldec13: thrldec14 is thc1313=0 & thrldec14 { } +thrldec12: r12 thrldec13 is thc1212=1 & thrldec13 & r12 & thc1315=0 { r12 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec12: r12^"," thrldec13 is thc1212=1 & thrldec13 & r12 { r12 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec12: thrldec13 is thc1212=0 & thrldec13 { } +thrldec11: r11 thrldec12 is thc1111=1 & thrldec12 & r11 & thc1215=0 { r11 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec11: r11^"," thrldec12 is thc1111=1 & thrldec12 & r11 { r11 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec11: thrldec12 is thc1111=0 & thrldec12 { } +thrldec10: r10 thrldec11 is thc1010=1 & thrldec11 & r10 & thc1115=0 { r10 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec10: r10^"," thrldec11 is thc1010=1 & thrldec11 & r10 { r10 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec10: thrldec11 is thc1010=0 & thrldec11 { } +thrldec9: r9 thrldec10 is thc0909=1 & thrldec10 & r9 & thc1015=0 { r9 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec9: 
r9^"," thrldec10 is thc0909=1 & thrldec10 & r9 { r9 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec9: thrldec10 is thc0909=0 & thrldec10 { } +thrldec8: r8 thrldec9 is thc0808=1 & thrldec9 & r8 & thc0915=0 { r8 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec8: r8^"," thrldec9 is thc0808=1 & thrldec9 & r8 { r8 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec8: thrldec9 is thc0808=0 & thrldec9 { } +thrldec7: r7 thrldec8 is thc0707=1 & thrldec8 & r7 & thc0815=0 { r7 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec7: r7^"," thrldec8 is thc0707=1 & thrldec8 & r7 { r7 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec7: thrldec8 is thc0707=0 & thrldec8 { } +thrldec6: r6 thrldec7 is thc0606=1 & thrldec7 & r6 & thc0715=0 { r6 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec6: r6^"," thrldec7 is thc0606=1 & thrldec7 & r6 { r6 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec6: thrldec7 is thc0606=0 & thrldec7 { } +thrldec5: r5 thrldec6 is thc0505=1 & thrldec6 & r5 & thc0615=0 { r5 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec5: r5^"," thrldec6 is thc0505=1 & thrldec6 & r5 { r5 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec5: thrldec6 is thc0505=0 & thrldec6 { } +thrldec4: r4 thrldec5 is thc0404=1 & thrldec5 & r4 & thc0515=0 { r4 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec4: r4^"," thrldec5 is thc0404=1 & thrldec5 & r4 { r4 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec4: thrldec5 is thc0404=0 & thrldec5 { } +thrldec3: r3 thrldec4 is thc0303=1 & thrldec4 & r3 & thc0415=0 { r3 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec3: r3^"," thrldec4 is thc0303=1 & thrldec4 & r3 { r3 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec3: thrldec4 is thc0303=0 & thrldec4 { } +thrldec2: r2 thrldec3 is thc0202=1 & thrldec3 & r2 & thc0315=0 { r2 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec2: r2^"," thrldec3 is thc0202=1 & thrldec3 & r2 { r2 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec2: thrldec3 is thc0202=0 & thrldec3 { } +thrldec1: r1 thrldec2 is thc0101=1 & thrldec2 & r1 & thc0215=0 { r1 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec1: r1^"," thrldec2 is thc0101=1 & thrldec2 & r1 { r1 = * mult_addr; mult_addr = mult_addr - 4; } +thrldec1: thrldec2 is thc0101=0 & thrldec2 { } +thldrlist_dec: { r0 thrldec1 } is thc0000=1 & thrldec1 & r0 & thc0115=0 { r0 = * mult_addr; mult_addr = mult_addr - 4; } +thldrlist_dec: { r0^"," thrldec1 } is thc0000=1 & thrldec1 & r0 { r0 = * mult_addr; mult_addr = mult_addr - 4; } +thldrlist_dec: { thrldec1 } is thc0000=0 & thrldec1 { } + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +# thstrlist_dec is the list of registers to be pushed +thsdec15: pc is thc1515=1 & pc { *:4 mult_addr = inst_start+4; mult_addr = mult_addr - 4; } +thsdec15: is thc1515=0 { } +thsdec14: lr thsdec15 is thc1414=1 & thsdec15 & lr & thc1515=0 { * mult_addr=lr; mult_addr = mult_addr - 4; } +thsdec14: lr^"," thsdec15 is thc1414=1 & thsdec15 & lr { * mult_addr=lr; mult_addr = mult_addr - 4; } +thsdec14: thsdec15 is thc1414=0 & thsdec15 { } +thsdec13: sp thsdec14 is thc1313=1 & thsdec14 & sp & thc1415=0 { * mult_addr=sp; mult_addr = mult_addr - 4; } +thsdec13: sp^"," thsdec14 is thc1313=1 & thsdec14 & sp { * mult_addr=sp; mult_addr = mult_addr - 4; } +thsdec13: thsdec14 is thc1313=0 & thsdec14 { } +thsdec12: r12 thsdec13 is thc1212=1 & thsdec13 & r12 & thc1315=0 { * mult_addr=r12; mult_addr = mult_addr - 4; } +thsdec12: r12^"," thsdec13 is thc1212=1 & thsdec13 & r12 { * mult_addr=r12; mult_addr = mult_addr - 4; } +thsdec12: 
thsdec13 is thc1212=0 & thsdec13 { } +thsdec11: r11 thsdec12 is thc1111=1 & thsdec12 & r11 & thc1215=0 { * mult_addr=r11; mult_addr = mult_addr - 4; } +thsdec11: r11^"," thsdec12 is thc1111=1 & thsdec12 & r11 { * mult_addr=r11; mult_addr = mult_addr - 4; } +thsdec11: thsdec12 is thc1111=0 & thsdec12 { } +thsdec10: r10 thsdec11 is thc1010=1 & thsdec11 & r10 & thc1115=0 { * mult_addr=r10; mult_addr = mult_addr - 4; } +thsdec10: r10^"," thsdec11 is thc1010=1 & thsdec11 & r10 { * mult_addr=r10; mult_addr = mult_addr - 4; } +thsdec10: thsdec11 is thc1010=0 & thsdec11 { } +thsdec9: r9 thsdec10 is thc0909=1 & thsdec10 & r9 & thc1015=0 { * mult_addr=r9; mult_addr = mult_addr - 4; } +thsdec9: r9^"," thsdec10 is thc0909=1 & thsdec10 & r9 { * mult_addr=r9; mult_addr = mult_addr - 4; } +thsdec9: thsdec10 is thc0909=0 & thsdec10 { } +thsdec8: r8 thsdec9 is thc0808=1 & thsdec9 & r8 & thc0915=0 { * mult_addr=r8; mult_addr = mult_addr - 4; } +thsdec8: r8^"," thsdec9 is thc0808=1 & thsdec9 & r8 { * mult_addr=r8; mult_addr = mult_addr - 4; } +thsdec8: thsdec9 is thc0808=0 & thsdec9 { } +thsdec7: r7 thsdec8 is thc0707=1 & thsdec8 & r7 & thc0815=0 { * mult_addr=r7; mult_addr = mult_addr - 4; } +thsdec7: r7^"," thsdec8 is thc0707=1 & thsdec8 & r7 { * mult_addr=r7; mult_addr = mult_addr - 4; } +thsdec7: thsdec8 is thc0707=0 & thsdec8 { } +thsdec6: r6 thsdec7 is thc0606=1 & thsdec7 & r6 & thc0715=0 { * mult_addr=r6; mult_addr = mult_addr - 4; } +thsdec6: r6^"," thsdec7 is thc0606=1 & thsdec7 & r6 { * mult_addr=r6; mult_addr = mult_addr - 4; } +thsdec6: thsdec7 is thc0606=0 & thsdec7 { } +thsdec5: r5 thsdec6 is thc0505=1 & thsdec6 & r5 & thc0615=0 { * mult_addr=r5; mult_addr = mult_addr - 4; } +thsdec5: r5^"," thsdec6 is thc0505=1 & thsdec6 & r5 { * mult_addr=r5; mult_addr = mult_addr - 4; } +thsdec5: thsdec6 is thc0505=0 & thsdec6 { } +thsdec4: r4 thsdec5 is thc0404=1 & thsdec5 & r4 & thc0515=0 { * mult_addr=r4; mult_addr = mult_addr - 4; } +thsdec4: r4^"," thsdec5 is thc0404=1 & thsdec5 & r4 { * mult_addr=r4; mult_addr = mult_addr - 4; } +thsdec4: thsdec5 is thc0404=0 & thsdec5 { } +thsdec3: r3 thsdec4 is thc0303=1 & thsdec4 & r3 & thc0415=0 { * mult_addr=r3; mult_addr = mult_addr - 4; } +thsdec3: r3^"," thsdec4 is thc0303=1 & thsdec4 & r3 { * mult_addr=r3; mult_addr = mult_addr - 4; } +thsdec3: thsdec4 is thc0303=0 & thsdec4 { } +thsdec2: r2 thsdec3 is thc0202=1 & thsdec3 & r2 & thc0415=0 { * mult_addr=r2; mult_addr = mult_addr - 4; } +thsdec2: r2^"," thsdec3 is thc0202=1 & thsdec3 & r2 { * mult_addr=r2; mult_addr = mult_addr - 4; } +thsdec2: thsdec3 is thc0202=0 & thsdec3 { } +thsdec1: r1 thsdec2 is thc0101=1 & thsdec2 & r1 & thc0215=0 { * mult_addr=r1; mult_addr = mult_addr - 4; } +thsdec1: r1^"," thsdec2 is thc0101=1 & thsdec2 & r1 { * mult_addr=r1; mult_addr = mult_addr - 4; } +thsdec1: thsdec2 is thc0101=0 & thsdec2 { } +thstrlist_dec: { r0 thsdec1 } is thc0000=1 & thsdec1 & r0 & thc0115=0 { * mult_addr=r0; mult_addr = mult_addr - 4; } +thstrlist_dec: { r0^"," thsdec1 } is thc0000=1 & thsdec1 & r0 { * mult_addr=r0; mult_addr = mult_addr - 4; } +thstrlist_dec: { thsdec1 } is thc0000=0 & thsdec1 { } + +ldbrace: {ldlist } is ldlist { } +stbrace: {strlist } is strlist { } +psbrace: { pshlist } is pshlist { } + +# Some extra subconstructors for the push and pop instructions +pclbrace:{ldlist^"," pc } is ldlist & pc { build ldlist; pc = *mult_addr; mult_addr = mult_addr + 4; } +pclbrace:{ pc } is thc0007=0 & pc { pc = *mult_addr; mult_addr = mult_addr + 4; } +pcpbrace:{ pshlist^"," lr } is pshlist & lr { 
mult_addr = mult_addr - 4; *mult_addr = lr; build pshlist; } +pcpbrace:{ lr } is thc0007=0 & lr { mult_addr = mult_addr - 4; *mult_addr = lr; } + +@if defined(VERSION_6T2) || defined(VERSION_7) +RnIndirect12: [Rn0003,"#"^offset12] is Rn0003; offset12 { local tmp = Rn0003 + offset12; export tmp; } + +RnIndirectPUW: [Rn0003],"#"^-immed8 is Rn0003; addr_puw=1 & immed8 { local tmp = Rn0003; Rn0003=Rn0003-immed8; export tmp; } +RnIndirectPUW: [Rn0003],"#"^immed8 is Rn0003; addr_puw=3 & immed8 { local tmp = Rn0003; Rn0003=Rn0003+immed8; export tmp; } +RnIndirectPUW: [Rn0003,"#"^-immed8] is Rn0003; addr_puw=4 & immed8 { local tmp = Rn0003 - immed8; export tmp; } +RnIndirectPUW: [Rn0003,"#"^-immed8]! is Rn0003; addr_puw=5 & immed8 { local tmp = Rn0003 - immed8; Rn0003=tmp; export tmp; } +RnIndirectPUW: [Rn0003,"#"^immed8]! is Rn0003; addr_puw=7 & immed8 { local tmp = Rn0003 + immed8; Rn0003=tmp; export tmp; } + +@define RN_INDIRECT_PUW "(op0; (addr_puw=4 | thc0808=1)) & RnIndirectPUW" # constraint for RnIndirectPUW + +RnIndirectPUW1: [Rn0003],"#"^-immval is Rn0003 & addr_puw1=0x3; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003; Rn0003=Rn0003-immval; export tmp; } +RnIndirectPUW1: [Rn0003],"#"^immval is Rn0003 & addr_puw1=0x7; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003; Rn0003=Rn0003+immval; export tmp; } +RnIndirectPUW1: [Rn0003,"#"^-immval] is Rn0003 & addr_puw1=0xa; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 - immval; export tmp; } +RnIndirectPUW1: [Rn0003,"#"^-immval]! is Rn0003 & addr_puw1=0xb; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 - immval; Rn0003=tmp; export tmp; } +RnIndirectPUW1: [Rn0003,"#"^immval] is Rn0003 & addr_puw1=0xe; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 + immval; export tmp; } +RnIndirectPUW1: [Rn0003,"#"^immval]! is Rn0003 & addr_puw1=0xf; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 + immval; Rn0003=tmp; export tmp; } + +@endif # VERSION_6T2 || VERSION_7 + +@define RN_INDIRECT_PUW1 "((thc0808=1 | thc0505=1); op0) & RnIndirectPUW1" # constraint for RnIndirectPUW1 + +RnIndirect4: [Rn0305,"#"^immval] is Rn0305 & immed5 [ immval = immed5 * 4; ] { local tmp = Rn0305 + immval; export tmp; } +RnIndirect2: [Rn0305,"#"^immval] is Rn0305 & immed5 [ immval = immed5 * 2; ] { local tmp = Rn0305 + immval; export tmp; } +RnIndirect1: [Rn0305,"#"^immed5] is Rn0305 & immed5 { local tmp = Rn0305 + immed5; export tmp; } + +RnRmIndirect: [Rn0305,Rm0608] is Rn0305 & Rm0608 { local tmp = Rn0305 + Rm0608; export tmp; } + +Pcrel8Indirect: [reloc] is immed8 + [ reloc = ((inst_start+4) $and 0xfffffffc) + 4*immed8; ] +{ + export *:4 reloc; +} + +Sprel8Indirect: [sp,"#"^immval] is sp & immed8 [ immval = immed8 * 4; ] { local tmp = sp + immval; export tmp; } + +@if defined(VERSION_6T2) || defined(VERSION_7) + +taddrmode5: [Rn0003,"#"^off8] is thP8=1 & thU7=1 & thW5=0 & Rn0003; immed8 [ off8=immed8*4; ] { local tmp = Rn0003 + off8; export tmp; } +taddrmode5: [Rn0003,"#"^noff8] is thP8=1 & thU7=0 & thW5=0 & Rn0003; immed8 [ noff8=-(immed8*4); ] { local tmp = Rn0003 + noff8; export tmp; } +taddrmode5: [Rn0003,"#"^off8]! is thP8=1 & thU7=1 & thW5=1 & Rn0003; immed8 [ off8=immed8*4; ] { Rn0003 = Rn0003 + off8; export Rn0003; } +taddrmode5: [Rn0003,"#"^noff8]! 
is thP8=1 & thU7=0 & thW5=1 & Rn0003; immed8 [ noff8=-(immed8*4); ] { Rn0003 = Rn0003 + noff8; export Rn0003; } +taddrmode5: [Rn0003],"#"^off8 is thP8=0 & thU7=1 & thW5=1 & Rn0003; immed8 [ off8=immed8*4; ] { local tmp = Rn0003; Rn0003 = Rn0003+off8; export tmp; } +taddrmode5: [Rn0003],"#"^noff8 is thP8=0 & thU7=0 & thW5=1 & Rn0003; immed8 [ noff8=-(immed8*4); ] { local tmp = Rn0003; Rn0003 = Rn0003 + noff8; export tmp; } +taddrmode5: [Rn0003],{immed8} is thP8=0 & thU7=1 & thW5=0 & Rn0003; immed8 { export Rn0003; } + +@endif # VERSION_6T2 || VERSION_7 + +# +# Modes for SRS instructions +# +thSRSMode: "usr" is thsrsMode=8 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "fiq" is thsrsMode=9 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "irq" is thsrsMode=10 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "svc" is thsrsMode=11 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "mon" is thsrsMode=14 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "abt" is thsrsMode=15 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "und" is thsrsMode=19 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "sys" is thsrsMode=23 & thc0004 { export *[const]:1 thc0004; } +thSRSMode: "#"^thsrsMode is thsrsMode { export *[const]:1 thsrsMode; } + +# +# Detect if the PC is loaded and do a GOTO +# +# TODO: this is how all detections of writes into the PC should be done. +# Instead of enumerating and splitting the case into PC loaded and non loaded, IE (add PC,#0x...) +# Should only have one base constructor and have a sub constructor and build to test if the PC was loaded and do the right thing. +# + +@if defined(VERSION_6T2) || defined(VERSION_7) + +RtGotoCheck: is Rt1215=15 { + LoadWritePC(pc); + goto [pc]; +} +RtGotoCheck: is Rt1215 {} + +@endif # VERSION_6T2 || VERSION_7 + +############################################################ + +# Base constructors + +# We have the following operand types: +# Type Corresponding syntax in ARM/THUMB manual +# +# Rd0002 with Rd occupying bits 0-2 +# Rd0810 with Rd occupying bits 8-10 +# Rm0305 with Rm occupying bits 3-5 +# Rm0608 with Rm occupying bits 6-8 +# Rn0002 with Rn occupying bits 0-2 +# Rn0305 with Rn occupying bits 3-5 +# Rn0810 with Rn occupying bits 8-10 +# Hrd0002 with H1 bit in diagram +# Hrn0002 with H1 bit in diagram +# Hrm0305 with H2 bit in diagram +# Rs0305 with Rs occupying bits 3-5 +# Immed3 # +# Immed5 # +# Immed8 # +# Pcrel8 PC,#*4 +# Sprel8 SP,#*4 +# Immed7_4 #*4 +# thcc +# Addr8 (for B) +# Addr11 (for B no condition) +# ThAddr22 (for double BL, BLX) +# immed8 (no "#" as in BKPT and SWI) +# Rn_exclaim ! 
+# ldbrace (load instructions) +# strbrace (store instructions) +# RnIndirect4 [,#*4] +# RnIndirect2 [,#*2] +# RnIndirect1 [,#] +# RnRmIndirect [,] +# Pcrel8Indirect [PC,#*4] +# Sprel8Indirect [SP,#*4] + +# + + +@if defined(VERSION_6T2) || defined(VERSION_7) +# Ensure that the recursive rule for ARMcond is applied for assembly +with : ARMcondCk=1 { +@endif + + +:adc^CheckInIT_CZNO^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x105 & Rm0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_add_with_carry_flags(Rd0002,Rm0305); + Rd0002 = Rd0002 + Rm0305 + zext(CY); + resflags(Rd0002); + build CheckInIT_CZNO; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:adc^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=0xa & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + th_add_with_carry_flags(Rn0003,ThumbExpandImm12); + Rd0811 = Rn0003 + ThumbExpandImm12 + zext(CY); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:adc^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=0xa & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + th_add_with_carry_flags(Rn0003,thshift2); + local tmp = thshift2+zext(CY); + Rd0811 = Rn0003+tmp; + resflags(Rd0811); + build thSBIT_CZNO; +} +@endif # VERSION_6T2 || VERSION_7 + +:add^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Immed3 is TMode=1 & ItCond & op9=0x0e & Immed3 & Rn0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_addflags(Rn0305,Immed3); + Rd0002 = Rn0305 + Immed3; + resflags(Rd0002); + build CheckInIT_CZNO; +} + +:add^CheckInIT_CZNO^ItCond Rd0810,Immed8 is TMode=1 & ItCond & op11=0x06 & Rd0810 & Immed8 & CheckInIT_CZNO +{ + build ItCond; + th_addflags(Rd0810,Immed8); + Rd0810 = Rd0810 + Immed8; + resflags(Rd0810); + build CheckInIT_CZNO; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:add^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=8 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + th_addflags(Rn0003,ThumbExpandImm12); + Rd0811 = Rn0003+ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:addw^ItCond Rd0811,Rn0003,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & Rn0003; thc1515=0 & Rd0811) & Immed12 +{ + build ItCond; + th_addflags(Rn0003,Immed12); + Rd0811 = Rn0003+Immed12; + resflags(Rd0811); +} + +:add^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=8 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + local tmp = thshift2; + th_addflags(Rn0003,tmp); + Rd0811 = Rn0003+tmp; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:add^thSBIT_CZNO^ItCond^".w" Rd0811,sp,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=8 & thSBIT_CZNO & sp & sop0003=0xd; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + th_addflags(sp,ThumbExpandImm12); + Rd0811 = sp+ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:addw^ItCond Rd0811,sp,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & sop0003=0xd & sp; thc1515=0 & Rd0811) & Immed12 +{ + build ItCond; + th_addflags(sp,Immed12); + Rd0811 = sp+Immed12; + resflags(Rd0811); +} + +:add^thSBIT_CZNO^ItCond^".w" Rd0811,sp,thshift2 is TMode=1 & ItCond & op11=0x1d & 
thc0910=1 & sop0508=8 & thSBIT_CZNO & sop0003=0xd & sp; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + local tmp = thshift2; + th_addflags(sp,tmp); + Rd0811 = sp+tmp; + resflags(Rd0811); + build thSBIT_CZNO; +} + +@endif # VERSION_6T2 || VERSION_7 + +:add^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Rm0608 is TMode=1 & ItCond & op9=0x0c & Rm0608 & Rn0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_addflags(Rn0305,Rm0608); + Rd0002 = Rn0305 + Rm0608; + resflags(Rd0002); + build CheckInIT_CZNO; +} + +:add^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x44 & Hrd0002 & Hrm0305 +{ + build ItCond; + Hrd0002 = Hrd0002 + Hrm0305; +} + +:add^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x44 & Hrd0002 & Hrm0305 & hrd0002=7 & h1=1 +{ + build ItCond; + dest:4 = Hrd0002 + Hrm0305; + BranchWritePC(dest); + goto [pc]; +} + +:add^ItCond Rd0810,Sprel8 is TMode=1 & ItCond & op11=0x15 & Rd0810 & Sprel8 +{ + build ItCond; + Rd0810 = Sprel8; +} + +:add^ItCond sp,Immed7_4 is TMode=1 & ItCond & op7=0x160 & sp & Immed7_4 +{ + build ItCond; + sp = sp + Immed7_4; +} + +:adr^ItCond Rd0810,Pcrel8 is TMode=1 & ItCond & op11=0x14 & Rd0810 & Pcrel8 +{ + build ItCond; + Rd0810 = &Pcrel8; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:adr^ItCond^".w" Rd0811,NegPcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811) & NegPcrelImmed12Addr +{ + build ItCond; + Rd0811 = &NegPcrelImmed12Addr; +} + +:adr^ItCond^".w" Rd0811,PcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811) & PcrelImmed12Addr +{ + build ItCond; + Rd0811 = &PcrelImmed12Addr; +} + +:adr^ItCond^".w" Rd0811,NegPcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811 & thc0811=15) & NegPcrelImmed12Addr +{ + build ItCond; + pc = &NegPcrelImmed12Addr; + goto NegPcrelImmed12Addr; +} + +:adr^ItCond^".w" Rd0811,PcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811 & thc0811=15) & PcrelImmed12Addr +{ + build ItCond; + pc = &PcrelImmed12Addr; + goto PcrelImmed12Addr; +} + +@endif # VERSION_6T2 || VERSION_7 + +:and^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x100 & Rd0002 & Rm0305 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = Rd0002 & Rm0305; + resflags(Rd0002); + build CheckInIT_ZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:and^thSBIT_ZN^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=0 & thSBIT_ZN & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + Rd0811 = Rn0003 & ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_ZN; +} + +:and^thSBIT_ZN^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=0 & thSBIT_ZN & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + Rd0811 = Rn0003 & thshift2; + resflags(Rd0811); + build thSBIT_ZN; +} +@endif # VERSION_6T2 || VERSION_7 + +macro th_set_carry_for_asr(op1,shift_count) { + local bit = (op1 s>> (shift_count-1)) & 1; + tmpCY = ((shift_count == 0) && CY) || ((shift_count != 0) && (bit != 0)); +} + +#note that this is a special case where immed5 = 0, which corresponds to a shift amount of 32 +:asr^CheckInIT_CZN^ItCond Rd0002,Rm0305,"#0x20" is TMode=1 & ItCond & op11=0x02 & Immed5 & Rm0305 & Rd0002 & immed5=0 & CheckInIT_CZN +{ + build ItCond; + 
th_set_carry_for_asr(Rm0305,32:1); + Rd0002 = Rm0305 s>> 32; + resflags(Rd0002); + build CheckInIT_CZN; +} + +:asr^CheckInIT_CZN^ItCond Rd0002,Rm0305,Immed5 is TMode=1 & ItCond & op11=0x02 & Immed5 & Rm0305 & Rd0002 & CheckInIT_CZN +{ + build ItCond; + th_set_carry_for_asr(Rm0305,Immed5); + Rd0002 = Rm0305 s>> Immed5; + resflags(Rd0002); + build CheckInIT_CZN; +} + +:asr^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x104 & Rd0002 & Rs0305 & CheckInIT_CZN +{ + build ItCond; + local shift_amount = Rs0305 & 0xff; + th_set_carry_for_asr(Rd0002,shift_amount); + Rd0002 = Rd0002 s>> (shift_amount); + resflags(Rd0002); + build CheckInIT_CZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:asr^thSBIT_CZN^ItCond^".w" Rd0811,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & sop0003=0xf; thc1515=0 & Rd0811 & thc0405=2 & thshift2 +{ + build ItCond; + build thshift2; + Rd0811 = thshift2; + tmpCY = shift_carry; + resflags(Rd0811); + build thSBIT_CZN; +} + +:asr^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op11=0x1f & thc0910=1 & sop0508=2 & thSBIT_CZN & Rn0003; op12=0xf & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + local shift_amount = Rm0003 & 0xff; + th_set_carry_for_asr(Rn0003,shift_amount); + Rd0811 = Rn0003 s>> (shift_amount); + resflags(Rd0811); + build thSBIT_CZN; +} +@endif # VERSION_6T2 || VERSION_7 + +# This constructor is identical to the 16-bit udf instruction. It looks +# like it implemented an unconditional branch instruction (giving it a +# made-up name), but the Thumb 16-bit instruction does not support +# unconditional branching. + +@ifdef NOT_AN_INSTRUCTION +:bal Addr8 is TMode=1 & op12=0xd & thcond=14 & Addr8 +{ + goto Addr8; +} +@endif + +:b^thcc Addr8 is TMode=1 & ItCond & op12=0b1101 & $(THCC) & Addr8 +{ + if (thcc) goto Addr8; +} + +:b^ItCond Addr11 is TMode=1 & ItCond & op11=0b11100 & Addr11 +{ + goto Addr11; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:b^part2thcc^".w" ThAddr20 is TMode=1 & (part2op=0x1e & $(PART2THCC); part2c1415=2 & part2c1212=0) & ThAddr20 +{ + if (part2thcc) goto ThAddr20; +} + +:b^ItCond^".w" ThAddr24 is TMode=1 & ItCond & (op11=0x1e; part2c1415=2 & part2c1212=1) & ThAddr24 +{ + build ItCond; + goto ThAddr24; +} +@endif # VERSION_6T2 || VERSION_7 + + +@if defined(VERSION_6T2) || defined(VERSION_7) +:bfc^ItCond Rd0811,thLsbImm,thBitWidth is TMode=1 & ItCond & op0=0xf36f; thc1515=0 & Rd0811 & thc0505=0 & thLsbImm & thMsbImm & thBitWidth +{ + build ItCond; + clearMask:4 = (-1 << (thMsbImm + 1)) | (-1 >> (32 - thLsbImm)); + Rd0811 = Rd0811 & clearMask; +} + +:bfi^ItCond Rd0811,Rn0003,thLsbImm,thBitWidth is TMode=1 & ItCond & op4=0xf36 & Rn0003; thc1515=0 & Rd0811 & thc0505=0 & thLsbImm & thMsbImm & thBitWidth +{ + build ItCond; + clearMask:4 = (-1 << (thMsbImm + 1)) | (-1 >> (32 - thLsbImm)); + bits:4 = (Rn0003 << thLsbImm) & ~clearMask; + Rd0811 = (Rd0811 & clearMask) | bits; +} + +@endif # VERSION_6T2 || VERSION_7 + + +:bic^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10e & Rd0002 & Rm0305 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = Rd0002 & (~Rm0305); + resflags(Rd0002); + build CheckInIT_ZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:bic^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=1 & thSBIT_ZN & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + Rd0811 = Rn0003&(~ThumbExpandImm12); + resflags(Rd0811); + build thSBIT_ZN; +} +
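+# Worked example for the clearMask computation in the bfc/bfi constructors
+# above (illustrative comment, not from the ARM manual): for "bfc r0,#8,#8",
+# thLsbImm=8 and thMsbImm=15, so
+#   clearMask = (-1 << 16) | (-1 >> 24) = 0xffff0000 | 0x000000ff = 0xffff00ff
+# i.e. every bit outside the field stays set; bfc ANDs the destination with it,
+# while bfi instead fills the cleared field with ((Rn0003 << 8) & 0x0000ff00).
+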
+:bic^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=1 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + Rd0811 = Rn0003&(~thshift2); + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} +@endif # VERSION_6T2 || VERSION_7 + + +@if defined(VERSION_5) + +# Exception Generation and UDF + +:hlt immed6 is TMode=1 & op6=0b1011101010 & immed6 +{ + software_hlt(immed6:4); +} + +:bkpt immed8 is TMode=1 & ItCond & op8=0xbe & immed8 +{ + software_bkpt(immed8:4); + # Not a mistake, breakpoint always unconditional even in IT Block + build ItCond; +} + +:hvc "#"^tmp is TMode=1 & op4=0xf7e & thc0003; op12=0x8 & thc0011 [tmp = thc0003 << 12 | thc0011;] +{ + software_hvc(tmp:4); +} + +# Requires Security Extensions +:smc^ItCond "#"^thc0003 is TMode=1 & ItCond & op4=0xf7f & thc0003; op12=0x8 +{ + build ItCond; + software_smc(thc0003:1); +} + +@ifndef NOT_AN_INSTRUCTION +:udf^ItCond "#"thc0007 is TMode=1 & ItCond & op8 = 0xde & thc0007 +{ + build ItCond; + local excaddr:4 = inst_start; + local target:4 = software_udf(thc0007:4, excaddr); + goto [target]; +} +@endif + +:udf^ItCond "#"tmp is TMode=1 & ItCond & op4=0xf7f & thc0003; op12=0xa & thc0011 [tmp = thc0003 << 12 | thc0011;] +{ + build ItCond; + local excaddr:4 = inst_start; + local target:4 = software_udf(tmp:4, excaddr); + goto [target]; +} + +@endif # VERSION_5 + +:bl^ItCond ThAddr24 is TMode=1 & ItCond & (op11=0x1e; part2c1415=3 & part2c1212=1) & ThAddr24 +{ + build ItCond; + lr = inst_next|1; + SetThumbMode(1); + call ThAddr24; +} + +@ifndef VERSION_6T2 + +:bl^ItCond "#"^off is TMode=1 & ItCond & op11=0x1e & soffset11 [ off = inst_start + 4 + (soffset11 << 12); ] +{ + build ItCond; + lr = off:4; +} + +:bl^ItCond "#"^off is TMode=1 & ItCond & op11=0x1f & offset11 [ off = offset11 << 1; ] +{ + build ItCond; + local dest = lr + off:4; + lr = inst_next|1; + SetThumbMode(1); + goto [dest]; +} + +:blx^ItCond "#"^off is TMode=1 & ItCond & op11=0x1d & offset11 & thc0000=0 [ off = offset11 << 1; ] +{ + build ItCond; + local dest = (lr & (~0x3)) + off:4; + lr = inst_next|1; + SetThumbMode(0); + call [dest]; +} +@endif + +:bl^ItCond ThAddr24 is TMode=1 & CALLoverride=1 & ItCond & (op11=0x1e; part2c1415=3 & part2c1212=1) & ThAddr24 +{ + build ItCond; + lr = inst_next|1; + SetThumbMode(1); + goto ThAddr24; +} + +bxns: "" is thc0003 { } +bxns: "ns" is thc0002=0b100 { } + +@if defined(VERSION_5) + +:blx^ItCond ThArmAddr23 is TMode=1 & ItCond & (op11=0x1e;part2op=0x1d) & ThArmAddr23 [ TMode=0; globalset(ThArmAddr23,TMode); TMode=1; ] +{ + build ItCond; + lr = inst_next|1; + SetThumbMode(0); + call ThArmAddr23; + # Don't set this, assume return will set for emulation. Was screwing up decompiler. TB = 1; +} + +:blx^ItCond ThArmAddr23 is TMode=1 & ItCond & CALLoverride=1 & (op11=0x1e;part2op=0x1d) & ThArmAddr23 [ TMode=0; globalset(ThArmAddr23,TMode); TMode=1; ] +{ + build ItCond; + lr = inst_next|1; + SetThumbMode(0); + goto ThArmAddr23; +} + +:blx^ItCond ThArmAddr23 is TMode=1 & ItCond & (op11=0x1e; part2c1415=3 & part2c1212=0) & ThArmAddr23 [ TMode=0; globalset(ThArmAddr23,TMode); TMode=1; ] +{ + build ItCond; + lr = inst_next|1; + SetThumbMode(0); + call ThArmAddr23; +} + +:blx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & op7=0x08f & Hrm0305 & bxns +{ + build ItCond; + BXWritePC(Hrm0305); + lr = inst_next|1; + call [pc]; + # Don't set this, assume return will set for emulation. Was screwing up decompiler. 
TB = 1; +} + +@endif # VERSION_5 + +:bx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & op7=0x08e & Hrm0305 & hrm0305=6 & h2=1 & bxns +{ + build ItCond; + BXWritePC(Hrm0305); + return [pc]; +} + +:bx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & op7=0x08e & Hrm0305 & bxns +{ + build ItCond; + BXWritePC(Hrm0305); + goto [pc]; +} + +:bx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & LRset=1 & op7=0x08e & Hrm0305 & bxns [ LRset=0; TMode=1; globalset(inst_next,LRset); globalset(inst_next,TMode); ] +{ + build ItCond; + BXWritePC(Hrm0305); + call [pc]; + # Don't set this, assume return will set for emulation. Was screwing up decompiler. TB = 1; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:bxj^ItCond Rn0003 is TMode=1 & ItCond & op4=0xf3c & Rn0003; op0=0x8f00 +{ + build ItCond; + success:1 = jazelle_branch(); + if (success) goto <done>; + SetThumbMode( (Rn0003&0x00000001)!=0 ); + local tmp=Rn0003&0xfffffffe; + goto [tmp]; +<done> +} # Optional change to THUMB + +@endif # VERSION_6T2 || VERSION_7 + +:cbnz^ItCond Rn0002,Addr5 is TMode=1 & ItCond & op12=0xb & thc1111=1 & thc1010=0 & thc0808=1 & Rn0002 & Addr5 +{ + build ItCond; + local tmp = Rn0002 != 0; + if (tmp) goto Addr5; +} + +:cbz^ItCond Rn0002,Addr5 is TMode=1 & ItCond & op12=0xb & thc1111=0 & thc1010=0 & thc0808=1 & Rn0002 & Addr5 +{ + build ItCond; + local tmp = Rn0002 == 0; + if (tmp) goto Addr5; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:cdp^ItCond thcpn,thopcode1,thCRd,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thopcode1 & thCRn; thCRd & thcpn & thopcode2 & thc0404=0 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op1:4 = thopcode1; + t_op2:4 = thopcode2; + coprocessor_function(t_cpn,t_op1,t_op2,thCRd,thCRn,thCRm); +} + +:cdp2^ItCond thcpn,thopcode1,thCRd,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thopcode1 & thCRn; thCRd & thcpn & thopcode2 & thc0404=0 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op1:4 = thopcode1; + t_op2:4 = thopcode2; + coprocessor_function2(t_cpn,t_op1,t_op2,thCRd,thCRn,thCRm); +} + +define pcodeop IndexCheck; + +:chka^ItCond Hrn0002,Rm0306 is TMode=1 & ItCond & TEEMode=1 & op8=0xca & Rm0306 & Hrn0002 +{ + build ItCond; + local tmp = Hrn0002 <= Rm0306; + if (!tmp) goto inst_next; + lr = inst_next|1; + IndexCheck(); +} + +:clrex^ItCond is TMode=1 & ItCond & op0=0xf3bf; op0=0x8f2f +{ + build ItCond; + ClearExclusiveLocal(); +} + +:clz^ItCond Rd0811,Rm0003 is TMode=1 & ItCond & op4=0xfab & Rm0003; op12=15 & Rd0811 +{ + build ItCond; + Rd0811 = count_leading_zeroes(Rm0003); +} + +:cmn^ItCond Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=8 & thc0404=1 & Rn0003; thc1515=0 & thc0811=15) & ThumbExpandImm12 +{ + build ItCond; + th_addflags(Rn0003,ThumbExpandImm12); + local tmp = Rn0003 + ThumbExpandImm12; + resflags(tmp); + th_affectflags(); +} + +:cmn^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op4=0xeb1 & Rn0003; thc1515=0 & thc0811=15 & thshift2 +{ + build ItCond; + build thshift2; + th_addflags(Rn0003,thshift2); + local tmp = Rn0003+thshift2; + resflags(tmp); + th_affectflags(); +} + +@endif # VERSION_6T2 || VERSION_7 + +:cmn^ItCond Rn0002,Rm0305 is TMode=1 & ItCond & op6=0x10b & Rm0305 & Rn0002 +{ + build ItCond; + th_addflags(Rn0002,Rm0305); + local tmp = Rn0002 + Rm0305; + resflags(tmp); + th_affectflags(); +} + +:cmp^ItCond Rn0810,Immed8 is TMode=1 & ItCond & op11=5 & Rn0810 & Immed8 +{ + build ItCond; + th_subflags(Rn0810,Immed8); + local tmp = Rn0810 - Immed8; + resflags(tmp); + th_affectflags(); +} + +@if defined(VERSION_6T2) ||
defined(VERSION_7) + +:cmp^ItCond^".w" Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & thc0404=1 & sop0508=13 & Rn0003; thc1515=0 & thc0811=15) & ThumbExpandImm12 +{ + build ItCond; + th_subflags(Rn0003,ThumbExpandImm12); + local tmp = Rn0003 - ThumbExpandImm12; + resflags(tmp); + th_affectflags(); +} + +:cmp^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op4=0xebb & Rn0003; thc1515=0 & thc0811=15 & thshift2 +{ + build ItCond; + th_subflags(Rn0003,thshift2); + local tmp = Rn0003 - thshift2; + resflags(tmp); + th_affectflags(); +} + +@endif # VERSION_6T2 || VERSION_7 + +:cmp^ItCond Rn0002,Rm0305 is TMode=1 & ItCond & op6=0x10a & Rm0305 & Rn0002 +{ + build ItCond; + th_subflags(Rn0002,Rm0305); + local tmp = Rn0002 - Rm0305; + resflags(tmp); + th_affectflags(); +} + +:cmp^ItCond Hrn0002,Hrm0305 is TMode=1 & ItCond & op8=0x45 & Hrm0305 & Hrn0002 +{ + build ItCond; + th_subflags(Hrn0002,Hrm0305); + local tmp = Hrn0002 - Hrm0305; + resflags(tmp); + th_affectflags(); +} + +@if defined(VERSION_6) + +aflag: "a" is thc0202=1 & thc0404=0 { enableDataAbortInterrupts(); } +aflag: "a" is thc0202=1 { disableDataAbortInterrupts(); } +aflag: is thc0202=0 { } + +iflag: "i" is thc0101=1 & thc0404=0 { enableIRQinterrupts(); } # 7M: set primask +iflag: "i" is thc0101=1 { disableIRQinterrupts(); } # 7M: clear primask +iflag: is thc0101=0 { } + +fflag: "f" is thc0000=1 & thc0404=0 { enableFIQinterrupts(); } # 7M: set faultmask +fflag: "f" is thc0000=1 { disableFIQinterrupts(); } # 7M: clear faultmask +fflag: is thc0000=0 { } + +iflags: aflag^iflag^fflag is aflag & iflag & fflag { } + +:cpsie^ItCond iflags is TMode=1 & ItCond & op8=0xb6 & sop0507=3 & thc0303=0 & iflags & thc0404=0 +{ + build ItCond; + build iflags; + # see iflags for semantics +} + +:cpsid^ItCond iflags is TMode=1 & ItCond & op8=0xb6 & sop0507=3 & thc0303=0 & iflags & thc0404=1 +{ + build ItCond; + build iflags; + # see iflags for semantics +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +# For SCR 11074, implement the "Encoding T2" 32-bit Thumb-2 cps change processor state instruction +# Note the manual says there are no conditions on this insn +# +th2_aflag: "a" is thc0707=1 & thc0910=0x2 { enableDataAbortInterrupts(); } +th2_aflag: "a" is thc0707=1 { disableDataAbortInterrupts(); } +th2_aflag: is thc0707=0 { } + +th2_iflag: "i" is thc0606=1 & thc0910=0x2 { enableIRQinterrupts(); } # 7M: set primask +th2_iflag: "i" is thc0606=1 { disableIRQinterrupts(); } # 7M: clear primask +th2_iflag: is thc0606=0 { } + +th2_fflag: "f" is thc0505=1 & thc0910=0 { enableFIQinterrupts(); } # 7M: set faultmask +th2_fflag: "f" is thc0505=1 { disableFIQinterrupts(); } # 7M: clear faultmask +th2_fflag: is thc0505=0 { } + +th2_iflags: th2_aflag^th2_iflag^th2_fflag is th2_aflag & th2_iflag & th2_fflag { } + +th2_SetMode: "#"^16 is thc0004=0x10 { setUserMode(); } +th2_SetMode: "#"^17 is thc0004=0x11 { setFIQMode(); } +th2_SetMode: "#"^18 is thc0004=0x12 { setIRQMode(); } +th2_SetMode: "#"^19 is thc0004=0x13 { setSupervisorMode(); } +th2_SetMode: "#"^22 is thc0004=0x16 { setMonitorMode(); } +th2_SetMode: "#"^23 is thc0004=0x17 { setAbortMode(); } +th2_SetMode: "#"^27 is thc0004=0x1b { setUndefinedMode(); } +th2_SetMode: "#"^31 is thc0004=0x1f { setSystemMode(); } + +# 11110 0 1110 1 0 1111 10 0 0 0 +:cpsie th2_iflags, th2_SetMode is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2imm6=0x2f ; + thc0910=0x2 & th2_SetMode & op11=0x10 & th2_iflags +{ + build th2_iflags; +} + +:cpsid th2_iflags, th2_SetMode 
is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2imm6=0x2f ; + thc0910=0x3 & th2_SetMode & op11=0x10 & th2_iflags +{ + build th2_iflags; +} + +:cps th2_SetMode is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2imm6=0x2f ; + thc0808=0x1 & th2_SetMode & op11=0x10 +{ +} + +@endif # (VERSION_6T2) || defined(VERSION_7) + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:dbg^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3af; op4=0x80f & thc0004 +{ +@if defined(VERSION_7) + HintDebug(thc0004:1); +@endif # VERSION_7 +} + +@if defined(VERSION_7) +:dmb^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3bf; op4=0x8f5 & thc0004 +{ + DataMemoryBarrier(thc0004:1); +} + +:dsb^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3bf; op4=0x8f4 & thc0004 +{ + DataSynchronizationBarrier(thc0004:1); +} +@endif + +:eor^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=4 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + Rd0811 = Rn0003 ^ ThumbExpandImm12; + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:eor^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=4 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + Rd0811 = Rn0003 ^ thshift2; + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} + + +:enterx^ItCond is TMode=1 & ItCond & op0=0xf3bf; op0=0x8f1f [ TEEMode=1; globalset(inst_next,TEEMode); ] +{ + build ItCond; +} + +:leavex^ItCond is TMode=1 & ItCond & op0=0xf3bf; op0=0x8f0f [ TEEMode=0; globalset(inst_next,TEEMode); ] +{ + build ItCond; +} + +@endif # VERSION_6T2 || VERSION_7 + + +:eor^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x101 & Rm0305 & Rd0002 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = Rd0002 ^ Rm0305; + resflags(Rd0002); + build CheckInIT_ZN; +} + +@if defined(VERSION_7) + +:hb^ItCond "#"^immed8 is TMode=1 & ItCond & TEEMode=1 & op9=0x61 & immed8 +{ + build ItCond; +} + + +:isb^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3bf; op4=0x8f6 & thc0004 +{ + InstructionSynchronizationBarrier(thc0004:1); +} + +@endif # VERSION_7 + +@if defined(VERSION_8) + +# F5.1.178 p2969 SEVL T1 variant +:sevl + is TMode=1 & op0=0b1011111101010000 + & ItCond + { + build ItCond; + SendEvent(); + } + +@endif # VERSION_8 + +@if defined(VERSION_6T2) || defined(VERSION_7) +X: "t" is TMode=1 & ((thc0404=1 & thc0303=1) | (thc0404=0 & thc0303=0)) & (thc0202=1 | thc0101=1 | thc0000=1) { } +X: "e" is TMode=1 & ((thc0404=1 & thc0303=0) | (thc0404=0 & thc0303=1)) & (thc0202=1 | thc0101=1 | thc0000=1) { } +X: "" is TMode=1 & thc0404 & thc0303 & (thc0202=0 & thc0101=0 & thc0000=0) { } +Y: "t" is TMode=1 & ((thc0404=1 & thc0202=1) | (thc0404=0 & thc0202=0)) & (thc0101=1 | thc0000=1) { } +Y: "e" is TMode=1 & ((thc0404=1 & thc0202=0) | (thc0404=0 & thc0202=1)) & (thc0101=1 | thc0000=1) { } +Y: "" is TMode=1 & thc0404 & thc0202 & (thc0101=0 & thc0000=0) { } +Z: "t" is TMode=1 & ((thc0404=1 & thc0101=1) | (thc0404=0 & thc0101=0)) & (thc0000=1) { } +Z: "e" is TMode=1 & ((thc0404=1 & thc0101=0) | (thc0404=0 & thc0101=1)) & (thc0000=1) { } +Z: "" is TMode=1 & thc0404 & thc0101 & (thc0000=0) { } + +XYZ: is TMode=1 & sop0003=8 { } +XYZ: X^Y^Z is TMode=1 & X & Y & Z { } + + +:it^XYZ it_thfcc is TMode=1 & op8=0xbf & XYZ & $(IT_THFCC) & thc0507 & thc0004 + [ itmode=0; cond_base = thc0507; cond_shft=thc0004; globalset(inst_next,condit); ] +{ + # just sets up the condition and If Then/Else mask +} + 
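+# Illustrative note on the it constructor above (comment only): an IT
+# instruction such as "itte eq" carries a first condition plus a then/else
+# mask. The constructor itself has no run-time semantics; it only records
+# that state in cond_base/cond_shft and globalset()s the condit context onto
+# the following instructions, whose ItCond operand then predicates their
+# semantics (for "itte eq": the first two instructions execute when Z==1,
+# the third when Z==0).
+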
+:ldc^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=0 & thL4=1; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_load(t_cpn,thCRd,taddrmode5); +} + +:ldcl^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=1 & thL4=1; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_loadlong(t_cpn,thCRd,taddrmode5); +} + +:ldc2^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=0 & thL4=1; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_load(t_cpn,thCRd,taddrmode5); +} + +:ldc2l^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=1 & thL4=1; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_loadlong(t_cpn,thCRd,taddrmode5); +} + +@endif # VERSION_6T2 || VERSION_7 + +:ldmia^ItCond Rn_exclaim,ldbrace is TMode=1 & ItCond & op11=0x19 & Rn_exclaim & ldbrace & Rn_exclaim_WB +{ + build ItCond; + build Rn_exclaim; + build ldbrace; + build Rn_exclaim_WB; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:ldm^ItCond^".w" Rn0003,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=0 & thc0404=1 & Rn0003; thc1313=0 & thldrlist_inc +{ + build ItCond; + mult_addr = Rn0003; + build thldrlist_inc; +} + +:ldm^ItCond^".w" Rn0003!,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=1 & thc0404=1 & Rn0003; thc1313=0 & thldrlist_inc +{ + build ItCond; + mult_addr = Rn0003; + build thldrlist_inc; + Rn0003 = mult_addr; +} + +:ldm^ItCond Rn0003,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=0 & thc0404=1 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_inc +{ + build ItCond; + mult_addr = Rn0003; + build thldrlist_inc; + LoadWritePC(pc); + goto [pc]; +} + +:ldm^ItCond^".w" Rn0003,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=1 & thc0404=1 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_inc +{ + build ItCond; + mult_addr = Rn0003; + build thldrlist_inc; + Rn0003 = mult_addr; + LoadWritePC(pc); + goto [pc]; +} + + +:ldmdb^ItCond Rn0003,thldrlist_dec is TMode=1 & ItCond & op4=0xe91 & Rn0003; thc1313=0 & thldrlist_dec +{ + build ItCond; + mult_addr = Rn0003-4; + build thldrlist_dec; +} + +:ldmdb^ItCond Rn0003!,thldrlist_dec is TMode=1 & ItCond & op4=0xe93 & Rn0003; thc1313=0 & thldrlist_dec +{ + build ItCond; + mult_addr = Rn0003-4; + build thldrlist_dec; + Rn0003 = mult_addr + 4; +} + +:ldmdb^ItCond Rn0003,thldrlist_dec is TMode=1 & ItCond & op4=0xe91 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_dec +{ + build ItCond; + mult_addr = Rn0003-4; + build thldrlist_dec; + LoadWritePC(pc); + goto [pc]; +} + +:ldmdb^ItCond Rn0003,thldrlist_dec is TMode=1 & ItCond & op4=0xe93 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_dec +{ + build ItCond; + mult_addr = Rn0003-4; + build thldrlist_dec; + Rn0003 = mult_addr + 4; + LoadWritePC(pc); + goto [pc]; +} + +@endif # VERSION_6T2 || VERSION_7 + +:ldr^ItCond Rd0002,RnIndirect4 is TMode=1 & ItCond & op11=0xd & RnIndirect4 & Rd0002 +{ + build ItCond; + build RnIndirect4; + Rd0002 = *RnIndirect4; +} + +:ldr^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2c & RnRmIndirect & Rd0002 +{ + build ItCond; + build RnRmIndirect; + Rd0002 = *RnRmIndirect; +} + +:ldr^ItCond Rd0810,Pcrel8Indirect is TMode=1 & ItCond & op11=9 & Pcrel8Indirect & Rd0810 +{ + build ItCond; + build Pcrel8Indirect; + Rd0810 = Pcrel8Indirect; +} 
+ + # Note: NO '*' in the Pcrel8Indirect load above IS INTENTIONAL +:ldr^ItCond Rd0810,Sprel8Indirect is TMode=1 & ItCond & op11=0x13 & Sprel8Indirect & Rd0810 +{ + build ItCond; + build Sprel8Indirect; + Rd0810 = *Sprel8Indirect; +} + +:ldrb^ItCond Rd0002,RnIndirect1 is TMode=1 & ItCond & op11=0xf & RnIndirect1 & Rd0002 +{ + build ItCond; + build RnIndirect1; + Rd0002 = zext( *:1 RnIndirect1 ); +} + +:ldrb^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2e & RnRmIndirect & Rd0002 +{ + build ItCond; + build RnRmIndirect; + Rd0002 = zext( *:1 RnRmIndirect); +} + +:ldrh^ItCond Rd0002,RnIndirect2 is TMode=1 & ItCond & op11=0x11 & RnIndirect2 & Rd0002 +{ + build ItCond; + build RnIndirect2; + Rd0002 = zext( *:2 RnIndirect2); +} + +:ldrh^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2d & RnRmIndirect & Rd0002 +{ + build ItCond; + build RnRmIndirect; + Rd0002 = zext( *:2 RnRmIndirect); +} + +:ldrsb^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2b & RnRmIndirect & Rd0002 +{ + build ItCond; + build RnRmIndirect; + Rd0002 = sext( *:1 RnRmIndirect); +} + +:ldrsh^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2f & RnRmIndirect & Rd0002 +{ + build ItCond; + build RnRmIndirect; + Rd0002 = sext( *:2 RnRmIndirect); +} + +define pcodeop ExclusiveAccess; + +@if defined(VERSION_7) + +:ldrexb^ItCond Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; Rt1215 & thc0811=15 & thc0407=4 & thc0003=15 +{ + build ItCond; + local tmp = Rn0003; + ExclusiveAccess(tmp); + val:1 = *tmp; + Rt1215 = zext(val); +} + +:ldrexh^ItCond Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; Rt1215 & thc0811=15 & thc0407=5 & thc0003=15 +{ + build ItCond; + local tmp = Rn0003; + ExclusiveAccess(tmp); + val:2 = *tmp; + Rt1215 = zext(val); +} + +:ldrexd^ItCond Rt1215,Rt0811,[Rn0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; Rt1215 & Rt0811 & thc0407=7 & thc0003=15 +{ + build ItCond; + local tmp = Rn0003; + ExclusiveAccess(tmp); + val1:4 = *tmp; + val2:4 = *(tmp + 4); + Rt1215 = val1; + Rt0811 = val2; +} + +@endif # VERSION_7 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:ldrex^ItCond Rt1215,[Rn0003,Immed8_4] is TMode=1 & ItCond & op4=0xe85 & Rn0003; Rt1215 & thc0811=15 & Immed8_4 +{ + build ItCond; + local tmp = Rn0003 + Immed8_4; + ExclusiveAccess(tmp); + Rt1215 = *tmp; +} + + +:ldr^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8d; Rt1215 & RtGotoCheck) & RnIndirect12 +{ + build ItCond; + build RnIndirect12; + Rt1215 = *RnIndirect12; + build RtGotoCheck; +} + +:ldr^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf85; Rt1215 & thc1111=1 & RtGotoCheck) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + Rt1215 = *RnIndirectPUW; + build RtGotoCheck; +} + +:ldr^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf8d & sop0003=15; Rt1215 & RtGotoCheck) & PcrelOffset12 +{ + build ItCond; + build PcrelOffset12; + Rt1215 = PcrelOffset12:4; + build RtGotoCheck; +} + +# overlaps pattern with the next ldr instruction, must occur first +:ldr^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf85 & sop0003=15; Rt1215 & RtGotoCheck) & PcrelOffset12 +{ + build ItCond; + build PcrelOffset12; + Rt1215 = PcrelOffset12:4; + build RtGotoCheck; +} + +:ldr^ItCond^".w" Rt1215,[Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xf85 & Rn0003; Rt1215 & RtGotoCheck & thc1111=0 & sop0610=0 & thc0405=0 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + Rm0003; + Rt1215 = *tmp; + build RtGotoCheck; +} + +:ldr^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf85 &
Rn0003; Rt1215 & RtGotoCheck & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + Rt1215 = *tmp; + build RtGotoCheck; +} + +:ldrb^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf89; Rt1215) & RnIndirect12 +{ + build ItCond; + build RnIndirect12; + tmp:1 = *RnIndirect12; + Rt1215 = zext(tmp); +} + +:ldrb^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf81; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + tmp:1 = *RnIndirectPUW; + Rt1215 = zext(tmp); +} + +:ldrb^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf89 & sop0003=15; Rt1215) & PcrelOffset12 +{ + build ItCond; + build PcrelOffset12; + tmp:1 = PcrelOffset12:1; + Rt1215 = zext(tmp); +} +:ldrb^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf81 & sop0003=15; Rt1215) & PcrelOffset12 +{ + build ItCond; + build PcrelOffset12; + tmp:1 = PcrelOffset12:1; + Rt1215 = zext(tmp); +} + +:ldrb^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf81 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + val:1 = *tmp; + Rt1215 = zext(val); +} + +:ldrbt^ItCond^".w" Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf81 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + val:1 = *tmp; + Rt1215 = zext(val); +} + +:ldrd^ItCond Rt1215,Rt0811,RnIndirectPUW1 is TMode=1 & ItCond & (op9=0x74 & thc0606=1 & thc0404=1 & Rn0003; Rt1215 & Rt0811) & $(RN_INDIRECT_PUW1) +{ + build ItCond; + build RnIndirectPUW1; + Rt1215 = *RnIndirectPUW1; + Rt0811 = *(RnIndirectPUW1+4); +} + +:ldrd^ItCond Rt1215,Rt0811,Pcrel8_s8 is TMode=1 & ItCond & op9=0x74 & thc0606=1 & thc0404=1 & sop0003=15; Rt1215 & Rt0811 & Pcrel8_s8 +{ + build ItCond; + build Pcrel8_s8; + local val = Pcrel8_s8; + Rt1215 = val(4); + Rt0811 = val(0); +} + +:ldrh.w^ItCond Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8B; Rt1215) & RnIndirect12 +{ + build ItCond; + build RnIndirect12; + tmp:2 = *RnIndirect12; + Rt1215 = zext(tmp); +} + +:ldrh^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf83; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + tmp:2 = *RnIndirectPUW; + Rt1215 = zext(tmp); +} + +:ldrh^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf83 & sop0003=15; Rt1215) & PcrelOffset12 +{ + build ItCond; + local tmp = PcrelOffset12:2; + Rt1215 = zext(tmp); +} +:ldrh^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf8b & sop0003=15; Rt1215) & PcrelOffset12 +{ + build ItCond; + tmp:2 = PcrelOffset12:2; + Rt1215 = zext(tmp); +} + +:ldrh^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf83 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + val:2 = *tmp; + Rt1215 = zext(val); +} + +:ldrht^ItCond^".w" Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf83 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + val:2 = *tmp; + Rt1215 = zext(val); +} + +:ldrsb^ItCond".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf99; Rt1215) & RnIndirect12 +{ + build ItCond; + tmp:1 = *RnIndirect12; + Rt1215 = sext(tmp); +} + +:ldrsb^ItCond".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf91; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + tmp:1 = *RnIndirectPUW; + Rt1215 = 
sext(tmp); +} + +:ldrsb^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op8=0xf9 & thc0506=0 & thc0404=1 & sop0003=15; Rt1215) & PcrelOffset12 +{ + build ItCond; + tmp:1 = *PcrelOffset12; + Rt1215 = sext(tmp); +} + +:ldrsb^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf91 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + val:1 = *tmp; + Rt1215 = sext(val); +} + +:ldrsbt^ItCond^".w" Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf91 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + val:1 = *tmp; + Rt1215 = sext(val); +} + +:ldrsh^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf9B; Rt1215) & RnIndirect12 +{ + build ItCond; + tmp:2 = *RnIndirect12; + Rt1215 = sext(tmp); +} + +:ldrsh^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf93; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + tmp:2 = *RnIndirectPUW; + Rt1215 = sext(tmp); +} + +:ldrsh^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op8=0xf9 & thc0506=1 & thc0404=1 & sop0003=15; Rt1215) & PcrelOffset12 +{ + build ItCond; + build PcrelOffset12; + tmp:2 = *PcrelOffset12; + Rt1215 = sext(tmp); +} + +:ldrsh^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf93 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + val:2 = *tmp; + Rt1215 = sext(val); +} + +:ldrsht^ItCond^".w" Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf93 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + val:2 = *tmp; + Rt1215 = sext(val); +} + +:ldrt^ItCond^".w" Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf85 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + Rt1215 = *tmp; +} + +@endif # VERSION_6T2 || VERSION_7 + +macro th_set_carry_for_lsl(op1,shift_count) { + local bit = (op1 << (shift_count-1)) & 0x80000000; + tmpCY = ((shift_count == 0) && CY) || ((shift_count != 0) && (bit != 0)); +} + +:lsl^CheckInIT_CZN^ItCond Rd0002,Rm0305,Immed5 is TMode=1 & ItCond & op11=0x0 & Immed5 & Rm0305 & Rd0002 & CheckInIT_CZN +{ + build ItCond; + th_set_carry_for_lsl(Rm0305,Immed5); + Rd0002 = Rm0305 << Immed5; + resflags(Rd0002); + build CheckInIT_CZN; +} + +:lsl^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x102 & Rs0305 & Rd0002 & CheckInIT_CZN +{ + build ItCond; + local shift_count = Rs0305 & 0xff; + th_set_carry_for_lsl(Rd0002,shift_count); + Rd0002 = Rd0002 << shift_count; + resflags(Rd0002); + build CheckInIT_CZN; +} + +macro th_set_carry_for_lsr(op1,shift_count) { + local bit = (op1 >> (shift_count-1)) & 1; + tmpCY = ((shift_count == 0) && CY) || ((shift_count != 0) && (bit != 0)); +} + +#note that this is a special case where immed5 = 0, which corresponds to a shift amount of 32 +:lsr^CheckInIT_CZN^ItCond Rd0002,Rm0305,"#0x20" is TMode=1 & ItCond & op11=1 & Immed5 & Rm0305 & Rd0002 & immed5=0 & CheckInIT_CZN +{ + build ItCond; + th_set_carry_for_lsr(Rm0305,32:1); + Rd0002 = Rm0305 >> 32; + resflags(Rd0002); + build CheckInIT_CZN; +} + +:lsr^CheckInIT_CZN^ItCond Rd0002,Rm0305,Immed5 is TMode=1 & ItCond & op11=1 & Immed5 & Rm0305 & Rd0002 & CheckInIT_CZN +{ + build ItCond; + local shift_amount = Immed5; + th_set_carry_for_lsr(Rm0305,shift_amount); + Rd0002 = Rm0305 >> Immed5; + resflags(Rd0002); + build CheckInIT_CZN; +} + 
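+# Worked example for the carry macros above (illustrative comment): Thumb
+# encodes a shift amount of 32 as immed5=0, so "lsrs r0,r1,#0x20" computes
+# bit = (r1 >> 31) & 1, making C the last bit shifted out, and r1 >> 32
+# yields a zero result; a genuine shift_count of 0 (possible in the
+# register-shift forms) instead leaves the old carry CY unchanged.
+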
+:lsr^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x103 & Rd0002 & Rs0305 & CheckInIT_CZN +{ + build ItCond; + local shift_amount = (Rs0305 & 0xff); + th_set_carry_for_lsr(Rd0002,shift_amount); + Rd0002 = Rd0002 >> (Rs0305 & 0xff); + resflags(Rd0002); + build CheckInIT_CZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:lsl^thSBIT_CZN^ItCond^".w" Rd0811,Rm0003,thLsbImm is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & sop0003=15; thc1515=0 & Rd0811 & thc0405=0 & Rm0003 & thLsbImm +{ + build ItCond; + th_set_carry_for_lsl(Rm0003,thLsbImm); + Rd0811 = Rm0003 << thLsbImm; + resflags(Rd0811); + build thSBIT_CZN; +} + +:lsl^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op11=0x1f & thc0910=1 & sop0508=0 & thSBIT_CZN & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + local shift_amount = (Rm0003 & 0xff); + th_set_carry_for_lsl(Rn0003,shift_amount); + Rd0811 = Rn0003 << (shift_amount); + resflags(Rd0811); + build thSBIT_CZN; +} + +:lsr^thSBIT_CZN^ItCond^".w" Rd0811,Rm0003,thLsbImm is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & sop0003=15; thc1515=0 & Rd0811 & thc0405=1 & Rm0003 & thLsbImm +{ + build ItCond; + th_set_carry_for_lsr(Rm0003,thLsbImm); + Rd0811 = Rm0003 >> thLsbImm; + resflags(Rd0811); + build thSBIT_CZN; +} + +:lsr^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op11=0x1f & thc0910=1 & sop0508=1 & thSBIT_CZN & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + local shift_amount = Rm0003 & 0xff; + th_set_carry_for_lsr(Rn0003,shift_amount); + Rd0811 = Rn0003 >> shift_amount; + resflags(Rd0811); + build thSBIT_CZN; +} + + +@endif # VERSION_6T2 || VERSION_7 + +:mcr^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thc0507 & thc0404=0 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op1:4 = thc0507; + t_op2:4 = thopcode2; + coprocessor_moveto(t_cpn,t_op1,t_op2,Rt1215,thCRn,thCRm); +} + +:mcr2^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thc0507 & thc0404=0 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op1:4 = thc0507; + t_op2:4 = thopcode2; + coprocessor_moveto(t_cpn,t_op1,t_op2,Rt1215,thCRn,thCRm); +} + +:mcrr^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xec4 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op:4 = thopcode1; + coprocessor_moveto2(t_cpn,t_op,Rt1215,Rn0003,thCRm); +} + +:mcrr2^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xfc4 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op:4 = thopcode1; + coprocessor_moveto2(t_cpn,t_op,Rt1215,Rn0003,thCRm); +} + + +:mov^CheckInIT_ZN^ItCond Rd0810,Immed8 is TMode=1 & ItCond & op11=4 & Rd0810 & Immed8 & CheckInIT_ZN +{ + build ItCond; + Rd0810 = Immed8; + resflags(Rd0810); + build CheckInIT_ZN; +} + +:mov^CheckInIT_ZN^ItCond Rd0002,Rn0305 is TMode=1 & ItCond & op6=0x000 & Rn0305 & Rd0002 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = Rn0305; + resflags(Rd0002); + build CheckInIT_ZN; +} + +:mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & Hrd0002 +{ + build ItCond; + Hrd0002 = Hrm0305; +} + +:mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & Hrd0002 & hrd0002=7 & h1=1 +{ + build ItCond; + dest:4 = Hrm0305; + BranchWritePC(dest); + goto [pc]; +} + +:mov^ItCond
Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & rm0306=14 & Hrd0002 & hrd0002=7 & h1=1 +{ + build ItCond; + dest:4 = Hrm0305; + BranchWritePC(dest); + return [pc]; +} + +:mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & hrm0305=7 & Hrd0002 & hrd0002=6 & h1=1 [ LRset=1; TMode=1; globalset(inst_next,LRset); globalset(inst_next,TMode); ] +{ + build ItCond; + Hrd0002 = Hrm0305; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:mov^thSBIT_ZN^ItCond^".w" Rd0811,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=2 & thSBIT_ZN & sop0003=15; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + Rd0811 = ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_ZN; +} + +:movw^ItCond Rd0811,Immed16 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=2 & thc0404=0; thc1515=0 & Rd0811) & Immed16 +{ + build ItCond; + Rd0811 = zext(Immed16); + resflags(Rd0811); +} + +:mov^thSBIT_ZN^ItCond^".w" Rd0811,Rm0003 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_ZN & sop0003=15; op12=0 & Rd0811 & thc0407=0 & Rm0003 +{ + build ItCond; + Rd0811 = Rm0003; + resflags(Rd0811); + build thSBIT_ZN; +} + +:movt^ItCond Rd0811,Immed16 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=6 & thc0404=0; thc1515=0 & Rd0811) & Immed16 +{ + build ItCond; + Rd0811 = (zext(Immed16) << 16) | (Rd0811 & 0xffff); +} + + +:mrc^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thc0507 & thc0404=1 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op1:4 = thc0507; + t_op2:4 = thopcode2; + Rt1215 = coprocessor_movefromRt(t_cpn,t_op1,t_op2,thCRn,thCRm); +} + +:mrc2^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thc0507 & thc0404=1 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op1:4 = thc0507; + t_op2:4 = thopcode2; + Rt1215 = coprocessor_movefromRt(t_cpn,t_op1,t_op2,thCRn,thCRm); +} + +:mrrc^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xec5 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op:4 = thopcode1; + Rt1215 = coprocessor_movefromRt(t_cpn,t_op,thCRm); + Rn0003 = coprocessor_movefromRt2(t_cpn,t_op,thCRm); +} + +:mrrc2^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xfc5 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm +{ + build ItCond; + t_cpn:4 = thcpn; + t_op:4 = thopcode1; + Rt1215 = coprocessor_movefromRt(t_cpn,t_op,thCRm); + Rn0003 = coprocessor_movefromRt2(t_cpn,t_op,thCRm); +} + +macro readAPSR(r) { +# TODO: GE bits have not been included + r = r | zext( (NG<<4) | (ZR<<3) | (CY<<2) | (OV<<1) | (Q) ) << 27; +} + +macro writeAPSR(r) { +# TODO: GE bits have not been included + local tmp = r >> 27 & 0x1f; + Q = ((tmp ) & 0x1) != 0; + OV = ((tmp >> 1) & 0x1) != 0; + CY = ((tmp >> 2) & 0x1) != 0; + ZR = ((tmp >> 3) & 0x1) != 0; + NG = ((tmp >> 4) & 0x1) != 0; +} + +@if defined(VERSION_7M) + +define pcodeop getMainStackPointer; +define pcodeop getProcessStackPointer; +define pcodeop getBasePriority; +define pcodeop getCurrentExceptionNumber; + +mrsipsr: "i" is thc0000=1 & Rd0811 { + b:1 = isCurrentModePrivileged(); + if (!b) goto <done>; + ipsr:4 = getCurrentExceptionNumber(); + Rd0811 = Rd0811 | (ipsr & 0x1f); +<done> +} +mrsipsr: is thc0000=0 { } +mrsepsr: "e" is thc0101=1 { } +mrsepsr: is thc0101=0 { } +mrsapsr: is thc0202=1 { } +mrsapsr: "a" is thc0202=0 & Rd0811 { readAPSR(Rd0811); } + +mrspsr:
mrsipsr^mrsepsr^mrsapsr^"psr" is mrsipsr & mrsepsr & mrsapsr & Rd0811 { + Rd0811 = 0; + build mrsapsr; + build mrsipsr; +} +mrspsr: "xpsr" is sysm02=3 & mrsipsr & mrsepsr & mrsapsr & Rd0811 { + Rd0811 = 0; + build mrsapsr; + build mrsipsr; +} + +:mrs^ItCond Rd0811,mrspsr is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm37=0 & mrspsr +{ + build ItCond; + build mrspsr; +} + +msp: "msp" is epsilon {} + +:mrs^ItCond Rd0811,msp is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=8 & msp +{ + build ItCond; + Rd0811 = getMainStackPointer(); +} + +psp: "psp" is epsilon {} + +:mrs^ItCond Rd0811,psp is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=9 & psp +{ + build ItCond; + Rd0811 = getProcessStackPointer(); +} + +primask: "primask" is epsilon {} + +:mrs^ItCond Rd0811,primask is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=16 & primask +{ + build ItCond; + Rd0811 = 0; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + Rd0811 = isIRQinterruptsEnabled(); # should reflect primask register/bit +} + +basepri: "basepri" is epsilon {} + +:mrs^ItCond Rd0811,basepri is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=17 & basepri +{ + build ItCond; + Rd0811 = 0; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + Rd0811 = getBasePriority(); +} + +basepri_max: "basepri_max" is epsilon {} + +:mrs^ItCond Rd0811,basepri_max is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=18 & basepri_max +{ + build ItCond; + Rd0811 = 0; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + Rd0811 = getBasePriority(); +} + +faultmask: "faultmask" is epsilon {} + +:mrs^ItCond Rd0811,faultmask is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=19 & faultmask +{ + build ItCond; + Rd0811 = 0; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + Rd0811 = isFIQinterruptsEnabled(); # should reflect faultmask register/bit +} + +define pcodeop isThreadModePrivileged; +define pcodeop isUsingMainStack; + +control: "control" is epsilon {} + +:mrs^ItCond Rd0811,control is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=20 & control +{ + build ItCond; + notPrivileged:1 = isThreadModePrivileged() != 1:1; + altStackMode:1 = isUsingMainStack() != 1:1; + Rd0811 = zext((altStackMode << 1) | notPrivileged); +} + +@endif + +:mrs^ItCond Rd0811,cpsr is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=0 & cpsr +{ + build ItCond; + tmp:4 = 0; + readAPSR(tmp); + Rd0811 = tmp; +} + +:mrs^ItCond Rd0811,spsr is TMode=1 & ItCond & op0=0xf3ff; op12=0x8 & Rd0811 & sysm=0 & spsr +{ + build ItCond; + Rd0811 = spsr; +} + + +@if defined(VERSION_7M) + +msripsr: "i" is thc0000=1 { } +msripsr: is thc0000=0 { } +msrepsr: "e" is thc0101=1 { } +msrepsr: is thc0101=0 { } +msrapsr: is thc0202=1 { } +msrapsr: "a" is thc0202=0 & Rn0003 { + cpsr = cpsr | (Rn0003 & 0xf8000000); + writeAPSR(cpsr); +} + +msrpsr: msripsr^msrepsr^msrapsr^"psr" is msripsr & msrepsr & msrapsr { + build msrapsr; +} +msrpsr: "xpsr" is sysm02=3 & msrapsr { + build msrapsr; +} + +:msr^ItCond msrpsr,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm37=0 & msrpsr +{ + build ItCond; + build msrpsr; +} + +define pcodeop setMainStackPointer; +define pcodeop setProcessStackPointer; +define pcodeop setBasePriority; + +:msr^ItCond msp,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=8 & msp +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + setMainStackPointer(Rn0003); +} + +:msr^ItCond psp,Rn0003 is 
TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=9 & psp +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + setProcessStackPointer(Rn0003); +} + +:msr^ItCond primask,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=16 & primask +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + enableIRQinterrupts((Rn0003 & 1) == 1); # should set/clear primask register/bit +} + +:msr^ItCond basepri,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=17 & basepri +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + setBasePriority(Rn0003); +} + +:msr^ItCond basepri_max,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=18 & basepri_max +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + if (Rn0003 == 0) goto inst_next; +# TODO: does the following compare need to be signed?? + cur:4 = getBasePriority(); + if (cur != 0 && Rn0003 >= cur) goto inst_next; + setBasePriority(Rn0003); +} + +:msr^ItCond faultmask,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=19 & faultmask +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + enableFIQinterrupts((Rn0003 & 1) == 1); +} + +define pcodeop setStackMode; + +:msr^ItCond control,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=20 & control +{ + build ItCond; + b:1 = isCurrentModePrivileged(); + if (!b) goto inst_next; + privileged:1 = (Rn0003 & 1) == 0; + setThreadModePrivileged(privileged); +# TODO: not sure about the following semantics + b = isThreadMode(); + if (!b) goto inst_next; + stackMode:1 = isUsingMainStack() == 1:1; + setStackMode(stackMode); +# TODO: should we set sp ? 
+} + +@endif + +thpsrmask: is th_psrmask=0 { export 0:4; } +thpsrmask: "_c" is th_psrmask=1 { export 0xff:4; } +thpsrmask: "_x" is th_psrmask=2 { export 0xff00:4; } +thpsrmask: "_cx" is th_psrmask=3 { export 0xffff:4; } +thpsrmask: "_s" is th_psrmask=4 { export 0xff0000:4; } +thpsrmask: "_cs" is th_psrmask=5 { export 0xff00ff:4; } +thpsrmask: "_xs" is th_psrmask=6 { export 0xffff00:4; } +thpsrmask: "_cxs" is th_psrmask=7 { export 0xffffff:4; } +thpsrmask: "_f" is th_psrmask=8 { export 0xff000000:4; } +thpsrmask: "_cf" is th_psrmask=9 { export 0xff0000ff:4; } +thpsrmask: "_xf" is th_psrmask=10 { export 0xff00ff00:4; } +thpsrmask: "_cxf" is th_psrmask=11 { export 0xff00ffff:4; } +thpsrmask: "_sf" is th_psrmask=12 { export 0xffff0000:4; } +thpsrmask: "_csf" is th_psrmask=13 { export 0xffff00ff:4; } +thpsrmask: "_xsf" is th_psrmask=14 { export 0xffffff00:4; } +thpsrmask: "_cxsf" is th_psrmask=15 { export 0xffffffff:4; } + +thcpsrmask: cpsr^thpsrmask is thpsrmask & cpsr { export thpsrmask; } + +:msr^ItCond thcpsrmask,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & thcpsrmask & thc0007=0 +{ + build ItCond; + build thcpsrmask; + cpsr = (cpsr& ~thcpsrmask) | (Rn0003 & thcpsrmask); + writeAPSR(cpsr); +} + +thspsrmask: spsr^thpsrmask is thpsrmask & spsr { export thpsrmask; } + +:msr^ItCond thspsrmask,Rn0003 is TMode=1 & ItCond & op4=0xf39 & Rn0003; op12=0x8 & thspsrmask & thc0007=0 +{ + build ItCond; + build thspsrmask; + spsr = (spsr& ~thspsrmask) | (Rn0003 & thspsrmask); +} + +:mvn^thSBIT_ZN^ItCond Rd0811,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=3 & thSBIT_ZN & thc0003=15; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + Rd0811 = ~ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_ZN; +} + +:mvn^thSBIT_ZN^ItCond^".w" Rd0811,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=3 & thSBIT_ZN & thc0003=15; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + Rd0811 = ~thshift2; + resflags(Rd0811); + build thSBIT_ZN; +} + +@endif # VERSION_6T2 || VERSION_7 + +:mul^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10d & Rm0305 & Rd0002 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = Rm0305 * Rd0002; + resflags(Rd0002); + build CheckInIT_ZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:mla^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb0 & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + Rd0811 = Rn0003 * Rm0003 + Ra1215; +} + +:mls^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb0 & Rn0003; Ra1215 & Rd0811 & sop0407=1 & Rm0003 +{ + build ItCond; + Rd0811 = Ra1215- Rn0003 * Rm0003; +} + +:mul^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb0 & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + Rd0811 = Rn0003 * Rm0003; +} + +@endif # VERSION_6T2 || VERSION_7 + +:mvn^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10f & Rm0305 & Rd0002 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = ~Rm0305; + resflags(Rd0002); + build CheckInIT_ZN; +} + +:nop^ItCond is TMode=1 & ItCond & op0=0xbf00 +{ +} + +:nop^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8000 +{ +} + +:nop is op0=0x46c0 # This is just like a mov r0 r0 +{ +} + +:orr^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10c & Rm0305 & Rd0002 & CheckInIT_ZN +{ + build ItCond; + Rd0002 = Rd0002 | Rm0305; + resflags(Rd0002); + build CheckInIT_ZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:orn^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & 
ItCond & (op11=0x1e & thc0909=0 & sop0508=3 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + Rd0811 = Rn0003 | ~(ThumbExpandImm12); + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:orn^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=3 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + Rd0811 = Rn0003 | ~(thshift2); + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:orr^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=2 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + Rd0811 = Rn0003 | ThumbExpandImm12; + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:orr^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + Rd0811 = Rn0003 | thshift2; + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:pkhbt^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op4=0xeac & Rn0003; thc1515=0 & Rd0811 & thc0505=0 & thc0404=0 & thshift2 +{ + build ItCond; + Rd0811 = (Rn0003 & 0x0000ffff) | (thshift2 & 0xffff0000); + th_logicflags(); + resflags(Rd0811); +} + +:pkhtb^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op4=0xeac & Rn0003; thc1515=0 & Rd0811 & thc0505=1 & thc0404=0 & thshift2 +{ + build ItCond; + Rd0811 = (Rn0003 & 0xffff0000) | (thshift2 & 0x0000ffff); + th_logicflags(); + resflags(Rd0811); +} + +:pld^ItCond Rn0003,"#"^offset12 is TMode=1 & ItCond & op6=0x3e2 & thwbit=0 & thc0404=1 & Rn0003; op12=0xf & offset12 +{ + build ItCond; + addr:4 = Rn0003 + offset12; + HintPreloadData(addr); +} + +:pldw^ItCond Rn0003,"#"^offset12 is TMode=1 & ItCond & op6=0x3e2 & thwbit=1 & thc0404=1 & Rn0003; op12=0xf & offset12 +{ + build ItCond; + addr:4 = Rn0003 + offset12; + HintPreloadDataForWrite(addr); +} + +:pld^ItCond Rn0003,"#-"^immed8 is TMode=1 & ItCond & op6=0x3e0 & thwbit=0 & thc0404=1 & Rn0003; op8=0xfc & immed8 +{ + build ItCond; + addr:4 = Rn0003 - immed8; + HintPreloadData(addr); +} + +:pldw^ItCond Rn0003,"#-"^immed8 is TMode=1 & ItCond & op6=0x3e0 & thwbit=1 & thc0404=1 & Rn0003; op8=0xfc & immed8 +{ + build ItCond; + addr:4 = Rn0003 - immed8; + HintPreloadDataForWrite(addr); +} + +:pld^ItCond PcrelOffset12 is TMode=1 & ItCond & (op8=0xf8 & thc0506=0 & thc0004=0x1f; thc1215=0xf) & PcrelOffset12 +{ + build ItCond; + HintPreloadData(PcrelOffset12); +} + +:pld^ItCond Rn0003,Rm0003"lsl #"^thc0405 is TMode=1 & ItCond & op6=0x3e0 & thwbit=0 & thc0404=1 & Rn0003; op8=0xf0 & thc0607=0 & thc0405 & Rm0003 +{ + build ItCond; + addr:4 = Rn0003 + (Rm0003 << thc0405); + HintPreloadData(addr); +} + +:pldw^ItCond Rn0003,Rm0003,"lsl #"^thc0405 is TMode=1 & ItCond & op6=0x3e0 & thwbit=1 & thc0404=1 & Rn0003; op8=0xf0 & thc0607=0 & thc0405 & Rm0003 +{ + build ItCond; + addr:4 = Rn0003 + (Rm0003 << thc0405); + HintPreloadDataForWrite(addr); +} + + +:pli^ItCond Rn0003,"#"^offset12 is TMode=1 & ItCond & op4=0xf99 & Rn0003; op12=0xf & offset12 +{ + build ItCond; + addr:4 = Rn0003 + offset12; + HintPreloadInstruction(addr); +} + +:pli^ItCond Rn0003,"#-"^immed8 is TMode=1 & ItCond & op4=0xf91 & Rn0003; op8=0xfc & immed8 +{ + build ItCond; + addr:4 = Rn0003 - immed8; + HintPreloadInstruction(addr); +} + +:pli^ItCond PcrelOffset12 is TMode=1 & ItCond & (op8=0xf9 & thc0506=0 & thc0004=0x1f; thc1215=0xf) & PcrelOffset12 +{ + build 
ItCond; + HintPreloadInstruction(PcrelOffset12); +} + +:pli^ItCond Rn0003,Rm0003"lsl #"^thc0405 is TMode=1 & ItCond & op4=0xf91 & Rn0003; op6=0x3c0 & thc0405 & Rm0003 +{ + build ItCond; + addr:4 = Rn0003 + (Rm0003 << thc0405); + HintPreloadInstruction(addr); +} + + +@endif # VERSION_6T2 || VERSION_7 + +# +# Removed the masking of the stack pointer on push and pop to ignore the lower 2 bits. +# This isn't really needed for modeling. +# NOTE: It may need to be put back in to model correctly for nasty stack shenanigans. +# +:pop^ItCond ldbrace is TMode=1 & ItCond & op9=0x5e & R=0 & ldbrace +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp; + build ldbrace; + sp = mult_addr; +} + +:pop^ItCond pclbrace is TMode=1 & ItCond & op9=0x5e & R=1 & pclbrace +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp; + build pclbrace; + sp = mult_addr; + LoadWritePC(pc); + return [pc]; +} + +:pop^ItCond thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thldrlist_inc +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp; + build thldrlist_inc; + sp = mult_addr; +} + +:pop^ItCond thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thldrlist_inc & thc1515=1 +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp; + build thldrlist_inc; + sp = mult_addr; + LoadWritePC(pc); + return [pc]; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:pop^ItCond^".w" thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thc1515=0 & thc1313=0 & thldrlist_inc +{ + build ItCond; + mult_addr = sp; + build thldrlist_inc; + sp = mult_addr; +} + +:pop^ItCond^".w" Rt1215 is TMode=1 & ItCond & op0=0xf85d; Rt1215 & offset12=0xb04 +{ + build ItCond; + Rt1215 = *sp; + sp=sp+4; +} + +:pop^ItCond^".w" thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thc1515=1 & thc1313=0 & thldrlist_inc +{ + build ItCond; + mult_addr = sp; + build thldrlist_inc; + sp = mult_addr; + LoadWritePC(pc); + return [pc]; +} + +:pop^ItCond^".w" Rt1215 is TMode=1 & ItCond & op0=0xf85d; Rt1215 & op12=15 & offset12=0xb04 +{ + build ItCond; + dest:4 = *sp; + sp=sp+4; + LoadWritePC(dest); + return [pc]; +} + +:push^ItCond^".w" thstrlist_dec is TMode=1 & ItCond & op0=0xe8ad; thc1515=0 & thc1313=0 & thstrlist_dec +{ + build ItCond; + mult_addr = sp-4; + build thstrlist_dec; + sp = mult_addr + 4; +} + +:push^ItCond^".w" Rt1215 is TMode=1 & ItCond & op0=0xf84d; Rt1215 & offset12=0xd04 +{ + build ItCond; + sp=sp-4; + *sp = Rt1215; +} + +@endif # VERSION_6T2 || VERSION_7 + +:push^ItCond psbrace is TMode=1 & ItCond & op9=0x5a & R=0 & psbrace +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp; + build psbrace; + sp = mult_addr; +} + +:push^ItCond pcpbrace is TMode=1 & ItCond & op9=0x5a & R=1 & pcpbrace +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp; + build pcpbrace; + sp = mult_addr; +} + +:push^ItCond thstrlist_dec is TMode=1 & ItCond & op0=0xe92d; thstrlist_dec +{ + build ItCond; +# mult_addr = sp & 0xfffffffc; + mult_addr = sp-4; + build thstrlist_dec; + sp = mult_addr+4; +} + +@if defined(VERSION_5E) + +:qadd^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x8 & Rm0003 +{ + build ItCond; + local sum1 = Rm0003 + Rn0003; + sum1 = SignedSaturate(sum1,32:2); + Q = SignedDoesSaturate(sum1,32:2); + Rd0811 = sum1; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:qadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 +{ + build ItCond; + local lRn = Rn0003 & 0xffff; + local 
lRm = Rm0003 & 0xffff; + local uRn = (Rn0003 >> 16) & 0xffff; + local uRm = (Rm0003 >> 16) & 0xffff; + sum1:2 = lRn:2 + lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 + uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd0811 = (zext(sum2) << 16) | zext(sum1); +} + +:qadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 +{ + build ItCond; + local rn1 = Rn0003 & 0xff; + local rm1 = Rm0003 & 0xff; + local rn2 = (Rn0003 >> 8) & 0xff; + local rm2 = (Rm0003 >> 8) & 0xff; + local rn3 = (Rn0003 >> 16) & 0xff; + local rm3 = (Rm0003 >> 16) & 0xff; + local rn4 = (Rn0003 >> 24) & 0xff; + local rm4 = (Rm0003 >> 24) & 0xff; + sum1:1 = rn1:1 + rm1:1; + sum1 = SignedSaturate(sum1,8:2); + sum2:1 = rn2:1 + rm2:1; + sum2 = SignedSaturate(sum2,8:2); + sum3:1 = rn3:1 + rm3:1; + sum3 = SignedSaturate(sum3,8:2); + sum4:1 = rn4:1 + rm4:1; + sum4 = SignedSaturate(sum4,8:2); + Rd0811 = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +# qaddsubx +:qasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 +{ + build ItCond; + local lRn = Rn0003 & 0xffff; + local lRm = Rm0003 & 0xffff; + local uRn = (Rn0003 >> 16) & 0xffff; + local uRm = (Rm0003 >> 16) & 0xffff; + sum1:2 = lRn:2 - lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 + uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd0811 = (zext(sum2) << 16) | zext(sum1); +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:qdadd^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x9 & Rm0003 +{ + build ItCond; + tmp:4 = Rn0003 * 2; + tmp = SignedSaturate(tmp,32:2); + Q = SignedDoesSaturate(tmp,32:2); + tmp = tmp + Rm0003; + tmp = SignedSaturate(tmp,32:2); + Q = Q | SignedDoesSaturate(tmp,32:2); + Rd0811 = tmp; +} + +:qdsub^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0xb & Rm0003 +{ + build ItCond; + tmp:4 = Rn0003 * 2; + tmp = SignedSaturate(tmp,32:2); + Q = SignedDoesSaturate(tmp,32:2); + tmp = Rm0003 - tmp; + tmp = SignedSaturate(tmp,32:2); + Q = Q | SignedDoesSaturate(tmp,32:2); + Rd0811 = tmp; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +# qsubaddx +:qsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 +{ + build ItCond; + local lRn = Rn0003 & 0xffff; + local lRm = Rm0003 & 0xffff; + local uRn = (Rn0003 >> 16) & 0xffff; + local uRm = (Rm0003 >> 16) & 0xffff; + sum1:2 = lRn:2 + lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 - uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd0811 = (zext(sum2) << 16) | zext(sum1); +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:qsub^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0xa & Rm0003 +{ + build ItCond; + tmp:4 = Rm0003 - Rn0003; + tmp = SignedSaturate(tmp,32:2); + Q = SignedDoesSaturate(tmp,32:2); + Rd0811 = tmp; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:qsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 +{ + build ItCond; + local lRn = Rn0003 & 0xffff; + local lRm = Rm0003 & 0xffff; + local uRn = (Rn0003 >> 16) & 0xffff; + local uRm = (Rm0003 >> 16) & 0xffff; + sum1:2 = lRn:2 - lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 - uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd0811 = (zext(sum2) << 16) | zext(sum1); +} + +:qsub8^ItCond 
Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 +{ + build ItCond; + local rn1 = Rn0003 & 0xff; + local rm1 = Rm0003 & 0xff; + local rn2 = (Rn0003 >> 8) & 0xff; + local rm2 = (Rm0003 >> 8) & 0xff; + local rn3 = (Rn0003 >> 16) & 0xff; + local rm3 = (Rm0003 >> 16) & 0xff; + local rn4 = (Rn0003 >> 24) & 0xff; + local rm4 = (Rm0003 >> 24) & 0xff; + sum1:1 = rn1:1 - rm1:1; + sum1 = SignedSaturate(sum1,8:2); + sum2:1 = rn2:1 - rm2:1; + sum2 = SignedSaturate(sum2,8:2); + sum3:1 = rn3:1 - rm3:1; + sum3 = SignedSaturate(sum3,8:2); + sum4:1 = rn4:1 - rm4:1; + sum4 = SignedSaturate(sum4,8:2); + Rd0811 = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +@endif # VERSION_6 + +@if defined(THUMB_2) + +# WARNING Rm0003 on the first 2 bytes must be the same value as Rm0003 on the last bytes! +# but there is no easy way to check this now... +:rev^ItCond Rd0811,Rm0003 is TMode=1 & ItCond & op4=0xfa9; op6=0x2e8 & Rd0811 & Rm0003 +{ + build ItCond; + local tmp1 = Rm0003 & 0xff; + local tmp2 = (Rm0003 >> 8) & 0xff; + local tmp3 = (Rm0003 >> 16) & 0xff; + local tmp4 = (Rm0003 >> 24) & 0xff; + Rd0811 = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; +} + +@endif # THUMB_2 + +:rsb^CheckInIT_CZNO^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x109 & Rm0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_subflags0(Rm0305); + Rd0002 = 0-Rm0305; + resflags(Rd0002); + build CheckInIT_CZNO; +} + +@if defined(VERSION_6) + +:rev^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x2e8 & Rd0002 & Rm0305 +{ + build ItCond; + local tmp1 = Rm0305 & 0xff; + local tmp2 = (Rm0305 >> 8) & 0xff; + local tmp3 = (Rm0305 >> 16) & 0xff; + local tmp4 = (Rm0305 >> 24) & 0xff; + Rd0002 = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; +} + +:rev16^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x2e9 & Rd0002 & Rm0305 +{ + build ItCond; + local tmp1 = Rm0305 & 0xff; + local tmp2 = (Rm0305 >> 8) & 0xff; + local tmp3 = (Rm0305 >> 16) & 0xff; + local tmp4 = (Rm0305 >> 24) & 0xff; + Rd0002 = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; +} + +:revsh^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x2eb & Rd0002 & Rm0305 +{ + build ItCond; + local tmp1 = Rm0305 & 0xff; + local tmp2 = (Rm0305 >> 8) & 0xff; + local result = (tmp1 << 8) | tmp2; + Rd0002 = sext(result:2); +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +macro BitReverse(val) { + tval:1 = val; + result:1 = 0; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + val = result; +} + +:rbit^ItCond Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=0xa & Rn0003 +{ + build ItCond; + t:4 = Rm0003 & 0xff; + b1:1 = t:1; + t = (Rm0003 >> 8) & 0xff; + b2:1 = t:1; + t = (Rm0003 >> 16) & 0xff; + b3:1 = t:1; + t = (Rm0003 >> 24) & 0xff; + b4:1 = t:1; + BitReverse(b1); + BitReverse(b2); + BitReverse(b3); + BitReverse(b4); + Rd0811 = (zext(b1) << 24) | (zext(b2) << 16) | (zext(b3) << 8) | zext(b4); +} + +:rev^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=8 & Rn0003 +{ + build ItCond; + local tmp1 = 
Rm0003 & 0xff; + local tmp2 = (Rm0003 >> 8) & 0xff; + local tmp3 = (Rm0003 >> 16) & 0xff; + local tmp4 = (Rm0003 >> 24) & 0xff; + Rd0811 = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; +} + +:rev16^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=9 & Rn0003 +{ + build ItCond; + local tmp1 = Rm0003 & 0xff; + local tmp2 = (Rm0003 >> 8) & 0xff; + local tmp3 = (Rm0003 >> 16) & 0xff; + local tmp4 = (Rm0003 >> 24) & 0xff; + Rd0811 = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; +} + +:revsh^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=0xb & Rn0003 +{ + build ItCond; + local tmp1 = Rm0003 & 0xff; + local tmp2 = (Rm0003 >> 8) & 0xff; + local result = (tmp1 << 8) | tmp2; + Rd0811 = sext(result:2); +} + +# RFE instructions for Thumb-2 "Encoding T1" and "Encoding T2" on page 1574 +# +:rfedb part2Rd0003 is TMode=1 & part2c0615=0x3a0 & part2c0505=0x0 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000 +{ + # register list is always: pc, cpsr + ptr:4 = part2Rd0003 - 4; + cpsr = *ptr; + ptr = ptr - 4; + dest:4 = *ptr; + BranchWritePC(dest); + return [pc]; +} + +:rfedb part2Rd0003^"!" is TMode=1 & part2c0615=0x3a0 & part2c0505=0x1 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000 +{ + # register list is always: pc, cpsr + ptr:4 = part2Rd0003 - 4; + cpsr = *ptr; + ptr = ptr - 4; + dest:4 = *ptr; + part2Rd0003 = ptr; + BranchWritePC(dest); + return [pc]; +} + +:rfeia part2Rd0003 is TMode=1 & part2c0615=0x3a6 & part2c0505=0x0 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000 +{ + # register list is always: pc, cpsr + ptr:4 = part2Rd0003; + cpsr = *ptr; + ptr = ptr + 4; + dest:4 = *ptr; + BranchWritePC(dest); + return [pc]; +} + +:rfeia part2Rd0003^"!" is TMode=1 & part2c0615=0x3a6 & part2c0505=0x1 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000 +{ + # register list is always: pc, cpsr + ptr:4 = part2Rd0003; + cpsr = *ptr; + ptr = ptr + 4; + dest:4 = *ptr; + part2Rd0003 = ptr + 4; + BranchWritePC(dest); + return [pc]; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + + +:rsb^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=14 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + th_subflags(ThumbExpandImm12,Rn0003); + Rd0811 = ThumbExpandImm12 - Rn0003; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:rsb^thSBIT_CZNO^ItCond Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=14 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + th_subflags(thshift2,Rn0003); + Rd0811 = thshift2 - Rn0003; + resflags(Rd0811); + build thSBIT_CZNO; +} + +@endif # VERSION_6 + +macro th_set_carry_for_ror(result, count) { + local bit = result & 0x80000000; + tmpCY = ((count == 0) && CY) || ((count != 0) && (bit != 0)); +} + +:ror^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x107 & Rs0305 & Rd0002 & CheckInIT_CZN +{ + build ItCond; + local shift_amount = Rs0305 & 0x1f; + local tmp = (Rd0002 >> shift_amount)|(Rd0002 << (32-shift_amount)); + th_set_carry_for_ror(tmp,Rs0305 & 0xff); + Rd0002 = tmp; + resflags(Rd0002); + build CheckInIT_CZN; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:ror^thSBIT_CZN^ItCond Rd0811,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & thc0003=0xf; thc1515=0 & Rd0811 & thc0405=3 & thshift2 +{ + build ItCond; + Rd0811 = thshift2; + tmpCY = shift_carry; + resflags(Rd0811); + build thSBIT_CZN; +} + +:ror^thSBIT_CZN^ItCond^".w" 
Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op8=0xfa & thc0707=0 & thc0506=3 & thSBIT_CZN & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + local shift_amount = Rm0003 & 0x1f; + local tmp = (Rn0003>>shift_amount)|(Rn0003<<(32-shift_amount)); + th_set_carry_for_ror(tmp,Rm0003 & 0xff); + Rd0811 = tmp; + resflags(Rd0811); + build thSBIT_CZN; +} + +:rrx^thSBIT_CZN^ItCond Rd0811,Rm0003 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & thc0003=0xf; thc1515=0 & thc1214=0 & Rd0811 & thc0607=0 & thc0405=3 & Rm0003 +{ + build ItCond; + local tmp1=Rm0003&1; + shift_carry=tmp1(0); + local tmp2 = (zext(CY)<<31)|(Rm0003>>1); + Rd0811 = tmp2; + th_logicflags(); + resflags(Rd0811); + build thSBIT_CZN; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:sadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + local tmpRn0003 = Rn0003; + sum1:4 = sext(tmpRn0003[ 0,16]) + sext(tmpRm0003[ 0,16]); + sum2:4 = sext(tmpRn0003[16,16]) + sext(tmpRm0003[16,16]); + Rd0811[ 0,16] = sum1:2; + Rd0811[16,16] = sum2:2; + GE1 = sum1 s>= 0; + GE2 = sum1 s>= 0; + GE3 = sum2 s>= 0; + GE4 = sum2 s>= 0; +} + +:sadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + local tmpRn0003 = Rn0003; + sum1:4 = sext(tmpRn0003[ 0,8]) + sext(tmpRm0003[ 0,8]); + sum2:4 = sext(tmpRn0003[ 8,8]) + sext(tmpRm0003[ 8,8]); + sum3:4 = sext(tmpRn0003[16,8]) + sext(tmpRm0003[16,8]); + sum4:4 = sext(tmpRn0003[24,8]) + sext(tmpRm0003[24,8]); + Rd0811[ 0,8] = sum1:1; + Rd0811[ 8,8] = sum2:1; + Rd0811[16,8] = sum3:1; + Rd0811[24,8] = sum4:1; + GE1 = sum1 s>= 0; + GE2 = sum2 s>= 0; + GE3 = sum3 s>= 0; + GE4 = sum4 s>= 0; +} + +:sasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + local tmpRn0003 = Rn0003; + diff:4 = sext(tmpRn0003[ 0,16]) - sext(tmpRm0003[16,16]); + sum:4 = sext(tmpRn0003[16,16]) + sext(tmpRm0003[ 0,16]); + Rd0811[ 0,16] = diff[ 0,16]; + Rd0811[16,16] = sum[ 0,16]; + GE1 = diff s>= 0; + GE2 = diff s>= 0; + GE3 = sum s>= 0; + GE4 = sum s>= 0; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +:sbc^CheckInIT_CZNO^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x106 & Rm0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_sub_with_carry_flags(Rd0002,Rm0305); + Rd0002 = Rd0002 - Rm0305 - zext(!CY); + resflags(Rd0002); + build CheckInIT_CZNO; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:sbc^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=11 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + th_sub_with_carry_flags(Rn0003,ThumbExpandImm12); + Rd0811 = Rn0003 - ThumbExpandImm12 - zext(!CY); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:sbc^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=11 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + th_sub_with_carry_flags(Rn0003,thshift2); + Rd0811 = Rn0003 - thshift2 - zext(!CY); + resflags(Rd0811); + build thSBIT_CZNO; +} + +:sbfx^ItCond Rd0811,Rn0003,thLsbImm,thWidthMinus1 is TMode=1 & ItCond & op4=0xf34 & Rn0003; thc1515=0 & Rd0811 & thLsbImm & 
thWidthMinus1 +{ + build ItCond; + build thLsbImm; + build thWidthMinus1; + shift:4 = 31 - (thLsbImm + thWidthMinus1); # thWidthMinus1 holds width-1 + Rd0811 = Rn0003 << shift; + shift = 31 - thWidthMinus1; # thWidthMinus1 holds width-1 + Rd0811 = Rd0811 s>> shift; +} + +:sdiv^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb9 & Rn0003; op12=0xf & Rd0811 & thc0407=0xf & Rm0003 +{ + build ItCond; + local result = Rn0003 s/ Rm0003; + Rd0811 = result; +} + +:sel^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x8 & Rm0003 +{ + build ItCond; + Rd0811[ 0,8] = ((GE1 == 1) * Rn0003[ 0,8]) + ((GE1 == 0) * Rm0003[ 0,8]); + Rd0811[ 8,8] = ((GE2 == 1) * Rn0003[ 8,8]) + ((GE2 == 0) * Rm0003[ 8,8]); + Rd0811[16,8] = ((GE3 == 1) * Rn0003[16,8]) + ((GE3 == 0) * Rm0003[16,8]); + Rd0811[24,8] = ((GE4 == 1) * Rn0003[24,8]) + ((GE4 == 0) * Rm0003[24,8]); +} + +:shadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 +{ + build ItCond; + sum1:4 = sext(Rn0003[ 0,16]) + sext(Rm0003[ 0,16]); + sum2:4 = sext(Rn0003[16,16]) + sext(Rm0003[16,16]); + Rd0811[ 0,16] = sum1[1,16]; + Rd0811[16,16] = sum2[1,16]; +} + +:shadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 +{ + build ItCond; + sum1:4 = sext(Rn0003[ 0,8]) + sext(Rm0003[ 0,8]); + sum2:4 = sext(Rn0003[ 8,8]) + sext(Rm0003[ 8,8]); + sum3:4 = sext(Rn0003[16,8]) + sext(Rm0003[16,8]); + sum4:4 = sext(Rn0003[24,8]) + sext(Rm0003[24,8]); + Rd0811[ 0,8] = sum1[1,8]; + Rd0811[ 8,8] = sum2[1,8]; + Rd0811[16,8] = sum3[1,8]; + Rd0811[24,8] = sum4[1,8]; +} + +:shasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 +{ + build ItCond; + diff:4 = sext(Rn0003[ 0,16]) - sext(Rm0003[16,16]); + sum:4 = sext(Rn0003[16,16]) + sext(Rm0003[ 0,16]); + Rd0811[ 0,16] = diff[1,16]; + Rd0811[16,16] = sum[1,16]; +} + +:shsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 +{ + build ItCond; + sum:4 = sext(Rn0003[ 0,16]) + sext(Rm0003[16,16]); + diff:4 = sext(Rn0003[16,16]) - sext(Rm0003[ 0,16]); + Rd0811[ 0,16] = sum[1,16]; + Rd0811[16,16] = diff[1,16]; +} + +:shsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 +{ + build ItCond; + diff1:4 = sext(Rn0003[ 0,16]) - sext(Rm0003[ 0,16]); + diff2:4 = sext(Rn0003[16,16]) - sext(Rm0003[16,16]); + Rd0811[ 0,16] = diff1[1,16]; + Rd0811[16,16] = diff2[1,16]; +} + +:shsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 +{ + build ItCond; + diff1:4 = sext(Rn0003[ 0,8]) - sext(Rm0003[ 0,8]); + diff2:4 = sext(Rn0003[ 8,8]) - sext(Rm0003[ 8,8]); + diff3:4 = sext(Rn0003[16,8]) - sext(Rm0003[16,8]); + diff4:4 = sext(Rn0003[24,8]) - sext(Rm0003[24,8]); + Rd0811[ 0,8] = diff1[1,8]; + Rd0811[ 8,8] = diff2[1,8]; + Rd0811[16,8] = diff3[1,8]; + Rd0811[24,8] = diff4[1,8]; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +thXBIT: "b" is Rn0003 ; thc0505=0 { local tmpRn0003 = Rn0003; tmp_x:2 = tmpRn0003:2; export tmp_x; } +thXBIT: "t" is Rn0003 ; thc0505=1 { local tmpRn0003 = Rn0003; tmp_x:2 = tmpRn0003(2); export tmp_x; } + +thYBIT: "b" is thc0404=0 & Rm0003 { local tmpRm0003 = Rm0003; tmp_y:2 = tmpRm0003:2; export tmp_y; } +thYBIT: "t" is thc0404=1 & Rm0003 { local tmpRm0003 = Rm0003; tmp_y:2 = tmpRm0003(2); 
export tmp_y; } + +:smla^thXBIT^thYBIT^ItCond Rd0811,Rn0003,Rm0003,Rt1215 is TMode=1 & ItCond & (op4=0xfb1 & Rn0003; Rt1215 & Rd0811 & thc0607=0 & thYBIT & Rm0003) & thXBIT +{ + build ItCond; + tmp:4 = sext(thXBIT) * sext(thYBIT); + Q = scarry(tmp,Rt1215) || Q; #Q flag is never cleared by this instruction + Rd0811 = tmp + Rt1215; +} + +thdXbot: "" is thc0404=0 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003:2; export tmp; } +thdXbot: "X" is thc0404=1 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003(2); export tmp; } + +thdXtop: "" is thc0404=0 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003(2); export tmp; } +thdXtop: "X" is thc0404=1 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003:2; export tmp; } + +:smlad^thdXbot^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb2 & Rn0003; Ra1215 & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + rnbot:2 = tmpRn0003:2; + rntop:2 = tmpRn0003(2); + tmpbot:4 = sext(rnbot) * sext(thdXbot); + tmptop:4 = sext(rntop) * sext(thdXtop); + tmp:4 = sext(tmpbot) + sext(tmptop); + Q = scarry(tmp,Ra1215) || Q; #Q flag is never cleared by this instruction + Rd0811 = tmp + Ra1215; +} + +:smlald^thdXbot^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbc & Rn0003; Rt1215 & Rd0811 & thc0507=6 & thdXbot & thdXtop & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + rnbot:2 = tmpRn0003:2; + rntop:2 = tmpRn0003(2); + tmpbot:4 = sext(rnbot) * sext(thdXbot); + tmptop:4 = sext(rntop) * sext(thdXtop); + accum:8 = (sext(Rd0811) << 32) | zext(Rt1215); + tmp:8 = sext(tmpbot) + sext(tmptop); + accum = tmp + accum; + Rt1215 = accum:4; + Rd0811 = accum(4); +} + +:smlal^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbc & Rn0003; Rt1215 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + accum:8 = (sext(Rd0811) << 32) | zext(Rt1215); + val:8 = sext(Rn0003) * sext(Rm0003) + accum; + Rt1215 = val(0); + Rd0811 = val(4); +} + +:smlal^thXBIT^thYBIT^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & (op4=0xfbc & Rn0003; Rt1215 & Rd0811 & thc0607=2 & thYBIT & Rm0003) & thXBIT +{ + build ItCond; + tmp:4 = sext(thXBIT) * sext(thYBIT); + accum:8 = (zext(Rd0811) << 32) | zext(Rt1215); + val:8 = sext(tmp) + accum; + Rt1215 = val(0); + Rd0811 = val(4); +} + +:smlaw^thYBIT^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb3 & Rn0003; Ra1215 & Rd0811 & thc0507=0 & thYBIT & Rm0003 +{ + build ItCond; + local tmp:6 = (sext(Rn0003) * sext(thYBIT)); + local addend:6 = sext(Ra1215) << 16; + Q = scarry(tmp,addend) || Q; #this instruction never clears the Q flag + tmp = tmp + addend; + Rd0811 = tmp(2); +} + +:smlsd^thdXbot^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb4 & Rn0003; Ra1215 & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local rnbot:2 = tmpRn0003:2; + local rntop:2 = tmpRn0003(2); + local prod1:4 = sext(rnbot) * sext(thdXbot); + local prod2:4 = sext(rntop) * sext(thdXtop); + local diff = prod1 - prod2; + Q = scarry(diff,Ra1215) || Q; #instruction never clears Q flag + Rd0811 = diff + Ra1215; +} + +:smlsld^thdXbot^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbd & Rn0003; Rt1215 & Rd0811 & thc0507=6 & thdXbot & thdXtop & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local rnbot:2 = tmpRn0003:2; + local rntop:2 = tmpRn0003(2); + local tmpbot:4 = sext(rnbot) * sext(thdXbot); + local tmptop:4 = sext(rntop) * sext(thdXtop); + local accum:8 = (sext(Rd0811) << 
32) | zext(Rt1215); + local tmp:8 = sext(tmpbot) - sext(tmptop); + accum = tmp + accum; + Rt1215 = accum:4; + Rd0811 = accum(4); +} + +:smmla^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; Ra1215 & Rd0811 & thc0407=0 & Rm0003 +{ + build ItCond; + local val:8 = sext(Rn0003) * sext(Rm0003); + local accum:8 = (zext(Ra1215)) << 32; + val = val + accum; + Rd0811 = val(4); +} + +:smmlar^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; Ra1215 & Rd0811 & thc0407=1 & Rm0003 +{ + build ItCond; + local val:8 = sext(Rn0003) * sext(Rm0003); + local accum:8 = (zext(Ra1215)) << 32; + val = val + accum + 0x80000000; + Rd0811 = val(4); +} + +:smmls^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb6 & Rn0003; Ra1215 & Rd0811 & thc0407=0 & Rm0003 +{ + build ItCond; + local val:8 = sext(Rn0003) * sext(Rm0003); + val = (zext(Ra1215) << 32) - val; + Rd0811 = val(4); +} + +:smmlsr^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb6 & Rn0003; Ra1215 & Rd0811 & thc0407=1 & Rm0003 +{ + build ItCond; + local val:8 = sext(Rn0003) * sext(Rm0003); + val = (zext(Ra1215) << 32) - val; + val = val + 0x80000000; + Rd0811 = val(4); +} + +:smmul^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; thc1215=0xf & Rd0811 & thc0407=0 & Rm0003 +{ + build ItCond; + val:8 = sext(Rn0003) * sext(Rm0003); + Rd0811 = val(4); +} + +:smmulr^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; thc1215=0xf & Rd0811 & thc0407=1 & Rm0003 +{ + build ItCond; + val:8 = sext(Rn0003) * sext(Rm0003); + val = val + 0x80000000; + Rd0811 = val(4); +} + +:smuad^thdXbot^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb2 & Rn0003; thc1215=0xf & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local rnbot:2 = tmpRn0003:2; + local rntop:2 = tmpRn0003(2); + local prod1:4 = sext(rnbot) * sext(thdXbot); + local prod2:4 = sext(rntop) * sext(thdXtop); + Q = scarry(prod1,prod2) || Q; #instruction does not clear the Q flag + Rd0811 = prod1 + prod2; +} + +:smulbb^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local tmpRm0003 = Rm0003; + op1:2 = tmpRn0003:2; + op2:2 = tmpRm0003:2; + Rd0811 = sext(op1) * sext(op2); +} + +:smulbt^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=1 & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local tmpRm0003 = Rm0003; + op1:2 = tmpRn0003:2; + op2:2 = tmpRm0003(2); + Rd0811 = sext(op1) * sext(op2); +} + +:smultb^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=2 & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local tmpRm0003 = Rm0003; + op1:2 = tmpRn0003(2); + op2:2 = tmpRm0003:2; + Rd0811 = sext(op1) * sext(op2); +} + +:smultt^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=3 & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local tmpRm0003 = Rm0003; + op1:2 = tmpRn0003(2); + op2:2 = tmpRm0003(2); + Rd0811 = sext(op1) * sext(op2); +} + +:smull^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb8 & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + val:8 = sext(Rn0003) * sext(Rm0003); + Ra1215 = val(0); + Rd0811 = val(4); +} + +:smusd^thdXbot^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb4 & Rn0003; thc1215=0xf & Rd0811 & thc0507=0 & 
thdXbot & thdXtop & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + rnbot:2 = tmpRn0003:2; + rntop:2 = tmpRn0003(2); + tmpbot:4 = sext(rnbot) * sext(thdXbot); + tmptop:4 = sext(rntop) * sext(thdXtop); + tmp:8 = sext(tmpbot) - sext(tmptop); + Rd0811 = tmp:4; +} + +:smulw^thYBIT^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb3 & Rn0003; thc1215=0xf & Rd0811 & thc0507=0 & thYBIT & Rm0003 +{ + build ItCond; + tmp:8 = (sext(Rn0003) * sext(thYBIT)) s>> 16; + Rd0811 = tmp:4; +} + +:srsdb^ItCond sp^"!",thSRSMode is TMode=1 & ItCond & op6=0x3a0 & sp & thc0505=1 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode +{ + build ItCond; + # register list is always: r14, spsr + ptr:4 = sp - 4; + *ptr = lr; + ptr = ptr - 4; + *ptr = spsr; + sp = ptr; +} + +:srsdb^ItCond sp,thSRSMode is TMode=1 & ItCond & op6=0x3a0 & sp & thc0505=0 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode +{ + build ItCond; + # register list is always: r14, spsr + ptr:4 = sp - 4; + *ptr = lr; + ptr = ptr - 4; + *ptr = spsr; +} + +:srsib^ItCond sp^"!",thSRSMode is TMode=1 & ItCond & op6=0x3a6 & sp & thc0505=1 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode +{ + build ItCond; + # register list is always: r14, spsr + ptr:4 = sp + 4; + *ptr = lr; + ptr = ptr + 4; + *ptr = spsr; + sp = ptr; +} + +:srsia^ItCond sp,thSRSMode is TMode=1 & ItCond & op6=0x3a6 & sp & thc0505=0 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode +{ + build ItCond; + # register list is always: r14, spsr + ptr:4 = sp + 4; + *ptr = lr; + ptr = ptr + 4; + *ptr = spsr; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +# ssat and ssat16 were defined elsewhere and moved here to preserve sort order + +# shift operands for ssat and usat: + +th2_shift0: is imm3_shft=0x0 & imm2_shft=0x0 { } +th2_shift0: ",lsl "^thLsbImm is imm3_shft & imm2_shft & thLsbImm { } +th2_shift1: ",asr "^thLsbImm is imm3_shft & imm2_shft & thLsbImm { } +th2_shift1: ",asr #32" is imm3_shft=0x0 & imm2_shft=0x0 { } + +:ssat Rt0811, thMsbImm, part2Rd0003^th2_shift0 is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xc & part2c0505=0x0 & part2c0404=0x0 & part2Rd0003 ; + thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift0 & thMsbImm & thLsbImm +{ + # Shift bit is 0 + tmpRn:4 = part2Rd0003 << thLsbImm; + tmp:4 = SignedSaturate(tmpRn, thMsbImm); + Q = SignedDoesSaturate(tmpRn, thMsbImm); + Rt0811 = tmp; +} + +:ssat Rt0811, thMsbImm, part2Rd0003^th2_shift1 is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xc & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003; + thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift1 & thMsbImm & thLsbImm +{ + # Shift bit is 1 + tmpRn:4 = part2Rd0003 s>> thLsbImm; + tmp:4 = SignedSaturate(tmpRn, thMsbImm); + Q = SignedDoesSaturate(tmpRn, thMsbImm); + Rt0811 = tmp; +} + +:ssat16 Rt0811, "#"^Immed4, part2Rd0003 is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xc & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003; + op12=0x0 & Rt0811 & thc0407=0x0 & Immed4 +{ + tmp:4 = SignedSaturate(part2Rd0003, Immed4); + Q = SignedDoesSaturate(part2Rd0003, Immed4); + Rt0811 = tmp; +} + +:ssax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + sum:4 = sext(Rn0003[ 0,16]) + sext(Rm0003[16,16]); + diff:4 = sext(Rn0003[16,16]) - sext(Rm0003[ 0,16]); + Rd0811[ 0,16] = sum[0,16]; + Rd0811[16,16] = diff[0,16]; + GE1 = sum s>= 0; + GE2 = sum s>= 0; + GE3 = diff s>= 0; + GE4 = diff s>= 0; +} + +:ssub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & 
Rm0003 +{ + build ItCond; + diff1:4 = sext(Rn0003[ 0,16]) - sext(Rm0003[ 0,16]); + diff2:4 = sext(Rn0003[16,16]) - sext(Rm0003[16,16]); + Rd0811[ 0,16] = diff1[0,16]; + Rd0811[16,16] = diff2[0,16]; + GE1 = diff1 s>= 0; + GE2 = diff1 s>= 0; + GE3 = diff2 s>= 0; + GE4 = diff2 s>= 0; +} + +:ssub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + diff1:4 = sext(Rn0003[ 0,8]) - sext(Rm0003[ 0,8]); + diff2:4 = sext(Rn0003[ 8,8]) - sext(Rm0003[ 8,8]); + diff3:4 = sext(Rn0003[16,8]) - sext(Rm0003[16,8]); + diff4:4 = sext(Rn0003[24,8]) - sext(Rm0003[24,8]); + Rd0811[ 0,8] = diff1[0,8]; + Rd0811[ 8,8] = diff2[0,8]; + Rd0811[16,8] = diff3[0,8]; + Rd0811[24,8] = diff4[0,8]; + GE1 = diff1 s>= 0; + GE2 = diff2 s>= 0; + GE3 = diff3 s>= 0; + GE4 = diff4 s>= 0; +} + +:umull^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfba & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + val:8 = zext(Rn0003) * zext(Rm0003); + Ra1215 = val(0); + Rd0811 = val(4); +} + +:umaal^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbe & Rn0003; Ra1215 & Rd0811 & sop0407=6 & Rm0003 +{ + build ItCond; + val:8 = zext(Rn0003) * zext(Rm0003) + zext(Ra1215) + zext(Rd0811); + Ra1215 = val(0); + Rd0811 = val(4); +} + +:umlal^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbe & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003 +{ + build ItCond; + accum:8 = (zext(Rd0811) << 32) | zext(Ra1215); + val:8 = zext(Rn0003) * zext(Rm0003) + accum; + Ra1215 = val(0); + Rd0811 = val(4); +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +@if defined(VERSION_6) + +thumbEndianNess: "LE" is op0=0xb650 { export 0:1; } +thumbEndianNess: "BE" is op0=0xb658 { export 1:1; } + +:setend^ItCond thumbEndianNess is TMode=1 & ItCond & (op0=0xb650 | op0=0xb658) & thumbEndianNess { setEndianState(thumbEndianNess); } + + +:sev^ItCond is TMode=1 & ItCond & op0=0xbf40 +{ + build ItCond; +} + +:sev^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8004 +{ + build ItCond; +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:stc^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=0 & thL4=0; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_store(t_cpn,thCRd,taddrmode5); +} + +:stcl^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=1 & thL4=0; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_storelong(t_cpn,thCRd,taddrmode5); +} + +:stc2^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=0 & thL4=0; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_store(t_cpn,thCRd,taddrmode5); +} + +:stc2l^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=1 & thL4=0; thCRd & thcpn) & taddrmode5 +{ + build ItCond; + build taddrmode5; + t_cpn:4 = thcpn; + coprocessor_storelong(t_cpn,thCRd,taddrmode5); +} + +:stm^ItCond Rn0003,thstrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=0 & thc0404=0 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_inc +{ + build ItCond; + mult_addr = Rn0003; + build thstrlist_inc; +} + +:stm^ItCond^".w" Rn0003!,thstrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=1 & thc0404=0 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_inc +{ + build ItCond; + mult_addr = Rn0003; + build thstrlist_inc; + Rn0003 = mult_addr; +} + 
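+# The thstrlist_dec subtable used by the stmdb/push constructors below is
+# defined elsewhere; judging from the writeback arithmetic, it stores each
+# listed register at mult_addr and then steps mult_addr down by 4, so priming
+# the walk with Rn-4 and re-adding 4 afterwards leaves Rn at the lowest
+# stored word, as STMDB requires.
+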
+:stmdb^ItCond Rn0003!,thstrlist_dec is TMode=1 & ItCond & op4=0xe92 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_dec +{ + build ItCond; + mult_addr = Rn0003-4; + build thstrlist_dec; + Rn0003 = mult_addr + 4; +} + +:stmdb^ItCond Rn0003,thstrlist_dec is TMode=1 & ItCond & op4=0xe90 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_dec +{ + build ItCond; + mult_addr = Rn0003-4; + build thstrlist_dec; +} + +@endif # defined(VERSION_6T2) || defined(VERSION_7) + +:stmia^ItCond Rn_exclaim,stbrace is TMode=1 & ItCond & op11=0x18 & Rn_exclaim & stbrace & Rn_exclaim_WB +{ + build ItCond; + build Rn_exclaim; + build stbrace; + build Rn_exclaim_WB; +} + +:str^ItCond Rd0002,RnIndirect4 is TMode=1 & ItCond & op11=0xc & RnIndirect4 & Rd0002 +{ + build ItCond; + *RnIndirect4 = Rd0002; +} + +:str^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x28 & RnRmIndirect & Rd0002 +{ + build ItCond; + *RnRmIndirect = Rd0002; +} + +:str^ItCond Rd0810,Sprel8Indirect is TMode=1 & ItCond & op11=0x12 & Sprel8Indirect & Rd0810 +{ + build ItCond; + *Sprel8Indirect = Rd0810; +} + + +:strb^ItCond Rd0002,RnIndirect1 is TMode=1 & ItCond & op11=0xe & RnIndirect1 & Rd0002 +{ + build ItCond; + local tmpRd0002 = Rd0002; + *RnIndirect1 = tmpRd0002:1; +} + +:strb^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2a & RnRmIndirect & Rd0002 +{ + build ItCond; + local tmpRd0002 = Rd0002; + *RnRmIndirect = tmpRd0002:1; +} + +:strh^ItCond Rd0002,RnIndirect2 is TMode=1 & ItCond & op11=0x10 & RnIndirect2 & Rd0002 +{ + build ItCond; + local tmpRd0002 = Rd0002; + *RnIndirect2 = tmpRd0002:2; +} + +:strh^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x29 & RnRmIndirect & Rd0002 +{ + build ItCond; + local tmpRd0002 = Rd0002; + *RnRmIndirect = tmpRd0002:2; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:str.w^ItCond Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8c; Rt1215) & RnIndirect12 +{ + build ItCond; + *RnIndirect12 = Rt1215; +} + +:str.w^ItCond Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf84; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + *RnIndirectPUW = Rt1215; +} + +:str^ItCond^".w" Rt1215,[Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xf84 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405=0 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + Rm0003; + *tmp = Rt1215; +} + +:str^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf84 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + *tmp = Rt1215; +} + +:strb^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf88; Rt1215) & RnIndirect12 +{ + build ItCond; + build RnIndirect12; + local tmpRt1215 = Rt1215; + *RnIndirect12 = tmpRt1215:1; +} + +:strb^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf80; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + local tmpRt1215 = Rt1215; + *RnIndirectPUW = tmpRt1215:1; +} + +:strb^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf80 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + local tmpRt1215 = Rt1215; + *tmp = tmpRt1215:1; +} + +:strbt^ItCond Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf80 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + local tmpRt1215 = Rt1215; + *tmp = tmpRt1215:1; +} + +:strd^ItCond Rt1215,Rt0811,RnIndirectPUW1 is TMode=1 & ItCond & (op9=0x74 & 
thc0910=0 & thc0606=1 & thc0404=0 & Rn0003; Rt1215 & Rt0811) & $(RN_INDIRECT_PUW1) +{ + build ItCond; + build RnIndirectPUW1; + local tmp = RnIndirectPUW1; + *tmp = Rt1215; + tmp = tmp + 4; + *tmp = Rt0811; +} + +:strh^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8A; Rt1215) & RnIndirect12 +{ + build ItCond; + local tmpRt1215 = Rt1215; + *RnIndirect12 = tmpRt1215:2; +} + +:strh^ItCond Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf82; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) +{ + build ItCond; + build RnIndirectPUW; + local tmpRt1215 = Rt1215; + *RnIndirectPUW = tmpRt1215:2; +} + +:strh^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf82 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 << thc0405); + local tmpRt1215 = Rt1215; + *tmp = tmpRt1215:2; +} + +:strht^ItCond Rt1215,[Rn0003,"#"^Immed8] is TMode=1 & ItCond & op4=0xf82 & Rn0003; Rt1215 & thc0811=14 & Immed8 +{ + build ItCond; + local tmp = Rn0003 + Immed8; + local tmpRt1215 = Rt1215; + *tmp = tmpRt1215:2; +} + +:strex^ItCond Rd0811,Rt1215,[Rn0003,Immed8_4] is TMode=1 & ItCond & op4=0xe84 & Rn0003; Rt1215 & Rd0811 & Immed8_4 +{ + build ItCond; + local tmp = Rn0003 + Immed8_4; + access:1 = hasExclusiveAccess(tmp); + Rd0811 = 1; + if (!access) goto inst_next; + Rd0811 = 0; + *tmp = Rt1215; +} + +@endif # VERSION_6T2 || VERSION_7 + +@if defined(VERSION_7) + +:strexb^ItCond Rd0003,Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8c & Rn0003; Rt1215 & thc0811=15 & thc0407=4 & Rd0003 +{ + build ItCond; + local tmp = Rn0003; + access:1 = hasExclusiveAccess(tmp); + Rd0003 = 1; + if (!access) goto inst_next; + Rd0003 = 0; + local tmpRt1215 = Rt1215; + *tmp = tmpRt1215:1; +} + +:strexh^ItCond Rd0003,Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8c & Rn0003; Rt1215 & thc0811=15 & thc0407=5 & Rd0003 +{ + build ItCond; + local tmp = Rn0003; + access:1 = hasExclusiveAccess(tmp); + Rd0003 = 1; + if (!access) goto inst_next; + Rd0003 = 0; + local tmpRt1215 = Rt1215; + *tmp = tmpRt1215:2; +} + +:strexd^ItCond Rd0003,Rt1215,Rt0811,[Rn0003] is TMode=1 & ItCond & op4=0xe8c & Rn0003; Rt1215 & Rt0811 & thc0407=7 & Rd0003 +{ + build ItCond; + local tmp = Rn0003; + access:1 = hasExclusiveAccess(tmp); + Rd0003 = 1; + if (!access) goto inst_next; + Rd0003 = 0; + *tmp = Rt1215; + tmp = tmp + 4; + *tmp = Rt0811; +} + +@endif # VERSION_7 + +:sub^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Immed3 is TMode=1 & ItCond & op9=0xf & Immed3 & Rn0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_subflags(Rn0305,Immed3); + Rd0002 = Rn0305 - Immed3; + resflags(Rd0002); + build CheckInIT_CZNO; +} + +:sub^CheckInIT_CZNO^ItCond Rd0810,Immed8 is TMode=1 & ItCond & op11=7 & Rd0810 & Immed8 & CheckInIT_CZNO +{ + build ItCond; + th_subflags(Rd0810,Immed8); + Rd0810 = Rd0810 - Immed8; + resflags(Rd0810); + build CheckInIT_CZNO; +} + +:sub^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Rm0608 is TMode=1 & ItCond & op9=0xd & Rm0608 & Rn0305 & Rd0002 & CheckInIT_CZNO +{ + build ItCond; + th_subflags(Rn0305,Rm0608); + Rd0002 = Rn0305 - Rm0608; + resflags(Rd0002); + build CheckInIT_CZNO; +} + +:sub^ItCond sp,Immed7_4 is TMode=1 & ItCond & op7=0x161 & sp & Immed7_4 +{ + build ItCond; + sp = sp - Immed7_4; +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:sub^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=13 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + 
th_subflags(Rn0003,ThumbExpandImm12); + Rd0811 = Rn0003-ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:subw^ItCond Rd0811,Rn0003,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & Rn0003; thc1515=0 & Rd0811) & Immed12 +{ + build ItCond; + th_subflags(Rn0003,Immed12); + Rd0811 = Rn0003-Immed12; + resflags(Rd0811); +} + +:sub^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=13 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + local tmp = thshift2; + th_subflags(Rn0003,tmp); + Rd0811 = Rn0003-tmp; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:sub^thSBIT_CZNO^ItCond^".w" Rd0811,sp,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=13 & thSBIT_CZNO & sp & sop0003=0xd; thc1515=0 & Rd0811) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + th_subflags(sp,ThumbExpandImm12); + Rd0811 = sp-ThumbExpandImm12; + resflags(Rd0811); + build thSBIT_CZNO; +} + +:sub^ItCond pc,lr,Immed8 is TMode=1 & ItCond & op4=0xf3d & pc & sop0003=0xe; op8=0x8f & lr & Immed8 +{ + build ItCond; + build Immed8; + th_subflags(lr,Immed8); + dest:4 = lr-Immed8; + resflags(dest); + cpsr=spsr; + SetThumbMode( ((cpsr >> 5) & 1) != 0 ); + pc = dest; + goto [pc]; +} + +:subw^ItCond Rd0811,sp,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & sop0003=0xd & sp; thc1515=0 & Rd0811) & Immed12 +{ + build ItCond; + th_subflags(sp,Immed12); + Rd0811 = sp-Immed12; + resflags(Rd0811); +} + +:sub^thSBIT_CZNO^ItCond^".w" Rd0811,sp,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=13 & thSBIT_CZNO & sop0003=0xd & sp; thc1515=0 & Rd0811 & thshift2 +{ + build ItCond; + build thshift2; + local tmp = thshift2; + th_subflags(sp,tmp); + Rd0811 = sp-tmp; + resflags(Rd0811); + build thSBIT_CZNO; +} + +@endif # VERSION_6T2 || VERSION_7 + +:svc^ItCond immed8 is TMode=1 & ItCond & op8=0xdf & immed8 +{ + build ItCond; + tmp:4 = immed8; + software_interrupt(tmp); +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:sxtab^ItCond Rd0811, Rn0003, Rm0003, ByteRotate is TMode=1 & ItCond & op4=0xfa4 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = sext(tmp:1) + Rn0003; +} + +:sxtab^ItCond Rd0811, Rn0003, Rm0003 is TMode=1 & ItCond & op4=0xfa4 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = sext(tmpRm0003:1) + Rn0003; +} + +:sxtab16^ItCond Rd0811, Rn0003, Rm0003, ByteRotate is TMode=1 & ItCond & op4=0xfa2 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + local tmpRn0003 = Rn0003; + tmpL:2 = sext(tmp:1) + tmpRn0003:2; + tmp = tmp >> 16; + tmpH:2 = sext(tmp:1) + tmpRn0003(2); + Rd0811 = zext(tmpL) + (zext(tmpH) << 16); +} + +:sxtab16^ItCond Rd0811, Rn0003, Rm0003 is TMode=1 & ItCond & op4=0xfa2 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRn0003 = Rn0003; + local tmpRm0003 = Rm0003; + tmpL:2 = sext(tmpRm0003:1) + tmpRn0003:2; + local tmp = tmpRm0003 >> 16; + tmpH:2 = sext(tmp:1) + tmpRn0003(2); + Rd0811 = zext(tmpL) + (zext(tmpH) << 16); +} + +:sxtah^ItCond Rd0811, Rn0003, Rm0003, ByteRotate is TMode=1 & ItCond & op4=0xfa0 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 +{ + build 
ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = sext(tmp:2) + Rn0003; +} + +:sxtah^ItCond Rd0811, Rn0003, Rm0003 is TMode=1 & ItCond & op4=0xfa0 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = sext(tmpRm0003:2) + Rn0003; +} + +@endif # VERSION_6T2 || VERSION_7 + +@if defined(VERSION_6) + +:sxtb^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=0 & thc0606=1 & Rm0305 & Rd0002 +{ + build ItCond; + local tmpRm0305 = Rm0305; + Rd0002 = sext(tmpRm0305:1); +} + +:sxtb^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa4f; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = sext(tmp:1); +} + +:sxtb^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa4f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = sext(tmpRm0003:1); +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:sxtb16^ItCond Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa2f; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 + +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + tmpL:2 = sext(tmp:1); + tmp = tmp >> 16; + tmpH:2 = sext(tmp:1); + Rd0811 = zext(tmpL) + (zext(tmpH) << 16); +} + +:sxtb16^ItCond Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa2f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + tmpL:2 = sext(tmpRm0003:1); + tmp:4 = tmpRm0003 >> 16; + tmpH:2 = sext(tmp:1); + Rd0811 = zext(tmpL) + (zext(tmpH) << 16); +} + +@endif # VERSION_6T2 || VERSION_7 + +@if defined(VERSION_6) + +:sxth^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=0 & thc0606=0 & Rm0305 & Rd0002 +{ + build ItCond; + local tmpRm0305 = Rm0305; + Rd0002 = sext(tmpRm0305:2); +} + +:sxth^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa0f; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = sext(tmp:2); +} + +:sxth^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa0f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = sext(tmpRm0003:2); +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:tbb^ItCond [Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; op8=0xf0 & thc0507=0 & thc0404=0 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + Rm0003; + offs:1 = *tmp; + SetThumbMode(1); + pc = inst_next + (zext(offs) * 2); + goto [pc]; +} + +:tbh^ItCond [Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; op8=0xf0 & thc0507=0 & thc0404=1 & Rm0003 +{ + build ItCond; + local tmp = Rn0003 + (Rm0003 * 2); + offs:2 = *tmp; + SetThumbMode(1); + pc = inst_next + (zext(offs) * 2); + goto [pc]; +} + +Pcrel: [cloc,Rm0003] is Rm0003 & thc0404=0 [ cloc = inst_next; ] +{ + local tmp = Rm0003; tmp = cloc + tmp; val:1 = *tmp; tmp = zext(val); export tmp; +} +Pcrel: [cloc,Rm0003] is Rm0003 & thc0404=1 [ cloc = inst_next; ] +{ + local tmp = Rm0003; tmp = cloc + (tmp * 2); val:2 = *tmp; tmp = zext(val); export tmp; +} + +:tbb^ItCond Pcrel is TMode=1 & ItCond & op4=0xe8d & thc0003=15; op8=0xf0 & thc0507=0 & thc0404=0 & Pcrel +{ + build ItCond; + SetThumbMode(1); + pc = inst_next + (Pcrel * 2); + goto [pc]; +} + +:tbh^ItCond 
Pcrel is TMode=1 & ItCond & op4=0xe8d & thc0003=15; op8=0xf0 & thc0507=0 & thc0404=1 & Pcrel +{ + build ItCond; + SetThumbMode(1); + pc = inst_next + (Pcrel * 2); + goto [pc]; +} + +@endif # VERSION_6T2 || VERSION_7 + +:tst^ItCond Rn0002,Rm0305 is TMode=1 & ItCond & op6=0x108 & Rm0305 & Rn0002 +{ + build ItCond; + local tmp = Rn0002 & Rm0305; + ZR = (tmp == 0); + NG = (tmp s< 0); +} + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:teq^ItCond Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=4 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + local tmp = Rn0003 ^ ThumbExpandImm12; + th_test_flags(tmp); +} + +:teq^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=4 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf & thshift2 +{ + build ItCond; + build thshift2; + local tmp = Rn0003 ^ thshift2; + th_test_flags(tmp); +} + +:tst^ItCond Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=0 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf) & ThumbExpandImm12 +{ + build ItCond; + build ThumbExpandImm12; + local tmp = Rn0003 & ThumbExpandImm12; + th_test_flags(tmp); +} + +:tst^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=0 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf & thshift2 +{ + build ItCond; + build thshift2; + local tmp = Rn0003 & thshift2; + th_test_flags(tmp); +} + +:uadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 +{ + build ItCond; + sum1:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[ 0,16]); + sum2:4 = zext(Rn0003[16,16]) + zext(Rm0003[16,16]); + GE1 = carry(Rn0003[0,16],Rm0003[0,16]); + GE2 = GE1; + GE3 = carry(Rn0003[16,16],Rm0003[16,16]); + GE4 = GE3; + Rd0811[ 0,16] = sum1[0,16]; + Rd0811[16,16] = sum2[0,16]; +} + +:uadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 +{ + build ItCond; + sum1:4 = zext(Rn0003[ 0,8]) + zext(Rm0003[ 0,8]); + sum2:4 = zext(Rn0003[ 8,8]) + zext(Rm0003[ 8,8]); + sum3:4 = zext(Rn0003[16,8]) + zext(Rm0003[16,8]); + sum4:4 = zext(Rn0003[24,8]) + zext(Rm0003[24,8]); + GE1 = carry(Rn0003[0,8],Rm0003[0,8]); + GE2 = carry(Rn0003[8,8],Rm0003[8,8]); + GE3 = carry(Rn0003[16,8],Rm0003[16,8]); + GE4 = carry(Rn0003[24,8],Rm0003[24,8]); + Rd0811[ 0,8] = sum1[0,8]; + Rd0811[ 8,8] = sum2[0,8]; + Rd0811[16,8] = sum3[0,8]; + Rd0811[24,8] = sum4[0,8]; +} + +:uasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 +{ + build ItCond; + diff:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[16,16]); + sum:4 = zext(Rn0003[16,16]) + zext(Rm0003[ 0,16]); + GE1 = diff s>= 0; + GE2 = GE1; + GE3 = carry(Rn0003[16,16],Rm0003[0,16]); + GE4 = GE3; + Rd0811[ 0,16] = diff[0,16]; + Rd0811[16,16] = sum[0,16]; + } + +:uhadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 +{ + build ItCond; + sum1:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[ 0,16]); + sum2:4 = zext(Rn0003[16,16]) + zext(Rm0003[16,16]); + Rd0811[ 0,16] = sum1[1,16]; + Rd0811[16,16] = sum2[1,16]; +} + +:uhadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 +{ + build ItCond; + sum1:4 = zext(Rn0003[ 0,8]) + zext(Rm0003[ 0,8]); + sum2:4 = zext(Rn0003[ 8,8]) + zext(Rm0003[ 8,8]); + sum3:4 = zext(Rn0003[16,8]) + zext(Rm0003[16,8]); + sum4:4 = 
zext(Rn0003[24,8]) + zext(Rm0003[24,8]); + Rd0811[ 0,8] = sum1[1,8]; + Rd0811[ 8,8] = sum2[1,8]; + Rd0811[16,8] = sum3[1,8]; + Rd0811[24,8] = sum4[1,8]; +} + +:uhasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 +{ + build ItCond; + diff:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[16,16]); + sum:4 = zext(Rn0003[16,16]) + zext(Rm0003[ 0,16]); + Rd0811[ 0,16] = diff[1,16]; + Rd0811[16,16] = sum[1,16]; +} + +:uhsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 +{ + build ItCond; + sum:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[16,16]); + diff:4 = zext(Rn0003[16,16]) - zext(Rm0003[ 0,16]); + Rd0811[ 0,16] = sum[1,16]; + Rd0811[16,16] = diff[1,16]; +} + +:uhsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[ 0,16]); + diff2:4 = zext(Rn0003[16,16]) - zext(Rm0003[16,16]); + Rd0811[ 0,16] = diff1[1,16]; + Rd0811[16,16] = diff2[1,16]; +} + +:uhsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); + diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); + diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); + diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); + Rd0811[ 0,8] = diff1[1,8]; + Rd0811[ 8,8] = diff2[1,8]; + Rd0811[16,8] = diff3[1,8]; + Rd0811[24,8] = diff4[1,8]; +} + +:uqadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 +{ + build ItCond; + sum1:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[ 0,16]); + sum2:4 = zext(Rn0003[16,16]) + zext(Rm0003[16,16]); + tmp1:4 = UnsignedSaturate(sum1, 16:2); + tmp2:4 = UnsignedSaturate(sum2, 16:2); + Rd0811[ 0,16] = tmp1[0,16]; + Rd0811[16,16] = tmp2[0,16]; +} + +:uqadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 +{ + build ItCond; + sum1:4 = zext(Rn0003[ 0,8]) + zext(Rm0003[ 0,8]); + sum2:4 = zext(Rn0003[ 8,8]) + zext(Rm0003[ 8,8]); + sum3:4 = zext(Rn0003[16,8]) + zext(Rm0003[16,8]); + sum4:4 = zext(Rn0003[24,8]) + zext(Rm0003[24,8]); + tmp1:4 = UnsignedSaturate(sum1, 8:2); + tmp2:4 = UnsignedSaturate(sum2, 8:2); + tmp3:4 = UnsignedSaturate(sum3, 8:2); + tmp4:4 = UnsignedSaturate(sum4, 8:2); + Rd0811[ 0,8] = tmp1[0,8]; + Rd0811[ 8,8] = tmp2[0,8]; + Rd0811[16,8] = tmp3[0,8]; + Rd0811[24,8] = tmp4[0,8]; +} + +:uqasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 +{ + build ItCond; + diff:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[16,16]); + sum:4 = zext(Rn0003[16,16]) + zext(Rm0003[ 0,16]); + tmpdiff:4 = UnsignedSaturate(diff, 16:2); + tmpsum:4 = UnsignedSaturate(sum, 16:2); + Rd0811[ 0,16] = tmpdiff[0,16]; + Rd0811[16,16] = tmpsum[0,16]; +} + +:uqsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 +{ + build ItCond; + sum:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[16,16]); + diff:4 = zext(Rn0003[16,16]) - zext(Rm0003[ 0,16]); + tmpsum:4 = UnsignedSaturate(sum, 16:2); + tmpdiff:4 = UnsignedSaturate(diff, 16:2); + Rd0811[ 0,16] = tmpsum[0,16]; + Rd0811[16,16] = tmpdiff[0,16]; +} + +:uqsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 
0,16]) - zext(Rm0003[ 0,16]); + diff2:4 = zext(Rn0003[16,16]) - zext(Rm0003[16,16]); + tmp1:4 = UnsignedSaturate(diff1, 16:2); + tmp2:4 = UnsignedSaturate(diff2, 16:2); + Rd0811[ 0,16] = tmp1[0,16]; + Rd0811[16,16] = tmp2[0,16]; +} + +:uqsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); + diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); + diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); + diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); + tmp1:4 = UnsignedSaturate(diff1, 8:2); + tmp2:4 = UnsignedSaturate(diff2, 8:2); + tmp3:4 = UnsignedSaturate(diff3, 8:2); + tmp4:4 = UnsignedSaturate(diff4, 8:2); + Rd0811[ 0,8] = tmp1[0,8]; + Rd0811[ 8,8] = tmp2[0,8]; + Rd0811[16,8] = tmp3[0,8]; + Rd0811[24,8] = tmp4[0,8]; +} + +:usad8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb7 & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); + diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); + diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); + diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); + absdiff1:4 = Absolute(diff1); + absdiff2:4 = Absolute(diff2); + absdiff3:4 = Absolute(diff3); + absdiff4:4 = Absolute(diff4); + Rd0811 = absdiff1 + absdiff2 + absdiff3 + absdiff4; +} + +:usada8^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb7 & Rn0003; Ra1215 & Rd0811 & thc0407=0x0 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); + diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); + diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); + diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); + absdiff1:4 = Absolute(diff1); + absdiff2:4 = Absolute(diff2); + absdiff3:4 = Absolute(diff3); + absdiff4:4 = Absolute(diff4); + # The manual specifies a zero extension of Ra to an unspecified + # intermediate precision, followed by truncation to 4 bytes. In this + # model, zext is retained, but it has no effect because the + # intermediate precision is 4 bytes. 
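+ # For example (illustrative values): Rn=0x01020304 and Rm=0x04030201 give
+ # byte differences 3,1,-1,-3; the absolute values sum to 8, so the result
+ # is Ra1215 + 8.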
+ Rd0811 = zext(Ra1215) + absdiff1 + absdiff2 + absdiff3 + absdiff4; +} + +# usat and usat16 were defined elsewhere and moved here to preserve sort order + +:usat Rt0811, thMsbImm, part2Rd0003^th2_shift0 is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2c0505=0x0 & part2c0404=0x0 & part2Rd0003 ; + thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift0 & thMsbImm & thLsbImm +{ + # Shift bit is 0 + tmpRn:4 = part2Rd0003 << thLsbImm; + tmp:4 = UnsignedSaturate(tmpRn, thMsbImm); + Q = UnsignedDoesSaturate(tmpRn, thMsbImm); + Rt0811 = tmp; +} + +:usat Rt0811, thMsbImm, part2Rd0003^th2_shift1 is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003 ; + thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift1 & thMsbImm & thLsbImm +{ + # Shift bit is 1 + tmpRn:4 = part2Rd0003 s>> thLsbImm; + tmp:4 = UnsignedSaturate(tmpRn, thMsbImm); + Q = UnsignedDoesSaturate(tmpRn, thMsbImm); + Rt0811 = tmp; +} + +:usat16 Rt0811, "#"^Immed4, part2Rd0003 is + TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003 ; + op12=0x0 & Rt0811 & thc0407=0x0 & Immed4 +{ + tmp:4 = UnsignedSaturate(part2Rd0003, Immed4); + Q = UnsignedDoesSaturate(part2Rd0003, Immed4); + Rt0811 = tmp; +} + +:usax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 +{ + build ItCond; + sum:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[16,16]); + diff:4 = zext(Rn0003[16,16]) - zext(Rm0003[ 0,16]); + Rd0811[ 0,16] = sum[0,16]; + Rd0811[16,16] = diff[0,16]; + # this odd-looking condition tests that the 16-bit sum overflowed, + # which would have made it a negative number. That's how it's + # documented, but to be consistent they might have used s< 0. + GE1 = sum s>= 0x10000; + GE2 = sum s>= 0x10000; + GE3 = diff s>= 0; + GE4 = diff s>= 0; +} + +:usub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[ 0,16]); + diff2:4 = zext(Rn0003[16,16]) - zext(Rm0003[16,16]); + Rd0811[ 0,16] = diff1[0,16]; + Rd0811[16,16] = diff2[0,16]; + GE1 = diff1 s>= 0; + GE2 = diff1 s>= 0; + GE3 = diff2 s>= 0; + GE4 = diff2 s>= 0; +} + +:usub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 +{ + build ItCond; + diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); + diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); + diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); + diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); + Rd0811[ 0,8] = diff1[0,8]; + Rd0811[ 8,8] = diff2[0,8]; + Rd0811[16,8] = diff3[0,8]; + Rd0811[24,8] = diff4[0,8]; + GE1 = diff1 s>= 0; + GE2 = diff2 s>= 0; + GE3 = diff3 s>= 0; + GE4 = diff4 s>= 0; +} + +:ubfx^ItCond Rd0811,Rn0003,thLsbImm,thWidthMinus1 is TMode=1 & ItCond & op4=0xf3c & Rn0003; thc1515=0 & Rd0811 & thLsbImm & thc0505=0 & thWidthMinus1 +{ + build ItCond; + build thLsbImm; + build thWidthMinus1; + shift:4 = 31 - (thLsbImm + thWidthMinus1); # thWidthMinus1 holds width-1 + Rd0811 = Rn0003 << shift; + shift = 31 - thWidthMinus1; # thWidthMinus1 holds width-1 + Rd0811 = Rd0811 >> shift; +} + +:udiv^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbb & Rn0003; op12=0xf & Rd0811 & thc0407=0xf & Rm0003 +{ + build ItCond; + result:8 = zext(Rn0003) / zext(Rm0003); + Rd0811 = result(0); +} + +:uxtab^ItCond Rd0811,Rn0003,Rm0003,ByteRotate is TMode=1 & ItCond & op4=0xfa5 & Rn0003; op12=15 & Rd0811 & thc0707=1 & 
ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = Rn0003 + zext(tmp:1); +} + +:uxtab16^ItCond Rd0811,Rn0003,Rm0003,ByteRotate is TMode=1 & ItCond & op4=0xfa3 & Rn0003; op12=15 & Rd0811 & thc0707=1 & ByteRotate & Rm0003 +{ + build ItCond; + rotated:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + local tmp_b = rotated:1; + local tmpRn0003 = Rn0003; + tmpl:2 = tmpRn0003:2 + zext(tmp_b); + local tmph = (rotated >> 16); + tmp_b = tmph:1; + tmph = (tmpRn0003 >> 16) + zext(tmp_b); + Rd0811 = (tmph << 16) | zext(tmpl); +} + +:uxtah^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa1 & Rn0003; op12=15 & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = Rn0003 + zext(tmpRm0003:2); +} + +:uxtah^ItCond Rd0811,Rn0003,Rm0003,ByteRotate is TMode=1 & ItCond & op4=0xfa1 & Rn0003; op12=15 & Rd0811 & thc0707=1 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = Rn0003 + zext(tmp:2); +} + +@endif # VERSION_6T2 || VERSION_7 + +@if defined(VERSION_6) + +:uxtb^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=1 & thc0606=1 & Rm0305 & Rd0002 +{ + build ItCond; + local tmpRm0305 = Rm0305; + Rd0002 = zext(tmpRm0305:1); +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:uxtb^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa5f; op12=0xf & Rd0811 & thc0707=1 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = zext(tmp:1); +} + +:uxtb^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa5f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = zext(tmpRm0003:1); +} + +:uxtb16^ItCond Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa3f; op12=0xf & Rd0811 & thc0707=1 & ByteRotate & Rm0003 +{ + build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = tmp & 0x00ff00ff; +} + +:uxtb16^ItCond Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa3f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + Rd0811 = Rm0003 & 0x00ff00ff; +} + +@endif # VERSION_6T2 || VERSION_7 + +@if defined(VERSION_6) + +:uxth^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=1 & thc0606=0 & Rm0305 & Rd0002 +{ + build ItCond; + local tmpRm0305 = Rm0305; + Rd0002 = zext(tmpRm0305:2); +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:uxth^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa1f; op12=0xf & Rd0811 & thc0707=1 & ByteRotate & Rm0003 +{ build ItCond; + tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); + Rd0811 = zext(tmp:2); +} + +:uxth^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa1f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 +{ + build ItCond; + local tmpRm0003 = Rm0003; + Rd0811 = zext(tmpRm0003:2); +} + +@endif # VERSION_6T2 || VERSION_7 + +# V* see ARMneon.sinc + +@if defined(VERSION_6) + +:wfe^ItCond is TMode=1 & ItCond & op0=0xbf20 +{ + WaitForEvent(); +} + +:wfi^ItCond is TMode=1 & ItCond & op0=0xbf30 +{ + WaitForInterrupt(); +} + +:yield^ItCond is TMode=1 & ItCond & op0=0xbf10 +{ + HintYield(); +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) || defined(VERSION_7) + +:wfe^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8002 +{ + WaitForEvent(); +} + +:wfi^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8003 +{ 
+    WaitForInterrupt();
+}
+
+:yield^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8001
+{
+    HintYield();
+}
+
+} # End with : ARMcondCk=1
+@endif # VERSION_6T2 || VERSION_7
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM_v45.cspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM_v45.cspec
new file mode 100644
index 00000000..5b6a05aa
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM_v45.cspec
@@ -0,0 +1,209 @@
[XML markup of this 209-line compiler spec was lost in extraction: all angle-bracketed content, including the goto label below, was stripped. Only the tail of an embedded pcode body, apparently a Thumb switch-table injection, survives:]
+			;
+			offset = *:1 (lr + r3);
+			r3 = zext(offset);
+
+			if (inbounds) goto ;
+			offset = *:1 (lr + r12);
+			r3 = zext(offset);
+
+			r3 = r3 * 2;
+
+			r12 = lr + r3;
+
+			ISAModeSwitch = (r12 & 1) != 1;
+			TB = ISAModeSwitch;
+			pc = r12 & 0xfffffffe;
+			goto [pc];
+		]]>
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM_v45.pspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM_v45.pspec
new file mode 100644
index 00000000..865bb586
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM_v45.pspec
@@ -0,0 +1,39 @@
[XML markup of this 39-line processor spec was lost in extraction.]
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARM_win.cspec b/src/third-party/sleigh/processors/ARM/data/languages/ARM_win.cspec
new file mode 100644
index 00000000..f4e1dc2d
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARM_win.cspec
@@ -0,0 +1,131 @@
[XML markup of this 131-line compiler spec was lost in extraction.]
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMinstructions.sinc b/src/third-party/sleigh/processors/ARM/data/languages/ARMinstructions.sinc
new file mode 100644
index 00000000..e46a5be7
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMinstructions.sinc
@@ -0,0 +1,6441 @@
+# Specification for the ARM Version 4, 4T, 5, 5T, 5E
+# The following boolean defines control specific support: T_VARIANT, VERSION_5, VERSION_5E
+
+#
+# WARNING NOTE: Be very careful taking a subpiece or truncating a register with :# or (#)
+# The LEBE hybrid language causes endian issues if you do not assign the register to a temp
+# variable and then take a subpiece or truncate.
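+#
+# For example (an illustrative sketch, not from the upstream file; Rm and Rd
+# stand in for any attached registers), prefer
+#     local tmpRm = Rm;
+#     Rd = zext(tmpRm:2);     # subpiece of the temp: safe
+# over the direct form
+#     Rd = zext(Rm:2);        # subpiece of the register: endian-fragile on LEBE
+# This is the tmpRm0003/tmpRm0305 pattern used throughout this file.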
+# + +@if defined(SIMD) || defined(VFPv2) || defined(VFPv3) +@define INCLUDE_NEON "" # Neon instructions included with SIMD, VFPv2 or VFPv3 +@endif + +@if defined(T_VARIANT) +@define AMODE "TMode=0" # T_VARIANT must restrict ARM instruction decoding and require TMode=0 +@else +@define AMODE "epsilon" # THUMB instructions not supported - ARM only +@endif + +@if defined(T_VARIANT) +@define VERSION_5_or_T "" +@endif +@if defined(VERSION_5) +@define VERSION_5_or_T "" +@endif + +define token prefix (32) + pref=(0,31) +; + +define token instrArm (32) + cond=(28,31) + I25=(25,25) + P24=(24,24) + H24=(24,24) + L24=(24,24) + U23=(23,23) + B22=(22,22) + N22=(22,22) + S22=(22,22) + op=(21,24) + W21=(21,21) + S20=(20,20) + L20=(20,20) + Rn=(16,19) + RnLo=(0,3) + msb=(16,20) + satimm5=(16,20) + satimm4=(16,19) + mask=(16,19) + Rd=(12,15) + Rd2=(12,15) + CRd=(12,15) + CRn=(16,19) + CRm=(0,3) + RdHi=(16,19) + RdLo=(12,15) + smRd=(16,19) + smRa=(12,15) + smRm=(8,11) + smRn=(0,3) + immed12=(8,19) + Rs=(8,11) + rotate=(8,11) + immedH=(8,11) + cpn=(8,11) + opc1=(21,23) + opcode1=(20,23) + opc2=(5,7) + opcode2=(5,7) + opcode3=(4,7) + lsb=(7,11) + sftimm=(7,11) + sh=(6,6) + shft=(5,6) + immed24=(0,23) + addr24=(0,23) signed + offset_12=(0,11) + immed=(0,7) + srsMode=(0,4) + immedL=(0,3) + immed4=(0,3) + dbOption=(0,3) + ibOption=(0,3) + Rm=(0,3) + RmHi=(8,11) + Rm2=(0,3) + x=(5,5) + r=(5,5) + y=(6,6) + + # Advanced SIMD and VFP instruction fields + D22=(22,22) + N7=(7,7) + L7=(7,7) + Q6=(6,6) + M5=(5,5) + Qn0=(16,19) + Qd0=(12,15) + Qm0=(0,3) + Qn1=(16,19) + Qd1=(12,15) + Qm1=(0,3) + Dn0=(16,19) + Dd0=(12,15) + Dm0=(0,3) + Dn1=(16,19) + Dd1=(12,15) + Dm1=(0,3) + Dm_3=(0,2) + Dm_4=(0,3) + Sn0=(16,19) + Sd0=(12,15) + Sm0=(0,3) + Sm0next=(0,3) + Sn1=(16,19) + Sd1=(12,15) + Sm1=(0,3) + Sm1next=(0,3) + cmode=(8,11) + + + + # Arbitrary bit fields + bit31=(31,31) + bit30=(30,30) + bit29=(29,29) + bit28=(28,28) + c2831=(28,31) + c2627=(26,27) + c2531=(25,31) + c2527=(25,27) + c2525=(25,25) + c2427=(24,27) + c2424=(24,24) + c2331=(23,31) + c2327=(23,27) + c2324=(23,24) + c2323=(23,23) + c2222=(22,22) + c2131=(21,31) + c2127=(21,27) + c2124=(21,24) + c2123=(21,23) + c2122=(21,22) + c2121=(21,21) + c2027=(20,27) + c2024=(20,24) + c2022=(20,22) + c2021=(20,21) + c2020=(20,20) + c1921=(19,21) + c1919=(19,19) + c1821=(18,21) + c1819=(18,19) + c1818=(18,18) + c1721=(17,21) + c1719=(17,19) + c1718=(17,18) + c1717=(17,17) + c1631=(16,31) + c1627=(16,27) + c1621=(16,21) + c1620=(16,20) + c1619=(16,19) + c1618=(16,18) + c1617=(16,17) + c1616=(16,16) + c1515=(15,15) + c1415=(14,15) + c1414=(14,14) + c1315=(13,15) + c1313=(13,13) + c1215=(12,15) + c1212=(12,12) + c1115=(11,15) + c1111=(11,11) + c1015=(10,15) + c1011=(10,11) + c1010=(10,10) + c0916=(9,16) + c0915=(9,15) + c0911=(9,11) + c0909=(9,9) + c0815=(8,15) + c0811=(8,11) + c0809=(8,9) + c0808=(8,8) + c0715=(7,15) + c0711=(7,11) + c0709=(7,9) + c0708=(7,8) + c0707=(7,7) + c0615=(6,15) + c0611=(6,11) + c0607=(6,7) + c0606=(6,6) + c0515=(5,15) + c0508=(5,8) + c0507=(5,7) + c0506=(5,6) + c0505=(5,5) + c0431=(4,31) + c0427=(4,27) + c0415=(4,15) + c0411=(4,11) + c0409=(4,9) + c0408=(4,8) + c0407=(4,7) + c0406=(4,6) + c0405=(4,5) + c0404=(4,4) + c0315=(3,15) + c0303=(3,3) + c0215=(2,15) + c0202=(2,2) + c0115=(1,15) + c0101=(1,1) + c0031=(0,31) + c0027=(0,27) + c0014=(0,14) + c0013=(0,13) + c0012=(0,12) + c0011=(0,11) + c0010=(0,10) + c0009=(0,9) + c0008=(0,8) + c0007=(0,7) + c0006=(0,6) + c0005=(0,5) + c0004=(0,4) + c0003=(0,3) + c0002=(0,2) + c0001=(0,1) + c0000=(0,0) + +# +# 
32-bit Thumb fields which correspond closely with ARM fields for +# certain coprocessor instructions +# + +@if ENDIAN == "little" + + # Advanced SIMD and VFP instruction fields for 32-bit Little Endian Thumb + thv_D22=(6,6) + thv_N7=(23,23) + thv_L7=(23,23) + thv_Q6=(22,22) + thv_M5=(21,21) + thv_Qn0=(0,3) + thv_Qd0=(28,31) + thv_Qm0=(16,19) + thv_Qn1=(0,3) + thv_Qd1=(28,31) + thv_Qm1=(16,19) + thv_Dn0=(0,3) + thv_Dd0=(28,31) + thv_Dd_1=(28,31) + thv_Dd_2=(28,31) + thv_Dd_3=(28,31) + thv_Dd_4=(28,31) + thv_Dd_5=(28,31) + thv_Dd_6=(28,31) + thv_Dd_7=(28,31) + thv_Dd_8=(28,31) + thv_Dd_9=(28,31) + thv_Dd_10=(28,31) + thv_Dd_11=(28,31) + thv_Dd_12=(28,31) + thv_Dd_13=(28,31) + thv_Dd_14=(28,31) + thv_Dd_15=(28,31) + thv_Dd_16=(28,31) + thv_Dm0=(16,19) + thv_Dn1=(0,3) + thv_Dd1=(28,31) + thv_Dm1=(16,19) + thv_Dm_3=(16,18) + thv_Dm_4=(16,19) + thv_Sn0=(0,3) + thv_Sd0=(28,31) + thv_Sm0=(16,19) + thv_Sm0next=(16,19) + thv_Sn1=(0,3) + thv_Sd1=(28,31) + thv_Sm1=(16,19) + thv_Sm1next=(16,19) + thv_cmode=(24,27) + + thv_Rd=(28,31) + thv_Rt=(28,31) + thv_Rn=(0,3) + thv_Rm=(16,19) + thv_Rt2=(24,27) + thv_immed=(16,23) + + # Arbitrary bit fields for 32-bit Little Endian Thumb + + thv_bit31=(15,15) + thv_bit30=(14,14) + thv_bit29=(13,13) + thv_bit28=(12,12) + thv_bit23=(7,7) + thv_bit21=(5,5) + thv_bit20=(4,4) + thv_bit07=(23,23) + thv_bit06=(22,22) + thv_bit00=(16,16) + thv_c2931=(13,15) + thv_c2831=(12,15) + thv_c2828=(12,12) + thv_c2627=(10,11) + thv_c2531=(9,15) + thv_c2527=(9,11) + thv_c2525=(9,9) + thv_c2427=(8,11) + thv_c2424=(8,8) + thv_c2331=(7,15) + thv_c2327=(7,11) + thv_c2324=(7,8) + thv_c2323=(7,7) + thv_c2222=(6,6) + thv_c2131=(5,15) + thv_c2127=(5,11) + thv_c2124=(5,8) + thv_c2123=(5,7) + thv_c2122=(5,6) + thv_c2121=(5,5) + thv_c2031=(4,15) + thv_c2027=(4,11) + thv_c2024=(4,8) + thv_c2022=(4,6) + thv_c2021=(4,5) + thv_c2020=(4,4) + thv_c1921=(3,5) + thv_c1919=(3,3) + thv_c1821=(2,5) + thv_c1819=(2,3) + thv_c1818=(2,2) + thv_c1721=(1,5) + thv_c1719=(1,3) + thv_c1718=(1,2) + thv_c1717=(1,1) + thv_c1631=(0,15) + thv_c1627=(0,11) + thv_c1621=(0,5) + thv_c1620=(0,4) + thv_c1619=(0,3) + thv_c1618=(0,2) + thv_c1617=(0,1) + thv_c1616=(0,0) + thv_c1515=(31,31) + thv_c1415=(30,31) + thv_c1414=(30,30) + thv_c1313=(29,29) + thv_c1215=(28,31) + thv_c1212=(28,28) + thv_c1111=(27,27) + thv_c1011=(26,27) + thv_c1010=(26,26) + thv_c0911=(25,27) + thv_c0909=(25,25) + thv_c0811=(24,27) + thv_c0809=(24,25) + thv_c0808=(24,24) + thv_c0711=(23,27) + thv_c0709=(23,25) + thv_c0708=(23,24) + thv_c0707=(23,23) + thv_c0611=(22,27) + thv_c0607=(22,23) + thv_c0606=(22,22) + thv_c0508=(21,24) + thv_c0507=(21,23) + thv_c0506=(21,22) + thv_c0505=(21,21) + thv_c0431=(4,31) + thv_c0427=(4,27) + thv_c0411=(20,27) + thv_c0409=(20,25) + thv_c0407=(20,23) + thv_c0406=(20,22) + thv_c0405=(20,21) + thv_c0404=(20,20) + thv_c0303=(19,19) + thv_c0215=(18,31) + thv_c0202=(18,18) + thv_c0101=(17,17) + thv_c0104=(17,20) + thv_c0031=(0,31) + thv_c0027=(0,27) + thv_c0015=(16,31) + thv_c0011=(16,27) + thv_c0010=(16,26) + thv_c0008=(16,24) + thv_c0007=(16,23) + thv_c0006=(16,22) + thv_c0004=(16,20) + thv_c0003=(16,19) + thv_c0001=(16,17) + thv_c0000=(16,16) + thv_option=(16,19) + +@else # ENDIAN == "big" + + # Advanced SIMD and VFP instruction fields for 32-bit Big Endian Thumb + thv_D22=(22,22) + thv_N7=(7,7) + thv_L7=(7,7) + thv_Q6=(6,6) + thv_M5=(5,5) + thv_Qn0=(16,19) + thv_Qd0=(12,15) + thv_Qm0=(0,3) + thv_Qn1=(16,19) + thv_Qd1=(12,15) + thv_Qm1=(0,3) + thv_Dn0=(16,19) + thv_Dd0=(12,15) + thv_Dd_1=(12,15) + thv_Dd_2=(12,15) + 
thv_Dd_3=(12,15) + thv_Dd_4=(12,15) + thv_Dd_5=(12,15) + thv_Dd_6=(12,15) + thv_Dd_7=(12,15) + thv_Dd_8=(12,15) + thv_Dd_9=(12,15) + thv_Dd_10=(12,15) + thv_Dd_11=(12,15) + thv_Dd_12=(12,15) + thv_Dd_13=(12,15) + thv_Dd_14=(12,15) + thv_Dd_15=(12,15) + thv_Dd_16=(12,15) + thv_Dm0=(0,3) + thv_Dn1=(16,19) + thv_Dd1=(12,15) + thv_Dm1=(0,3) + thv_Dm_3=(0,2) + thv_Dm_4=(0,3) + thv_Sn0=(16,19) + thv_Sd0=(12,15) + thv_Sm0=(0,3) + thv_Sm0next=(0,3) + thv_Sn1=(16,19) + thv_Sd1=(12,15) + thv_Sm1=(0,3) + thv_Sm1next=(0,3) + thv_cmode=(8,11) + + thv_Rd=(12,15) + thv_Rt=(12,15) + thv_Rn=(16,19) + thv_Rm=(0,3) + thv_Rt2=(8,11) + thv_immed=(0,7) + + # Arbitrary bit fields for 32-bit Big Endian Thumb + thv_bit31=(31,31) + thv_bit30=(30,30) + thv_bit29=(29,29) + thv_bit28=(28,28) + thv_bit23=(23,23) + thv_bit21=(21,21) + thv_bit20=(20,20) + thv_bit07=(7,7) + thv_bit06=(6,6) + thv_bit00=(0,0) + thv_c2931=(29,31) + thv_c2831=(28,31) + thv_c2828=(28,28) + thv_c2627=(26,27) + thv_c2531=(25,31) + thv_c2527=(25,27) + thv_c2525=(25,25) + thv_c2427=(24,27) + thv_c2424=(24,24) + thv_c2331=(23,31) + thv_c2327=(23,27) + thv_c2324=(23,24) + thv_c2323=(23,23) + thv_c2222=(22,22) + thv_c2131=(21,31) + thv_c2127=(21,27) + thv_c2124=(21,24) + thv_c2123=(21,23) + thv_c2122=(21,22) + thv_c2121=(21,21) + thv_c2031=(20,31) + thv_c2027=(20,27) + thv_c2024=(20,24) + thv_c2022=(20,22) + thv_c2021=(20,21) + thv_c2020=(20,20) + thv_c1921=(19,21) + thv_c1919=(19,19) + thv_c1821=(18,21) + thv_c1819=(18,19) + thv_c1818=(18,18) + thv_c1721=(17,21) + thv_c1719=(17,19) + thv_c1718=(17,18) + thv_c1717=(17,17) + thv_c1631=(16,31) + thv_c1627=(16,27) + thv_c1621=(16,21) + thv_c1620=(16,20) + thv_c1619=(16,19) + thv_c1618=(16,18) + thv_c1617=(16,17) + thv_c1616=(16,16) + thv_c1515=(15,15) + thv_c1415=(14,15) + thv_c1414=(14,14) + thv_c1313=(13,13) + thv_c1215=(12,15) + thv_c1212=(12,12) + thv_c1111=(11,11) + thv_c1011=(10,11) + thv_c1010=(10,10) + thv_c0911=(9,11) + thv_c0909=(9,9) + thv_c0811=(8,11) + thv_c0809=(8,9) + thv_c0808=(8,8) + thv_c0711=(7,11) + thv_c0709=(7,9) + thv_c0708=(7,8) + thv_c0707=(7,7) + thv_c0611=(6,11) + thv_c0607=(6,7) + thv_c0606=(6,6) + thv_c0508=(5,8) + thv_c0507=(5,7) + thv_c0506=(5,6) + thv_c0505=(5,5) + thv_c0431=(4,31) + thv_c0427=(4,27) + thv_c0411=(4,11) + thv_c0409=(4,9) + thv_c0407=(4,7) + thv_c0406=(4,6) + thv_c0405=(4,5) + thv_c0404=(4,4) + thv_c0303=(3,3) + thv_c0215=(2,15) + thv_c0202=(2,2) + thv_c0101=(1,1) + thv_c0104=(1,4) + thv_c0031=(0,31) + thv_c0027=(0,27) + thv_c0015=(0,15) + thv_c0011=(0,11) + thv_c0010=(0,10) + thv_c0008=(0,8) + thv_c0007=(0,7) + thv_c0006=(0,6) + thv_c0004=(0,4) + thv_c0003=(0,3) + thv_c0001=(0,1) + thv_c0000=(0,0) + thv_option=(0,3) + +@endif # ENDIAN = "big" + +; + +attach variables [ Rn Rd Rs Rm RdHi RdLo smRd smRn smRm smRa RmHi RnLo ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; +attach variables [ Rd2 Rm2 ] [ r1 _ r3 _ r5 _ r7 _ r9 _ r11 _ sp _ _ _ ]; # see LDREXD +attach variables [ CRd CRn CRm ] [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 cr11 cr12 cr13 cr14 cr15 ]; +attach variables [ thv_Rd thv_Rn thv_Rt thv_Rt2 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; + +attach names [ cpn ] [ p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 ]; +attach names [ ibOption ] [ opt0 opt1 opt2 opt3 opt4 opt5 opt6 opt7 opt8 opt9 opt10 opt11 opt12 opt13 opt14 SY ]; +attach names [ dbOption ] [ opt0 opt1 OSHST OSH opt4 opt5 NSHST NSH opt8 opt9 ISHST ISH opt12 opt13 ST SY ]; + +macro addflags(op1,op2) { + tmpCY = carry(op1,op2); + tmpOV = 
scarry(op1,op2); +} + +# NOTE: unlike x86, carry flag is SET if there is NO borrow +macro subflags(op1,op2) { + tmpCY = op2 <= op1; + tmpOV = sborrow(op1,op2); +} + +macro logicflags() { + tmpCY = shift_carry; + tmpOV = OV; +} + +macro CVunaffected() { + tmpCY = CY; + tmpOV = OV; +} + +macro resultflags(result) { + tmpNG = result s< 0; + tmpZR = result == 0; +} + +rn: pc is pc & c1619=15 { tmp:4 = inst_start+8; export tmp; } +rn: Rn is Rn { export Rn; } + +rm: pc is pc & Rm=15 { tmp:4 = inst_start+8; export tmp; } +rm: Rm is Rm { export Rm; } + +rs: pc is pc & Rs=15 { tmp:4 = inst_start+8; export tmp; } +rs: Rs is Rs { export Rs; } + +cc: "eq" is cond=0 { export ZR; } +cc: "ne" is cond=1 { tmp:1 = !ZR; export tmp; } +cc: "cs" is cond=2 { export CY; } +cc: "cc" is cond=3 { tmp:1 = !CY; export tmp; } +cc: "mi" is cond=4 { export NG; } +cc: "pl" is cond=5 { tmp:1 = !NG; export tmp; } +cc: "vs" is cond=6 { export OV; } +cc: "vc" is cond=7 { tmp:1 = !OV; export tmp; } +cc: "hi" is cond=8 { tmp:1 = CY && (!ZR); export tmp; } +cc: "ls" is cond=9 { tmp:1 = (!CY) || ZR; export tmp; } +cc: "ge" is cond=10 { tmp:1 = (NG==OV); export tmp; } +cc: "lt" is cond=11 { tmp:1 = (NG!=OV); export tmp; } +cc: "gt" is cond=12 { tmp:1 = (!ZR) && (NG==OV); export tmp; } +cc: "le" is cond=13 { tmp:1 = ZR || (NG!=OV); export tmp; } + +COND: cc is $(AMODE) & cc { if (!cc) goto inst_next; } # Execute conditionally +COND: is $(AMODE) & cond=14 { } # Always execute +#COND: is $(AMODE) & cond=15 { } # Always execute - deprecated, should not be used. + +@if defined(INCLUDE_NEON) # Unconditional Neon Thumb instructions share many Conditional Neon ARM constructors +COND: ItCond is TMode=1 & thv_c2831=14 & cond & ItCond { } # ItCond execute +#COND: ItCond is TMode=1 & thv_c2831=15 & cond & ItCond { } # ItCond execute +@endif + +SBIT_CZNO: is S20=0 { } # Do nothing to the flag bits +SBIT_CZNO: "s" is S20=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } +SBIT_ZN: is S20=0 { } # Do nothing to the flag bits +SBIT_ZN: "s" is S20=1 { ZR = tmpZR; NG = tmpNG; } + +Addr24: reloc is addr24 [ reloc = (inst_next+4) + (4*addr24); ] { export *[ram]:4 reloc; } + +# see blx(1) instruction +@if defined(T_VARIANT) && defined(VERSION_5) + +HAddr24: reloc is addr24 & H24 + [ reloc = ((inst_next+4) + (4*addr24) + (2*H24)) & 0xFFFFFFFF; TMode=1; globalset(reloc,TMode); ] + { export *[ram]:4 reloc; } +@endif # T_VARIANT && VERSION_5 + +@if defined(VERSION_5E) + +XBIT: "b" is x=0 & smRn { local tmpRn = smRn; tmp:2 = tmpRn:2; export tmp; } +XBIT: "t" is x=1 & smRn { local tmpRn = smRn; tmp:2 = tmpRn(2); export tmp; } + +YBIT: "b" is y=0 & smRm { local tmpRm = smRm; tmp:2 = tmpRm:2; export tmp; } +YBIT: "t" is y=1 & smRm { local tmpRm = smRm; tmp:2 = tmpRm(2); export tmp; } + +@endif # VERSION_5E + + + +##################### +###### shift1 ###### +##################### + +shift1: "#"^value is I25=1 & immed & rotate + [ value=((immed<<(32-rotate*2))|(immed>>(rotate*2))) $and 0xffffffff; ] +{ + local tmp:4 = (value >> 31); shift_carry = ((rotate == 0:1) && CY) || ((rotate != 0:1) && tmp(0)); export *[const]:4 value; +} + +#################### +define pcodeop coproc_moveto_Main_ID; +define pcodeop coproc_moveto_Cache_Type; +define pcodeop coproc_moveto_TCM_Status; +define pcodeop coproc_moveto_TLB_Type; +define pcodeop coproc_moveto_Control; +define pcodeop coproc_moveto_Auxiliary_Control; +define pcodeop coproc_moveto_Coprocessor_Access_Control; +define pcodeop coproc_moveto_Secure_Configuration; +define pcodeop coproc_moveto_Secure_Debug_Enable; 
+define pcodeop coproc_moveto_NonSecure_Access_Control; +define pcodeop coproc_moveto_Translation_table_base_0; +define pcodeop coproc_moveto_Translation_table_base_1; +define pcodeop coproc_moveto_Translation_table_control; +define pcodeop coproc_moveto_Domain_Access_Control; +define pcodeop coproc_moveto_Data_Fault_Status; +define pcodeop coproc_moveto_Instruction_Fault_Status; +define pcodeop coproc_moveto_Instruction_Fault_Address; +define pcodeop coproc_moveto_Fault_Address; +define pcodeop coproc_moveto_Instruction_Fault; +define pcodeop coproc_moveto_Wait_for_interrupt; +define pcodeop coproc_moveto_Invalidate_Entire_Instruction; +define pcodeop coproc_moveto_Invalidate_Instruction_Cache_by_MVA; +define pcodeop coproc_moveto_Flush_Prefetch_Buffer; +define pcodeop coproc_moveto_Invalidate_Entire_Data_cache; +define pcodeop coproc_moveto_Invalidate_Entire_Data_by_MVA; +define pcodeop coproc_moveto_Invalidate_Entire_Data_by_Index; +define pcodeop coproc_moveto_Clean_Entire_Data_Cache; +define pcodeop coproc_moveto_Clean_Data_Cache_by_MVA; +define pcodeop coproc_moveto_Clean_Data_Cache_by_Index; +define pcodeop coproc_moveto_Data_Synchronization; +define pcodeop coproc_moveto_Data_Memory_Barrier; +define pcodeop coproc_moveto_Invalidate_Entire_Data_Cache; +define pcodeop coproc_moveto_Invalidate_Data_Cache_by_MVA; +define pcodeop coproc_moveto_Invalidate_unified_TLB_unlocked; +define pcodeop coproc_moveto_Invalidate_unified_TLB_by_MVA; +define pcodeop coproc_moveto_Invalidate_unified_TLB_by_ASID_match; +define pcodeop coproc_moveto_FCSE_PID; +define pcodeop coproc_moveto_Context_ID; +define pcodeop coproc_moveto_User_RW_Thread_and_Process_ID; +define pcodeop coproc_moveto_User_R_Thread_and_Process_ID; +define pcodeop coproc_moveto_Privileged_only_Thread_and_Process_ID; +define pcodeop coproc_moveto_Peripherial_Port_Memory_Remap; +define pcodeop coproc_moveto_Feature_Identification; +define pcodeop coproc_moveto_ISA_Feature_Identification; +define pcodeop coproc_moveto_Peripheral_Port_Memory_Remap; +define pcodeop coproc_moveto_Control_registers; +define pcodeop coproc_moveto_Security_world_control; +define pcodeop coproc_moveto_Translation_table; +define pcodeop coproc_moveto_Instruction_cache; +define pcodeop coproc_moveto_Data_cache_operations; +define pcodeop coproc_moveto_Identification_registers; +define pcodeop coproc_moveto_Peripheral_System; + +define pcodeop coproc_movefrom_Main_ID; +define pcodeop coproc_movefrom_Cache_Type; +define pcodeop coproc_movefrom_TCM_Status; +define pcodeop coproc_movefrom_TLB_Type; +define pcodeop coproc_movefrom_Control; +define pcodeop coproc_movefrom_Auxiliary_Control; +define pcodeop coproc_movefrom_Coprocessor_Access_Control; +define pcodeop coproc_movefrom_Secure_Configuration; +define pcodeop coproc_movefrom_Secure_Debug_Enable; +define pcodeop coproc_movefrom_NonSecure_Access_Control; +define pcodeop coproc_movefrom_Translation_table_base_0; +define pcodeop coproc_movefrom_Translation_table_base_1; +define pcodeop coproc_movefrom_Translation_table_control; +define pcodeop coproc_movefrom_Domain_Access_Control; +define pcodeop coproc_movefrom_Data_Fault_Status; +define pcodeop coproc_movefrom_Instruction_Fault; +define pcodeop coproc_movefrom_Fault_Address; +define pcodeop coproc_movefrom_Instruction_Fault_Status; +define pcodeop coproc_movefrom_Instruction_Fault_Address; +define pcodeop coproc_movefrom_Wait_for_interrupt; +define pcodeop coproc_movefrom_Invalidate_Entire_Instruction; +define pcodeop 
coproc_movefrom_Invalidate_Instruction_Cache_by_MVA;
+define pcodeop coproc_movefrom_Flush_Prefetch_Buffer;
+define pcodeop coproc_movefrom_Invalidate_Entire_Data_cache;
+define pcodeop coproc_movefrom_Invalidate_Entire_Data_by_MVA;
+define pcodeop coproc_movefrom_Invalidate_Entire_Data_by_Index;
+define pcodeop coproc_movefrom_Clean_Entire_Data_Cache;
+define pcodeop coproc_movefrom_Clean_Data_Cache_by_MVA;
+define pcodeop coproc_movefrom_Clean_Data_Cache_by_Index;
+define pcodeop coproc_movefrom_Data_Synchronization;
+define pcodeop coproc_movefrom_Data_Memory_Barrier;
+define pcodeop coproc_movefrom_Invalidate_Entire_Data_Cache;
+define pcodeop coproc_movefrom_Invalidate_Data_Cache_by_MVA;
+define pcodeop coproc_movefrom_Invalidate_unified_TLB_unlocked;
+define pcodeop coproc_movefrom_Invalidate_unified_TLB_by_MVA;
+define pcodeop coproc_movefrom_Invalidate_unified_TLB_by_ASID_match;
+define pcodeop coproc_movefrom_FCSE_PID;
+define pcodeop coproc_movefrom_Context_ID;
+define pcodeop coproc_movefrom_User_RW_Thread_and_Process_ID;
+define pcodeop coproc_movefrom_User_R_Thread_and_Process_ID;
+define pcodeop coproc_movefrom_Privileged_only_Thread_and_Process_ID;
+define pcodeop coproc_movefrom_Peripherial_Port_Memory_Remap;
+define pcodeop coproc_movefrom_Feature_Identification;
+define pcodeop coproc_movefrom_ISA_Feature_Identification;
+define pcodeop coproc_movefrom_Peripheral_Port_Memory_Remap;
+define pcodeop coproc_movefrom_Control_registers;
+define pcodeop coproc_movefrom_Security_world_control;
+define pcodeop coproc_movefrom_Translation_table;
+define pcodeop coproc_movefrom_Instruction_cache;
+define pcodeop coproc_movefrom_Data_cache_operations;
+define pcodeop coproc_movefrom_Identification_registers;
+define pcodeop coproc_movefrom_Peripheral_System;
+
+mcrOperands: cpn,opc1,Rd,CRn,CRm,opc2 is CRm & opc2 & cpn & CRn & opc1 & Rd { }
+
+#####################
+######  shift2  ######
+#####################
+
+shift2: rm is I25=0 & sftimm=0 & c0406=0 & rm
+{
+    shift_carry = CY; export rm;
+}
+
+shift2: rm, "lsl #"^sftimm is I25=0 & sftimm & c0406=0 & rm
+{
+    local tmp1=(rm>>(32-sftimm))&1; shift_carry=tmp1(0); local tmp2=rm<<sftimm; export tmp2;
+}
+
+shift2: rm, "lsr #32" is I25=0 & sftimm=0 & c0406=2 & rm
+{
+    local tmp1=(rm>>31); shift_carry=tmp1(0); tmp2:4=0; export tmp2;
+}
+
+shift2: rm, "lsr #"^sftimm is I25=0 & sftimm & c0406=2 & rm
+{
+    local tmp1=(rm>>(sftimm-1))&1; shift_carry=tmp1(0); local tmp2=rm>>sftimm; export tmp2;
+}
+
+shift2: rm, "asr #32" is I25=0 & sftimm=0 & c0406=4 & rm
+{
+    local tmp1=(rm>>31); shift_carry=tmp1(0); local tmp2 = rm s>> 32; export tmp2;
+}
+
+shift2: rm, "asr #"^sftimm is I25=0 & sftimm & c0406=4 & rm
+{
+    local tmp1=(rm>>(sftimm-1))&1; shift_carry=tmp1(0); local tmp2=rm s>> sftimm; export tmp2;
+}
+
+shift2: rm, "rrx" is I25=0 & c0411=6 & rm
+{
+    local tmp1=rm&1; shift_carry=tmp1(0); local tmp2 = (zext(CY)<<31)|(rm>>1); export tmp2;
+}
+
+shift2: rm, "ror #"^sftimm is I25=0 & sftimm & c0406=6 & rm
+{
+    local tmp1=(rm>>sftimm)|(rm<<(32-sftimm)); local tmp2=(tmp1>>31)&1; shift_carry=tmp2(0); export tmp1;
+}
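+
+# Worked example (illustrative sketch, not from the upstream file): for the
+# operand "r1, lsl #4", shift2 exports r1 << 4 and sets shift_carry to bit 28
+# of r1, the last bit shifted out. An S-suffixed data-processing instruction
+# then copies shift_carry into CY via logicflags() and the SBIT_CZNO
+# subtable defined above.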
rm, "ror "^rs is I25=0 & rs & c0407=7 & rm +{ + local sa=rs&0x1f; local tmp1=(rm>>sa)|(rm<<(32-sa)); local tmp2=tmp1>>31; shift_carry=(((rs&0xff)==0:4)&&CY) || (((rs&0xff)!=0:4)&&tmp2(0)); export tmp1; +} + + +##################### +###### shift4 ###### +##################### +@if defined(VERSION_6) + +shift4: rm is sftimm=0 & sh=0 & rm +{ + shift_carry = CY; export rm; +} + +shift4: rm, "lsl #"^sftimm is sftimm & sh=0 & rm +{ + local tmp1=(rm>>(32-sftimm))&1; shift_carry=tmp1(0); local tmp2=rm<>31); shift_carry=tmp1(0); local tmp2 = rm s>> 32; export tmp2; +} + +shift4: rm, "asr #"^sftimm is sftimm & sh=1 & rm +{ + local tmp1=(rm>>(sftimm-1))&1; shift_carry=tmp1(0); local tmp2=rm s>> sftimm; export tmp2; +} + +@endif # VERSION_6 + +##################### +###### ror1 ###### +##################### +@if defined(VERSION_6) + +ror1: rm is c1011=0 & rm +{ + local tmp = rm; + export tmp; +} + +ror1: rm, "ror #8" is c1011=1 & rm +{ + local tmp = (rm <<24)| (rm >> 8); + export tmp; +} + +ror1: rm, "ror #16" is c1011=2 & rm +{ + local tmp = (rm << 16) | (rm >> 16); + export tmp; +} + +ror1: rm, "ror #24" is c1011=3 & rm +{ + local tmp = (rm << 8) | (rm >> 24); + export tmp; +} + +@endif # VERSION_6 + +##################### +# addrmode2 is the resulting address for Addressing Mode 2 +# it takes care of bits 27-0, except for the B and L flags and the Rd register +# the Rn register is taken care of including any possible write-back +# it returns a varnode containing the address +##################### + +# addr2shift is the register rm shifting portion of Addressing Mode 2 +addr2shift: rm is c0411=0 & rm { export rm; } +addr2shift: rm,"lsl #"^sftimm is sftimm & shft=0 & c0404=0 & rm { local tmp = rm << sftimm; export tmp; } +addr2shift: rm,"lsr #"^sftimm is sftimm & shft=1 & c0404=0 & rm { local tmp = rm >> sftimm; export tmp; } +addr2shift: rm,"lsr #32" is sftimm=0 & shft=1 & c0404=0 & rm { tmp:4 = 0; export tmp; } +addr2shift: rm,"asr #"^sftimm is sftimm & shft=2 & c0404=0 & rm { local tmp = rm s>> sftimm; export tmp; } +addr2shift: rm,"asr #32" is sftimm=0 & shft=2 & c0404=0 & rm { local tmp = rm s>> 32; export tmp; } +addr2shift: rm,"ror #"^sftimm is sftimm & shft=3 & c0404=0 & rm { local tmp = (rm>>sftimm) | (rm<<(32-sftimm)); export tmp; } +addr2shift: rm,"rrx" is sftimm=0 & shft=3 & c0404=0 & rm { tmp:4 = zext(CY); tmp = (tmp<<31) | (rm>>1); export tmp; } + + # no writeback + +addrmode2: [reloff] is I25=0 & P24=1 & U23=1 & W21=0 & c1619=15 & offset_12 + [ reloff = inst_start + 8 + offset_12; ] +{ + export *[const]:4 reloff; +} + +addrmode2: [reloff] is I25=0 & P24=1 & U23=0 & W21=0 & c1619=15 & offset_12 + [ reloff = inst_start + 8 - offset_12; ] +{ + export *[const]:4 reloff; +} + +addrmode2: [rn,"#"^offset_12] is I25=0 & P24=1 & U23=1 & W21=0 & rn & offset_12 { local tmp = rn + offset_12; export tmp; } +addrmode2: [rn,"#"^noff] is I25=0 & P24=1 & U23=0 & W21=0 & rn & offset_12 [ noff = -offset_12; ] { local tmp = rn + noff; export tmp; } +addrmode2: [rn,addr2shift] is I25=1 & P24=1 & U23=1 & W21=0 & rn & addr2shift { local tmp = rn + addr2shift; export tmp; } +addrmode2: [rn,-addr2shift] is I25=1 & P24=1 & U23=0 & W21=0 & rn & addr2shift { local tmp = rn - addr2shift; export tmp; } + # pre-indexed writeback +addrmode2: [rn,"#"^offset_12]! is I25=0 & P24=1 & U23=1 & W21=1 & rn & offset_12 { rn = rn + offset_12; export rn; } +addrmode2: [rn,"#"^noff]! is I25=0 & P24=1 & U23=0 & W21=1 & rn & offset_12 [ noff = -offset_12; ] { rn = rn + noff; export rn; } +addrmode2: [rn,addr2shift]! 
is I25=1 & P24=1 & U23=1 & W21=1 & rn & addr2shift { rn = rn + addr2shift; export rn; } +addrmode2: [rn,-addr2shift]! is I25=1 & P24=1 & U23=0 & W21=1 & rn & addr2shift { rn = rn - addr2shift; export rn; } + # post-indexed writeback +addrmode2: [rn],"#"^offset_12 is I25=0 & P24=0 & U23=1 & W21=0 & rn & offset_12 { local tmp = rn; rn = rn + offset_12; export tmp; } +addrmode2: [rn],"#"^noff is I25=0 & P24=0 & U23=0 & W21=0 & rn & offset_12 [ noff = -offset_12; ] { local tmp = rn; rn = rn + noff; export tmp; } +addrmode2: [rn],addr2shift is I25=1 & P24=0 & U23=1 & W21=0 & rn & addr2shift { local tmp = rn; rn = rn + addr2shift; export tmp; } +addrmode2: [rn],-addr2shift is I25=1 & P24=0 & U23=0 & W21=0 & rn & addr2shift { local tmp = rn; rn = rn - addr2shift; export tmp; } + # special-form post-indexed writeback for ldrbt, ldrt, strbt, etc. +addrmode2: [rn],"#"^offset_12 is I25=0 & P24=0 & U23=1 & W21=1 & rn & offset_12 { local tmp = rn; rn = rn + offset_12; export tmp; } +addrmode2: [rn],"#"^noff is I25=0 & P24=0 & U23=0 & W21=1 & rn & offset_12 [ noff = -offset_12; ] { local tmp = rn; rn = rn + noff; export tmp; } +addrmode2: [rn],addr2shift is I25=1 & P24=0 & U23=1 & W21=1 & rn & addr2shift { local tmp = rn; rn = rn + addr2shift; export tmp; } +addrmode2: [rn],-addr2shift is I25=1 & P24=0 & U23=0 & W21=1 & rn & addr2shift { local tmp = rn; rn = rn - addr2shift; export tmp; } + +########################### +# addrmode3 is the resulting address for Addressing Mode 3 +# it takes care of bits 27-0, except for the L, S, and H flags and the Rd register +# the Rn register is taken care of including any possible write-back +# it returns a varnode containing the address +########################### + +addrmode3: [reloff] is P24=1 & U23=1 & c2122=2 & c1619=15 & immedH & c0707=1 & c0404=1 & immedL + [ reloff=inst_start+8+((immedH<<4) | immedL);] +{ + export *:4 reloff; +} + +addrmode3: [reloff] is P24=1 & U23=0 & c2122=2 & c1619=15 & immedH & c0707=1 & c0404=1 & immedL + [ reloff=inst_start+8-((immedH<<4) | immedL);] +{ + export *:4 reloff; +} + +addrmode3: [rn,"#"^off8] is P24=1 & U23=1 & c2122=2 & rn & immedH & c0707=1 & c0404=1 & immedL + [ off8=(immedH<<4)|immedL;] +{ + local tmp = rn + off8; export tmp; +} + +addrmode3: [rn,"#"^noff8] is P24=1 & U23=0 & c2122=2 & rn & immedH & c0707=1 & c0404=1 & immedL + [ noff8=-((immedH<<4)|immedL);] +{ + local tmp = rn + noff8; export tmp; +} + +addrmode3: [rn,rm] is P24=1 & U23=1 & c2122=0 & rn & c0811=0 & c0707=1 & c0404=1 & rm +{ + local tmp = rn + rm; export tmp; +} + +addrmode3: [rn,-rm] is P24=1 & U23=0 & c2122=0 & rn & c0811=0 & c0707=1 & c0404=1 & rm +{ + local tmp = rn - rm; export tmp; +} + +addrmode3: [rn,"#"^off8]! is P24=1 & U23=1 & c2122=3 & rn & immedH & c0707=1 & c0404=1 & immedL + [ off8=(immedH<<4)|immedL;] +{ + rn=rn + off8; export rn; +} + +addrmode3: [rn,"#"^noff8]! is P24=1 & U23=0 & c2122=3 & rn & immedH & c0707=1 & c0404=1 & immedL + [ noff8=-((immedH<<4)|immedL);] +{ + rn=rn + noff8; export rn; +} + +addrmode3: [rn,rm]! is P24=1 & U23=1 & c2122=1 & rn & c0811=0 & c0707=1 & c0404=1 & rm +{ + rn = rn+rm; export rn; +} + +addrmode3: [rn,-rm]! 
is P24=1 & U23=0 & c2122=1 & rn & c0811=0 & c0707=1 & c0404=1 & rm +{ + rn = rn - rm; export rn; +} + +addrmode3: [rn],"#"^off8 is P24=0 & U23=1 & c2122=2 & rn & immedH & c0707=1 & c0404=1 & immedL + [ off8=(immedH<<4)|immedL;] +{ + local tmp=rn; rn=rn + off8; export tmp; +} + +addrmode3: [rn],"#"^noff8 is P24=0 & U23=0 & c2122=2 & rn & immedH & c0707=1 & c0404=1 & immedL + [ noff8=-((immedH<<4)|immedL);] +{ + local tmp=rn; rn=rn + noff8; export tmp; +} + +addrmode3: [rn],rm is P24=0 & U23=1 & c2122=0 & rn & c0811=0 & c0707=1 & c0404=1 & rm +{ + local tmp=rn; rn=rn+rm; export tmp; +} + +addrmode3: [rn],-rm is P24=0 & U23=0 & c2122=0 & rn & c0811=0 & c0707=1 & c0404=1 & rm +{ + local tmp=rn; rn=rn-rm; export tmp; +} + +############################ +# Addressing Mode 4. These 4 types take care of the register_list argument in Addressing Mode 4. +############################ + + +# ldlist_inc is the list of registers to be loaded using IA or IB in Addressing Mode 4 +linc15: r0 is c0000=1 & r0 { r0 = * mult_addr; mult_addr = mult_addr + 4; } +linc15: is c0000=0 { } +linc14: linc15 r1 is c0101=1 & linc15 & r1 { r1 = * mult_addr; mult_addr = mult_addr + 4; } +linc14: r1 is c0101=1 & c0000=0 & r1 { r1 = * mult_addr; mult_addr = mult_addr + 4; } +linc14: linc15 is c0101=0 & linc15 { } +linc13: linc14 r2 is c0202=1 & linc14 & r2 { r2 = * mult_addr; mult_addr = mult_addr + 4; } +linc13: r2 is c0202=1 & c0001=0 & r2 { r2 = * mult_addr; mult_addr = mult_addr + 4; } +linc13: linc14 is c0202=0 & linc14 { } +linc12: linc13 r3 is c0303=1 & linc13 & r3 { r3 = * mult_addr; mult_addr = mult_addr + 4; } +linc12: r3 is c0303=1 & c0002=0 & r3 { r3 = * mult_addr; mult_addr = mult_addr + 4; } +linc12: linc13 is c0303=0 & linc13 { } +linc11: linc12 r4 is c0404=1 & linc12 & r4 { r4 = * mult_addr; mult_addr = mult_addr + 4; } +linc11: r4 is c0404=1 & c0003=0 & r4 { r4 = * mult_addr; mult_addr = mult_addr + 4; } +linc11: linc12 is c0404=0 & linc12 { } +linc10: linc11 r5 is c0505=1 & linc11 & r5 { r5 = * mult_addr; mult_addr = mult_addr + 4; } +linc10: r5 is c0505=1 & c0004=0 & r5 { r5 = * mult_addr; mult_addr = mult_addr + 4; } +linc10: linc11 is c0505=0 & linc11 { } +linc9: linc10 r6 is c0606=1 & linc10 & r6 { r6 = * mult_addr; mult_addr = mult_addr + 4; } +linc9: r6 is c0606=1 & c0005=0 & r6 { r6 = * mult_addr; mult_addr = mult_addr + 4; } +linc9: linc10 is c0606=0 & linc10 { } +linc8: linc9 r7 is c0707=1 & linc9 & r7 { r7 = * mult_addr; mult_addr = mult_addr + 4; } +linc8: r7 is c0707=1 & c0006=0 & r7 { r7 = * mult_addr; mult_addr = mult_addr + 4; } +linc8: linc9 is c0707=0 & linc9 { } +linc7: linc8 r8 is c0808=1 & linc8 & r8 { r8 = * mult_addr; mult_addr = mult_addr + 4; } +linc7: r8 is c0808=1 & c0007=0 & r8 { r8 = * mult_addr; mult_addr = mult_addr + 4; } +linc7: linc8 is c0808=0 & linc8 { } +linc6: linc7 r9 is c0909=1 & linc7 & r9 { r9 = * mult_addr; mult_addr = mult_addr + 4; } +linc6: r9 is c0909=1 & c0008=0 & r9 { r9 = * mult_addr; mult_addr = mult_addr + 4; } +linc6: linc7 is c0909=0 & linc7 { } +linc5: linc6 r10 is c1010=1 & linc6 & r10 { r10 = * mult_addr; mult_addr = mult_addr + 4; } +linc5: r10 is c1010=1 & c0009=0 & r10 { r10 = * mult_addr; mult_addr = mult_addr + 4; } +linc5: linc6 is c1010=0 & linc6 { } +linc4: linc5 r11 is c1111=1 & linc5 & r11 { r11 = * mult_addr; mult_addr = mult_addr + 4; } +linc4: r11 is c1111=1 & c0010=0 & r11 { r11 = * mult_addr; mult_addr = mult_addr + 4; } +linc4: linc5 is c1111=0 & linc5 { } +linc3: linc4 r12 is c1212=1 & linc4 & r12 { r12 = * mult_addr; mult_addr = 
mult_addr + 4; } +linc3: r12 is c1212=1 & c0011=0 & r12 { r12 = * mult_addr; mult_addr = mult_addr + 4; } +linc3: linc4 is c1212=0 & linc4 { } +linc2: linc3 sp is c1313=1 & linc3 & sp { sp = * mult_addr; mult_addr = mult_addr + 4; } +linc2: sp is c1313=1 & c0012=0 & sp { sp = * mult_addr; mult_addr = mult_addr + 4; } +linc2: linc3 is c1313=0 & linc3 { } +linc1: linc2 lr is c1414=1 & linc2 & lr { lr = * mult_addr; mult_addr = mult_addr + 4; } +linc1: lr is c1414=1 & c0013=0 & lr { lr = * mult_addr; mult_addr = mult_addr + 4; } +linc1: linc2 is c1414=0 & linc2 { } +linc0: linc1 pc is c1515=1 & linc1 & pc { pc = * mult_addr; mult_addr = mult_addr + 4; } +linc0: pc is c1515=1 & c0014=0 & pc { pc = * mult_addr; mult_addr = mult_addr + 4; } +linc0: linc1 is c1515=0 & linc1 { } +ldlist_inc: {linc0} is linc0 { } + +# stlist_inc is the list of registers to be stored using IA or IB in Addressing Mode 4 +sinc15: r0 is c0000=1 & r0 { * mult_addr = r0; mult_addr = mult_addr + 4; } +sinc15: is c0000=0 { } +sinc14: sinc15 r1 is c0101=1 & sinc15 & r1 { * mult_addr = r1; mult_addr = mult_addr + 4; } +sinc14: r1 is c0101=1 & c0000=0 & r1 { * mult_addr = r1; mult_addr = mult_addr + 4; } +sinc14: sinc15 is c0101=0 & sinc15 { } +sinc13: sinc14 r2 is c0202=1 & sinc14 & r2 { * mult_addr = r2; mult_addr = mult_addr + 4; } +sinc13: r2 is c0202=1 & c0001=0 & r2 { * mult_addr = r2; mult_addr = mult_addr + 4; } +sinc13: sinc14 is c0202=0 & sinc14 { } +sinc12: sinc13 r3 is c0303=1 & sinc13 & r3 { * mult_addr = r3; mult_addr = mult_addr + 4; } +sinc12: r3 is c0303=1 & c0002=0 & r3 { * mult_addr = r3; mult_addr = mult_addr + 4; } +sinc12: sinc13 is c0303=0 & sinc13 { } +sinc11: sinc12 r4 is c0404=1 & sinc12 & r4 { * mult_addr = r4; mult_addr = mult_addr + 4; } +sinc11: r4 is c0404=1 & c0003=0 & r4 { * mult_addr = r4; mult_addr = mult_addr + 4; } +sinc11: sinc12 is c0404=0 & sinc12 { } +sinc10: sinc11 r5 is c0505=1 & sinc11 & r5 { * mult_addr = r5; mult_addr = mult_addr + 4; } +sinc10: r5 is c0505=1 & c0004=0 & r5 { * mult_addr = r5; mult_addr = mult_addr + 4; } +sinc10: sinc11 is c0505=0 & sinc11 { } +sinc9: sinc10 r6 is c0606=1 & sinc10 & r6 { * mult_addr = r6; mult_addr = mult_addr + 4; } +sinc9: r6 is c0606=1 & c0005=0 & r6 { * mult_addr = r6; mult_addr = mult_addr + 4; } +sinc9: sinc10 is c0606=0 & sinc10 { } +sinc8: sinc9 r7 is c0707=1 & sinc9 & r7 { * mult_addr = r7; mult_addr = mult_addr + 4; } +sinc8: r7 is c0707=1 & c0006=0 & r7 { * mult_addr = r7; mult_addr = mult_addr + 4; } +sinc8: sinc9 is c0707=0 & sinc9 { } +sinc7: sinc8 r8 is c0808=1 & sinc8 & r8 { * mult_addr = r8; mult_addr = mult_addr + 4; } +sinc7: r8 is c0808=1 & c0007=0 & r8 { * mult_addr = r8; mult_addr = mult_addr + 4; } +sinc7: sinc8 is c0808=0 & sinc8 { } +sinc6: sinc7 r9 is c0909=1 & sinc7 & r9 { * mult_addr = r9; mult_addr = mult_addr + 4; } +sinc6: r9 is c0909=1 & c0008=0 & r9 { * mult_addr = r9; mult_addr = mult_addr + 4; } +sinc6: sinc7 is c0909=0 & sinc7 { } +sinc5: sinc6 r10 is c1010=1 & sinc6 & r10 { * mult_addr = r10; mult_addr = mult_addr + 4; } +sinc5: r10 is c1010=1 & c0009=0 & r10 { * mult_addr = r10; mult_addr = mult_addr + 4; } +sinc5: sinc6 is c1010=0 & sinc6 { } +sinc4: sinc5 r11 is c1111=1 & sinc5 & r11 { * mult_addr = r11; mult_addr = mult_addr + 4; } +sinc4: r11 is c1111=1 & c0010=0 & r11 { * mult_addr = r11; mult_addr = mult_addr + 4; } +sinc4: sinc5 is c1111=0 & sinc5 { } +sinc3: sinc4 r12 is c1212=1 & sinc4 & r12 { * mult_addr = r12; mult_addr = mult_addr + 4; } +sinc3: r12 is c1212=1 & c0011=0 & r12 { * mult_addr = r12; 
mult_addr = mult_addr + 4; } +sinc3: sinc4 is c1212=0 & sinc4 { } +sinc2: sinc3 sp is c1313=1 & sinc3 & sp { * mult_addr = sp; mult_addr = mult_addr + 4; } +sinc2: sp is c1313=1 & c0012=0 & sp { * mult_addr = sp; mult_addr = mult_addr + 4; } +sinc2: sinc3 is c1313=0 & sinc3 { } +sinc1: sinc2 lr is c1414=1 & sinc2 & lr { * mult_addr = lr; mult_addr = mult_addr + 4; } +sinc1: lr is c1414=1 & c0013=0 & lr { * mult_addr = lr; mult_addr = mult_addr + 4; } +sinc1: sinc2 is c1414=0 & sinc2 { } +sinc0: sinc1 pc is c1515=1 & sinc1 & pc { *:4 mult_addr = (inst_start + 8); mult_addr = mult_addr + 4; } +sinc0: pc is c1515=1 & c0014=0 & pc { *:4 mult_addr = (inst_start + 8); mult_addr = mult_addr + 4; } +sinc0: sinc1 is c1515=0 & sinc1 { } +stlist_inc: { sinc0 } is sinc0 { } + + +# ldlist_dec is the list of registers to be loaded using DA or DB in Addressing Mode 4 +ldec15: pc is c1515=1 & pc { pc = * mult_addr; mult_addr = mult_addr - 4; } +ldec15: is c1515=0 { } +ldec14: lr ldec15 is c1414=1 & ldec15 & lr { lr = * mult_addr; mult_addr = mult_addr - 4; } +ldec14: lr is c1414=1 & c1515=0 & lr { lr = * mult_addr; mult_addr = mult_addr - 4; } +ldec14: ldec15 is c1414=0 & ldec15 { } +ldec13: sp ldec14 is c1313=1 & ldec14 & sp { sp = * mult_addr; mult_addr = mult_addr - 4; } +ldec13: sp is c1313=1 & c1415=0 & sp { sp = * mult_addr; mult_addr = mult_addr - 4; } +ldec13: ldec14 is c1313=0 & ldec14 { } +ldec12: r12 ldec13 is c1212=1 & ldec13 & r12 { r12 = * mult_addr; mult_addr = mult_addr - 4; } +ldec12: r12 is c1212=1 & c1315=0 & r12 { r12 = * mult_addr; mult_addr = mult_addr - 4; } +ldec12: ldec13 is c1212=0 & ldec13 { } +ldec11: r11 ldec12 is c1111=1 & ldec12 & r11 { r11 = * mult_addr; mult_addr = mult_addr - 4; } +ldec11: r11 is c1111=1 & c1215=0 & r11 { r11 = * mult_addr; mult_addr = mult_addr - 4; } +ldec11: ldec12 is c1111=0 & ldec12 { } +ldec10: r10 ldec11 is c1010=1 & ldec11 & r10 { r10 = * mult_addr; mult_addr = mult_addr - 4; } +ldec10: r10 is c1010=1 & c1115=0 & r10 { r10 = * mult_addr; mult_addr = mult_addr - 4; } +ldec10: ldec11 is c1010=0 & ldec11 { } +ldec9: r9 ldec10 is c0909=1 & ldec10 & r9 { r9 = * mult_addr; mult_addr = mult_addr - 4; } +ldec9: r9 is c0909=1 & c1015=0 & r9 { r9 = * mult_addr; mult_addr = mult_addr - 4; } +ldec9: ldec10 is c0909=0 & ldec10 { } +ldec8: r8 ldec9 is c0808=1 & ldec9 & r8 { r8 = * mult_addr; mult_addr = mult_addr - 4; } +ldec8: r8 is c0808=1 & c0915=0 & r8 { r8 = * mult_addr; mult_addr = mult_addr - 4; } +ldec8: ldec9 is c0808=0 & ldec9 { } +ldec7: r7 ldec8 is c0707=1 & ldec8 & r7 { r7 = * mult_addr; mult_addr = mult_addr - 4; } +ldec7: r7 is c0707=1 & c0815=0 & r7 { r7 = * mult_addr; mult_addr = mult_addr - 4; } +ldec7: ldec8 is c0707=0 & ldec8 { } +ldec6: r6 ldec7 is c0606=1 & ldec7 & r6 { r6 = * mult_addr; mult_addr = mult_addr - 4; } +ldec6: r6 is c0606=1 & c0715=0 & r6 { r6 = * mult_addr; mult_addr = mult_addr - 4; } +ldec6: ldec7 is c0606=0 & ldec7 { } +ldec5: r5 ldec6 is c0505=1 & ldec6 & r5 { r5 = * mult_addr; mult_addr = mult_addr - 4; } +ldec5: r5 is c0505=1 & c0615=0 & r5 { r5 = * mult_addr; mult_addr = mult_addr - 4; } +ldec5: ldec6 is c0505=0 & ldec6 { } +ldec4: r4 ldec5 is c0404=1 & ldec5 & r4 { r4 = * mult_addr; mult_addr = mult_addr - 4; } +ldec4: r4 is c0404=1 & c0515=0 & r4 { r4 = * mult_addr; mult_addr = mult_addr - 4; } +ldec4: ldec5 is c0404=0 & ldec5 { } +ldec3: r3 ldec4 is c0303=1 & ldec4 & r3 { r3 = * mult_addr; mult_addr = mult_addr - 4; } +ldec3: r3 is c0303=1 & c0415=0 & r3 { r3 = * mult_addr; mult_addr = mult_addr - 4; } +ldec3: 
ldec4 is c0303=0 & ldec4 { } +ldec2: r2 ldec3 is c0202=1 & ldec3 & r2 { r2 = * mult_addr; mult_addr = mult_addr - 4; } +ldec2: r2 is c0202=1 & c0315=0 & r2 { r2 = * mult_addr; mult_addr = mult_addr - 4; } +ldec2: ldec3 is c0202=0 & ldec3 { } +ldec1: r1 ldec2 is c0101=1 & ldec2 & r1 { r1 = * mult_addr; mult_addr = mult_addr - 4; } +ldec1: r1 is c0101=1 & c0215=0 & r1 { r1 = * mult_addr; mult_addr = mult_addr - 4; } +ldec1: ldec2 is c0101=0 & ldec2 { } +ldec0: r0 ldec1 is c0000=1 & ldec1 & r0 { r0 = * mult_addr; mult_addr = mult_addr - 4; } +ldec0: r0 is c0000=1 & c0115=0 & r0 { r0 = * mult_addr; mult_addr = mult_addr - 4; } +ldec0: ldec1 is c0000=0 & ldec1 { } +ldlist_dec: { ldec0 } is ldec0 { } + +# stlist_dec is the list of registers to be stored using DA or DB in Addressing Mode 4 +sdec15: pc is c1515=1 & pc { *:4 mult_addr = (inst_start + 8); mult_addr = mult_addr - 4; } +sdec15: is c1515=0 { } +sdec14: lr sdec15 is c1414=1 & sdec15 & lr { * mult_addr=lr; mult_addr = mult_addr - 4; } +sdec14: lr is c1414=1 & c1515=0 & lr { * mult_addr=lr; mult_addr = mult_addr - 4; } +sdec14: sdec15 is c1414=0 & sdec15 { } +sdec13: sp sdec14 is c1313=1 & sdec14 & sp { * mult_addr=sp; mult_addr = mult_addr - 4; } +sdec13: sp is c1313=1 & c1415=0 & sp { * mult_addr=sp; mult_addr = mult_addr - 4; } +sdec13: sdec14 is c1313=0 & sdec14 { } +sdec12: r12 sdec13 is c1212=1 & sdec13 & r12 { * mult_addr=r12; mult_addr = mult_addr - 4; } +sdec12: r12 is c1212=1 & c1315=0 & r12 { * mult_addr=r12; mult_addr = mult_addr - 4; } +sdec12: sdec13 is c1212=0 & sdec13 { } +sdec11: r11 sdec12 is c1111=1 & sdec12 & r11 { * mult_addr=r11; mult_addr = mult_addr - 4; } +sdec11: r11 is c1111=1 & c1215=0 & r11 { * mult_addr=r11; mult_addr = mult_addr - 4; } +sdec11: sdec12 is c1111=0 & sdec12 { } +sdec10: r10 sdec11 is c1010=1 & sdec11 & r10 { * mult_addr=r10; mult_addr = mult_addr - 4; } +sdec10: r10 is c1010=1 & c1115=0 & r10 { * mult_addr=r10; mult_addr = mult_addr - 4; } +sdec10: sdec11 is c1010=0 & sdec11 { } +sdec9: r9 sdec10 is c0909=1 & sdec10 & r9 { * mult_addr=r9; mult_addr = mult_addr - 4; } +sdec9: r9 is c0909=1 & c1015=0 & r9 { * mult_addr=r9; mult_addr = mult_addr - 4; } +sdec9: sdec10 is c0909=0 & sdec10 { } +sdec8: r8 sdec9 is c0808=1 & sdec9 & r8 { * mult_addr=r8; mult_addr = mult_addr - 4; } +sdec8: r8 is c0808=1 & c0915=0 & r8 { * mult_addr=r8; mult_addr = mult_addr - 4; } +sdec8: sdec9 is c0808=0 & sdec9 { } +sdec7: r7 sdec8 is c0707=1 & sdec8 & r7 { * mult_addr=r7; mult_addr = mult_addr - 4; } +sdec7: r7 is c0707=1 & c0815=0 & r7 { * mult_addr=r7; mult_addr = mult_addr - 4; } +sdec7: sdec8 is c0707=0 & sdec8 { } +sdec6: r6 sdec7 is c0606=1 & sdec7 & r6 { * mult_addr=r6; mult_addr = mult_addr - 4; } +sdec6: r6 is c0606=1 & c0715=0 & r6 { * mult_addr=r6; mult_addr = mult_addr - 4; } +sdec6: sdec7 is c0606=0 & sdec7 { } +sdec5: r5 sdec6 is c0505=1 & sdec6 & r5 { * mult_addr=r5; mult_addr = mult_addr - 4; } +sdec5: r5 is c0505=1 & c0615=0 & r5 { * mult_addr=r5; mult_addr = mult_addr - 4; } +sdec5: sdec6 is c0505=0 & sdec6 { } +sdec4: r4 sdec5 is c0404=1 & sdec5 & r4 { * mult_addr=r4; mult_addr = mult_addr - 4; } +sdec4: r4 is c0404=1 & c0515=0 & r4 { * mult_addr=r4; mult_addr = mult_addr - 4; } +sdec4: sdec5 is c0404=0 & sdec5 { } +sdec3: r3 sdec4 is c0303=1 & sdec4 & r3 { * mult_addr=r3; mult_addr = mult_addr - 4; } +sdec3: r3 is c0303=1 & c0415=0 & r3 { * mult_addr=r3; mult_addr = mult_addr - 4; } +sdec3: sdec4 is c0303=0 & sdec4 { } +sdec2: r2 sdec3 is c0202=1 & sdec3 & r2 { * mult_addr=r2; mult_addr = 
mult_addr - 4; } +sdec2: r2 is c0202=1 & c0315=0 & r2 { * mult_addr=r2; mult_addr = mult_addr - 4; } +sdec2: sdec3 is c0202=0 & sdec3 { } +sdec1: r1 sdec2 is c0101=1 & sdec2 & r1 { * mult_addr=r1; mult_addr = mult_addr - 4; } +sdec1: r1 is c0101=1 & c0215=0 & r1 { * mult_addr=r1; mult_addr = mult_addr - 4; } +sdec1: sdec2 is c0101=0 & sdec2 { } + +sdec0: r0 sdec1 is c0000=1 & sdec1 & r0 { * mult_addr=r0; mult_addr = mult_addr - 4; } +sdec0: r0 is c0000=1 & c0115=0 & r0 { * mult_addr=r0; mult_addr = mult_addr - 4; } +sdec0: sdec1 is c0000=0 & sdec1 { } + +stlist_dec: {sdec0} is sdec0 { } + +# reglist deals with Addressing Mode 4 +# it takes care of bits 0-27 +# we assume that alignment checking is turned on +reglist: rn,ldlist_inc is P24=0 & U23=1 & S22=0 & W21=0 & L20=1 & rn & ldlist_inc +{ + mult_addr=rn; build ldlist_inc; +} + +reglist: rn,ldlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=0 & L20=1 & rn & ldlist_inc +{ + mult_addr=rn; build ldlist_inc; +} + +reglist: rn!,ldlist_inc is P24=0 & U23=1 & S22=0 & W21=1 & L20=1 & rn & ldlist_inc +{ + mult_addr=rn; build ldlist_inc; rn=mult_addr; +} + +reglist: rn!,ldlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=1 & L20=1 & rn & ldlist_inc +{ + mult_addr=rn; build ldlist_inc; rn=mult_addr; +} + +reglist: rn,ldlist_inc is P24=1 & U23=1 & S22=0 & W21=0 & L20=1 & rn & ldlist_inc +{ + mult_addr=(rn+4); build ldlist_inc; +} + +reglist: rn,ldlist_inc"^" is P24=1 & U23=1 & S22=1 & W21=0 & L20=1 & rn & ldlist_inc +{ + mult_addr=(rn+4); build ldlist_inc; +} + +reglist: rn!,ldlist_inc is P24=1 & U23=1 & S22=0 & W21=1 & L20=1 & rn & ldlist_inc +{ + mult_addr=(rn+4); build ldlist_inc; rn=mult_addr-4; +} + +reglist: rn!,ldlist_inc"^" is P24=1 & U23=1 & S22=1 & W21=1 & L20=1 & rn & ldlist_inc +{ + mult_addr=(rn+4); build ldlist_inc; rn=mult_addr-4; +} + +reglist: rn,ldlist_dec is P24=0 & U23=0 & S22=0 & W21=0 & L20=1 & rn & ldlist_dec +{ + mult_addr=rn; build ldlist_dec; +} + +reglist: rn,ldlist_dec"^" is P24=0 & U23=0 & S22=1 & W21=0 & L20=1 & rn & ldlist_dec +{ + mult_addr=rn; build ldlist_dec; +} + +reglist: rn!,ldlist_dec is P24=0 & U23=0 & S22=0 & W21=1 & L20=1 & rn & ldlist_dec +{ + mult_addr=rn; build ldlist_dec; rn=mult_addr; +} + +reglist: rn!,ldlist_dec"^" is P24=0 & U23=0 & S22=1 & W21=1 & L20=1 & rn & ldlist_dec +{ + mult_addr=rn; build ldlist_dec; rn=mult_addr; +} + +reglist: rn,ldlist_dec is P24=1 & U23=0 & S22=0 & W21=0 & L20=1 & rn & ldlist_dec +{ + mult_addr=(rn-4); build ldlist_dec; +} + +reglist: rn,ldlist_dec"^" is P24=1 & U23=0 & S22=1 & W21=0 & L20=1 & rn & ldlist_dec +{ + mult_addr=(rn-4); build ldlist_dec; +} + +reglist: rn!,ldlist_dec is P24=1 & U23=0 & S22=0 & W21=1 & L20=1 & rn & ldlist_dec +{ + mult_addr=(rn-4); build ldlist_dec; rn=mult_addr+4; +} + +reglist: rn!,ldlist_dec"^" is P24=1 & U23=0 & S22=1 & W21=1 & L20=1 & rn & ldlist_dec +{ + mult_addr=(rn-4); build ldlist_dec; rn=mult_addr+4; +} + +reglist: rn,stlist_inc is P24=0 & U23=1 & S22=0 & W21=0 & L20=0 & rn & stlist_inc +{ + mult_addr=rn; build stlist_inc; +} + +reglist: rn,stlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=0 & L20=0 & rn & stlist_inc +{ + mult_addr=rn; build stlist_inc; +} + +## This is here to allow old versions of this instruction to decode. +## The W-Bit21 is specified as (0) in the manual meaning should be 0 but is unpredictable if 1 +## Some older processors did not specify that Writeback was not available if the P24=0 and S22=0, +## which is a system interrupt instruction. 
+## I AM ASSUMING that the W-bit is honored on these processors and does update the register.
+## This is probably an arbitrary decision, but it keeps with what older processors did.
+reglist: rn,stlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=1 & L20=0 & rn & stlist_inc
+{
+    mult_addr=rn; build stlist_inc; rn=mult_addr;
+}
+
+
+reglist: rn!,stlist_inc is P24=0 & U23=1 & S22=0 & W21=1 & L20=0 & rn & stlist_inc
+{
+    mult_addr=rn; build stlist_inc; rn=mult_addr;
+}
+
+reglist: rn,stlist_inc is P24=1 & U23=1 & S22=0 & W21=0 & L20=0 & rn & stlist_inc
+{
+    mult_addr=(rn+4); build stlist_inc;
+}
+
+reglist: rn,stlist_inc"^" is P24=1 & U23=1 & S22=1 & W21=0 & L20=0 & rn & stlist_inc
+{
+    mult_addr=(rn+4); build stlist_inc;
+}
+
+reglist: rn!,stlist_inc is P24=1 & U23=1 & S22=0 & W21=1 & L20=0 & rn & stlist_inc
+{
+    mult_addr=(rn+4); build stlist_inc; rn=mult_addr-4;
+}
+
+reglist: rn,stlist_dec is P24=0 & U23=0 & S22=0 & W21=0 & L20=0 & rn & stlist_dec
+{
+    mult_addr=rn; build stlist_dec;
+}
+
+reglist: rn,stlist_dec"^" is P24=0 & U23=0 & S22=1 & W21=0 & L20=0 & rn & stlist_dec
+{
+    mult_addr=rn; build stlist_dec;
+}
+
+reglist: rn!,stlist_dec is P24=0 & U23=0 & S22=0 & W21=1 & L20=0 & rn & stlist_dec
+{
+    mult_addr=rn; build stlist_dec; rn=mult_addr;
+}
+
+reglist: rn,stlist_dec is P24=1 & U23=0 & S22=0 & W21=0 & L20=0 & rn & stlist_dec
+{
+    mult_addr=(rn-4); build stlist_dec;
+}
+
+reglist: rn,stlist_dec"^" is P24=1 & U23=0 & S22=1 & W21=0 & L20=0 & rn & stlist_dec
+{
+    mult_addr=(rn-4); build stlist_dec;
+}
+
+reglist: rn!,stlist_dec is P24=1 & U23=0 & S22=0 & W21=1 & L20=0 & rn & stlist_dec
+{
+    mult_addr=(rn-4); build stlist_dec; rn=mult_addr+4;
+}
+
+# mdir attaches the load/store-multiple addressing-mode suffix (ia/ib/da/db) to the base mnemonic
+mdir: "ia" is P24=0 & U23=1 { }
+mdir: "ib" is P24=1 & U23=1 { }
+mdir: "da" is P24=0 & U23=0 { }
+mdir: "db" is P24=1 & U23=0 { }
+
+# addrmode5 is the parameter in Addressing Mode 5
+# it takes care of bits 27-0 except for the N and L flags and CRd and cp#
+# it takes care of possible writebacks to Rn
+addrmode5: [rn,"#"^off8] is P24=1 & U23=1 & W21=0 & rn & immed [ off8=immed*4; ] { local tmp = rn + off8; export tmp; }
+addrmode5: [rn,"#"^noff8] is P24=1 & U23=0 & W21=0 & rn & immed [ noff8=-(immed*4); ] { local tmp = rn + noff8; export tmp; }
+addrmode5: [rn,"#"^off8]! is P24=1 & U23=1 & W21=1 & rn & immed [ off8=immed*4; ] { rn = rn + off8; export rn; }
+addrmode5: [rn,"#"^noff8]!
is P24=1 & U23=0 & W21=1 & rn & immed [ noff8=-(immed*4); ] { rn = rn + noff8; export rn; } +addrmode5: [rn],"#"^off8 is P24=0 & U23=1 & W21=1 & rn & immed [ off8=immed*4; ] { local tmp = rn; rn = rn+off8; export tmp; } +addrmode5: [rn],"#"^noff8 is P24=0 & U23=0 & W21=1 & rn & immed [ noff8=-(immed*4); ] { local tmp = rn; rn = rn + noff8; export tmp; } +addrmode5: [rn],{immed} is P24=0 & U23=1 & W21=0 & rn & immed { export rn; } + +# cpsrmask is the resulting cpsr mask for the msr instruction + +cpsrmask: is mask=0 { export 0:4; } +cpsrmask: "cpsr_c" is mask=1 { export 0xff:4; } +cpsrmask: "cpsr_x" is mask=2 { export 0xff00:4; } +cpsrmask: "cpsr_cx" is mask=3 { export 0xffff:4; } +cpsrmask: "cpsr_s" is mask=4 { export 0xff0000:4; } +cpsrmask: "cpsr_cs" is mask=5 { export 0xff00ff:4; } +cpsrmask: "cpsr_xs" is mask=6 { export 0xffff00:4; } +cpsrmask: "cpsr_cxs" is mask=7 { export 0xffffff:4; } +cpsrmask: "cpsr_f" is mask=8 { export 0xff000000:4; } +cpsrmask: "cpsr_cf" is mask=9 { export 0xff0000ff:4; } +cpsrmask: "cpsr_xf" is mask=10 { export 0xff00ff00:4; } +cpsrmask: "cpsr_cxf" is mask=11 { export 0xff00ffff:4; } +cpsrmask: "cpsr_sf" is mask=12 { export 0xffff0000:4; } +cpsrmask: "cpsr_csf" is mask=13 { export 0xffff00ff:4; } +cpsrmask: "cpsr_xsf" is mask=14 { export 0xffffff00:4; } +cpsrmask: "cpsr_cxsf" is mask=15 { export 0xffffffff:4; } + +# spsrmask is the mask for spsr in the msr instruction + +spsrmask: is mask=0 { export 0:4; } +spsrmask: "spsr_c" is mask=1 { export 0xff:4; } +spsrmask: "spsr_x" is mask=2 { export 0xff00:4; } +spsrmask: "spsr_cx" is mask=3 { export 0xffff:4; } +spsrmask: "spsr_s" is mask=4 { export 0xff0000:4; } +spsrmask: "spsr_cs" is mask=5 { export 0xff00ff:4; } +spsrmask: "spsr_xs" is mask=6 { export 0xffff00:4; } +spsrmask: "spsr_cxs" is mask=7 { export 0xffffff:4; } +spsrmask: "spsr_f" is mask=8 { export 0xff000000:4; } +spsrmask: "spsr_cf" is mask=9 { export 0xff0000ff:4; } +spsrmask: "spsr_xf" is mask=10 { export 0xff00ff00:4; } +spsrmask: "spsr_cxf" is mask=11 { export 0xff00ffff:4; } +spsrmask: "spsr_sf" is mask=12 { export 0xffff0000:4; } +spsrmask: "spsr_csf" is mask=13 { export 0xffff00ff:4; } +spsrmask: "spsr_xsf" is mask=14 { export 0xffffff00:4; } +spsrmask: "spsr_cxsf" is mask=15 { export 0xffffffff:4; } + +##################### +###### immediate bit-number data for unsigned/signed saturated instructions +##################### +@if defined(VERSION_6) + +sSatImm5: "#"^satimm is satimm5 [ satimm = satimm5 + 1; ] { export *[const]:2 satimm; } +sSatImm4: "#"^satimm is satimm4 [ satimm = satimm4 + 1; ] { export *[const]:2 satimm; } +uSatImm5: "#"^satimm5 is satimm5 { export *[const]:2 satimm5; } +uSatImm4: "#"^satimm4 is satimm4 { export *[const]:2 satimm4; } + +@endif # VERSION_6 + +@if defined(VERSION_6K) || defined(VERSION_6T2) +optionImm: "#"^immed4 is immed4 { export *[const]:4 immed4; } +@endif + +@if defined(VERSION_6T2) || defined(VERSION_7) + +lsbImm: "#"^lsb is lsb { export *[const]:4 lsb; } +msbImm: "#"^msb is msb { export *[const]:4 msb; } +widthMinus1: "#"^width is msb [ width = msb + 1; ] { export *[const]:4 msb; } +bitWidth: "#"^w is lsb & msb [ w = msb - lsb + 1; ] { export *[const]:4 w; } + +@endif # VERSION_6T2 || VERSION_7 + +# +# Modes for SRS instructions +# +@if defined(VERSION_6) +SRSMode: "usr" is srsMode=8 & c0004 { export *[const]:1 c0004; } +SRSMode: "fiq" is srsMode=9 & c0004 { export *[const]:1 c0004; } +SRSMode: "irq" is srsMode=10 & c0004 { export *[const]:1 c0004; } +SRSMode: "svc" is srsMode=11 & c0004 { export 
*[const]:1 c0004; }
+SRSMode: "mon" is srsMode=14 & c0004 { export *[const]:1 c0004; }
+SRSMode: "abt" is srsMode=15 & c0004 { export *[const]:1 c0004; }
+SRSMode: "und" is srsMode=19 & c0004 { export *[const]:1 c0004; }
+SRSMode: "sys" is srsMode=23 & c0004 { export *[const]:1 c0004; }
+SRSMode: "#"^srsMode is srsMode { export *[const]:1 srsMode; }
+@endif # VERSION_6
+
+# Add a hat instruction to set the ARMcond context variable, which
+# tells whether this is a legal conditional instruction (for v7 and
+# later).
+
+@if defined(VERSION_6T2) || defined(VERSION_7)
+:^instruction is ARMcondCk=0 & itmode=0 & TMode=0 & (bit31=0|bit30=0|bit29=0|bit28=0) & instruction [ ARMcondCk=1; ARMcond=1; ] {}
+:^instruction is ARMcondCk=0 & instruction [ ARMcondCk=1; ARMcond=0; ] {}
+
+# Ensure one of the recursive rules above is applied for assembly
+with : ARMcondCk=1 {
+
+@endif
+
+#################################################
+#
+# Include the SIMD/VFP instructions before the
+# other ARM instructions to avoid incorrect
+# constructor matching for those that use the
+# COND subconstructor. This also ensures
+# that the various VFP instructions supersede the
+# CDP/MCR/MRC general coprocessor instructions
+#
+#################################################
+@if defined(INCLUDE_NEON)
+@include "ARMneon.sinc"
+@endif
+
+#################################################
+#
+# Do the same now for ARMv8, which also has NEON
+#
+#################################################
+@if defined(VERSION_8)
+@include "ARMv8.sinc"
+@endif # VERSION_8
+
+################################################
+#
+# These instructions must come first because the cond pattern match
+# is more specific than the subconstructor COND. If a base instruction
+# matches and then COND fails (cond=14 or cond=15) then the disassembly
+# will fail
+#
+################################################
+
+@if defined(VERSION_5)
+
+# Exception Generation and UDF
+
+# immed12_4 used in Exception Generation and Media instructions class
+
+immed12_4: "#"^tmp is $(AMODE) & immed12 & immed4 [tmp = (immed12 << 4) | immed4; ] { export *[const]:4 tmp; }
+
+:hlt immed12_4 is $(AMODE) & cond=0xe & c2027=0x10 & c0407=0x7 & immed12_4
+{
+ software_hlt(immed12_4);
+}
+
+:bkpt immed12_4 is $(AMODE) & cond=0xe & c2027=0x12 & c0407=0x7 & immed12_4
+{
+ software_bkpt(immed12_4);
+}
+
+:hvc immed12_4 is $(AMODE) & cond=0xe & c2027=0x14 & c0407=0x7 & immed12_4
+{
+ software_hvc(immed12_4);
+}
+
+@if defined(VERSION_6T2) || defined(VERSION_7)
+
+define pcodeop SG;
+
+:sg is TMode=1 & thv_c0031=0xe97fe97f
+{
+ SG();
+}
+@endif
+
+# Requires Security Extensions
+:smc^COND immed4 is $(AMODE) & COND & c2027=0x16 & c0407=0x7 & immed4
+{
+ build COND;
+ software_smc(immed4:4);
+}
+
+@if defined(VERSION_6T2) || defined(VERSION_7)
+define pcodeop TT;
+
+:tt^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=0 & thv_bit06=0 & thv_Rt2 & thv_Rn
+{
+ thv_Rt2 = TT(thv_Rn);
+}
+
+define pcodeop TTA;
+
+:tta^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=1 & thv_bit06=0 & thv_Rt2 & thv_Rn
+{
+ thv_Rt2 = TTA(thv_Rn);
+}
+
+define pcodeop TTAT;
+
+:ttat^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=1 & thv_bit06=1 & thv_Rt2 & thv_Rn
+{
+ thv_Rt2 = TTAT(thv_Rn);
+}
+
+define pcodeop TTT;
+
+:ttt^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=0 & thv_bit06=1 &
thv_Rt2 & thv_Rn +{ + thv_Rt2 = TTT(thv_Rn); +} + +@endif + +:udf immed12_4 is $(AMODE) & cond=0xe & c2027=0x7f & c0407=0xf & immed12_4 +{ + local excaddr:4 = inst_start; + local target:4 = software_udf(immed12_4:4, excaddr); + goto [target]; +} + +@endif # VERSION_5 + +@if defined(VERSION_6) + +AFLAG: "a" is c0808=1 & c1819=2 { enableDataAbortInterrupts(); } +AFLAG: "a" is c0808=1 { disableDataAbortInterrupts(); } +AFLAG: is c0808=0 { } +IFLAG: "i" is c0707=1 & c1819=2 { enableIRQinterrupts(); } +IFLAG: "i" is c0707=1 { disableIRQinterrupts(); } +IFLAG: is c0707=0 { } +FFLAG: "f" is c0606=1 & c1819=2 { enableFIQinterrupts(); } +FFLAG: "f" is c0606=1 { disableFIQinterrupts(); } +FFLAG: is c0606=0 { } +IFLAGS: AFLAG^IFLAG^FFLAG is AFLAG & IFLAG & FFLAG { } + +SetMode: "#"^16 is c0004=0x10 { setUserMode(); } +SetMode: "#"^17 is c0004=0x11 { setFIQMode(); } +SetMode: "#"^18 is c0004=0x12 { setIRQMode(); } +SetMode: "#"^19 is c0004=0x13 { setSupervisorMode(); } +SetMode: "#"^22 is c0004=0x16 { setMonitorMode(); } +SetMode: "#"^23 is c0004=0x17 { setAbortMode(); } +SetMode: "#"^27 is c0004=0x1b { setUndefinedMode(); } +SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } + +:cps SetMode is $(AMODE) & cond=15 & c2027=16 & c1819=0 & c1717=1 & c0916=0 & c0508=0 & SetMode { } +:cpsie IFLAGS is $(AMODE) & cond=15 & c2027=16 & c1819=2 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } +:cpsid IFLAGS is $(AMODE) & cond=15 & c2027=16 & c1819=3 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } +:cpsie IFLAGS, SetMode is $(AMODE) & cond=15 & c2027=16 & c1819=2 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } +:cpsid IFLAGS, SetMode is $(AMODE) & cond=15 & c2027=16 & c1819=3 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:pld addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=1 & c2022=5 & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + HintPreloadData(addrmode2); +} + +# prevent literal form getting matched by pldw +:pld addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=1 & c2022=5 & c1619=0xf & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + HintPreloadData(addrmode2); +} + +@endif # VERSION_5E + +@if defined(VERSION_7) + +:pldw addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=1 & c2022=1 & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + HintPreloadDataForWrite(addrmode2); +} + +:pli addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=0 & c2022=5 & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + HintPreloadInstruction(addrmode2); +} + +@endif # VERSION_7 + + +@if defined(VERSION_6) + +:rfeia rn is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = rn; + cpsr = *ptr; + ptr = ptr + 4; + pc = *ptr; + return [pc]; +} + +:rfeib rn is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = rn + 4; + cpsr = *ptr; + ptr = ptr + 4; + pc = *ptr; + return [pc]; +} + +:rfeda rn is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = rn; + cpsr = *ptr; + ptr = ptr - 4; + pc = *ptr; + return [pc]; +} + +:rfedb rn is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = rn - 4; + cpsr = *ptr; + ptr = ptr - 4; + pc 
= *ptr; + return [pc]; +} + +:rfeia Rn! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = Rn; + cpsr = *ptr; + ptr = ptr + 4; + pc = *ptr; + Rn = ptr + 4; + return [pc]; +} + +:rfeib Rn! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = Rn + 4; + cpsr = *ptr; + ptr = ptr + 4; + pc = *ptr; + Rn = ptr; + return [pc]; +} + +:rfeda Rn! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = Rn; + cpsr = *ptr; + ptr = ptr - 4; + pc = *ptr; + Rn = ptr - 4; + return [pc]; +} + +:rfedb Rn! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +{ + # register list is always: pc, cpsr + ptr:4 = Rn - 4; + cpsr = *ptr; + ptr = ptr - 4; + pc = *ptr; + Rn = ptr; + return [pc]; +} + +:srsia SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=0 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp; + *ptr = lr; + ptr = ptr + 4; + *ptr = spsr; + ptr = ptr + 4; +} + +:srsib SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp + 4; + *ptr = lr; + ptr = ptr + 4; + *ptr = spsr; +} + +:srsda SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp; + *ptr = lr; + ptr = ptr - 4; + *ptr = spsr; + ptr = ptr - 4; +} + +:srsdb SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp - 4; + *ptr = lr; + ptr = ptr - 4; + *ptr = spsr; +} + +:srsia SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp; + *ptr = lr; + ptr = ptr + 4; + *ptr = spsr; + ptr = ptr + 4; + sp = ptr; +} + +:srsib SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp + 4; + *ptr = lr; + ptr = ptr + 4; + *ptr = spsr; + sp = ptr; +} + +:srsda SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +{ + # register list is always: r14, spsr + ptr:4 = sp; + *ptr = lr; + ptr = ptr - 4; + *ptr = spsr; + ptr = ptr - 4; + sp = ptr; +} + +:srsdb SRSMode! 
is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode
+{
+ # register list is always: r14, spsr
+ ptr:4 = sp;
+ ptr = ptr - 4;
+ *ptr = lr;
+ ptr = ptr - 4;
+ *ptr = spsr;
+ sp = ptr;
+}
+
+@endif # VERSION_6
+
+@if defined(VERSION_5)
+
+:stc2 cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0
+{
+ t_cpn:4 = cpn;
+ coprocessor_store2(t_cpn,CRd,addrmode5);
+}
+
+:stc2l cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0
+{
+ t_cpn:4 = cpn;
+ coprocessor_storelong2(t_cpn,CRd,addrmode5);
+}
+
+@endif # VERSION_5
+
+#################################################
+#
+# Here are the rest of the instructions in alphabetical order
+#
+#################################################
+
+# See ARM Architecture Reference Manual section "Pseudocode details of addition and subtraction"
+macro add_with_carry_flags(op1,op2){
+ local CYz = zext(CY);
+ local result = op1 + op2;
+ tmpCY = carry( op1, op2) || carry( result, CYz );
+ tmpOV = scarry( op1, op2 ) ^^ scarry( result, CYz );
+}
+
+# Note: used for subtraction op1 - (op2 + !CY)
+# sets tmpCY if there is NO borrow
+macro sub_with_carry_flags(op1, op2){
+ local result = op1 - op2;
+ tmpCY = (op1 > op2) || (result < zext(CY));
+ tmpOV = sborrow(op1,op2) ^^ sborrow(result,zext(!CY));
+}
+
+
+:adc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift1
+{
+ build COND;
+ build rn;
+ build shift1;
+ add_with_carry_flags(rn,shift1);
+ Rd = rn+shift1+zext(CY);
+ resultflags(Rd);
+ build SBIT_CZNO;
+}
+
+:adc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift2
+{
+ build COND;
+ build rn;
+ build shift2;
+ add_with_carry_flags(rn,shift2);
+ Rd = rn+shift2+zext(CY);
+ resultflags(Rd);
+ build SBIT_CZNO;
+}
+
+:adc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift3
+{
+ build COND;
+ build rn;
+ build shift3;
+ add_with_carry_flags(rn,shift3);
+ Rd = rn+shift3+zext(CY);
+ resultflags(Rd);
+ build SBIT_CZNO;
+}
+
+:adc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1
+{
+ build COND;
+ build rn;
+ build shift1;
+ add_with_carry_flags(rn,shift1);
+ dest:4 = rn + shift1 + zext(CY);
+ resultflags(dest);
+ build SBIT_CZNO;
+ ALUWritePC(dest);
+ goto [pc];
+}
+
+:adc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2
+{
+ build COND;
+ build rn;
+ build shift2;
+ add_with_carry_flags(rn,shift2);
+ dest:4 = rn + shift2 + zext(CY);
+ resultflags(dest);
+ build SBIT_CZNO;
+ ALUWritePC(dest);
+ goto [pc];
+}
+
+:adc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3
+{
+ build COND;
+ build rn;
+ build shift3;
+ add_with_carry_flags(rn,shift3);
+ dest:4 = rn + shift3 + zext(CY);
+ resultflags(dest);
+ build SBIT_CZNO;
+ ALUWritePC(dest);
+ goto [pc];
+}
+
+ArmPCRelImmed12: reloff is U23=1 & immed & rotate
+ [ reloff = inst_start + 8 + ( ((immed<<(32-rotate*2))|(immed>>(rotate*2))) $and 0xffffffff); ]
+{
+ export *[const]:4 reloff;
+}
+
+ArmPCRelImmed12: reloff is U23=0 & immed & rotate
+ [ reloff = inst_start + 8 - ( ((immed<<(32-rotate*2))|(immed>>(rotate*2))) $and 0xffffffff); ]
+{
+ export *[const]:4 reloff;
+}
+
+#
+# ADR constructors must appear before ADD constructors to give ADR parsing precedence
+#
+
+:adr^COND Rd,ArmPCRelImmed12 is $(AMODE)
& COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd & ArmPCRelImmed12 +{ + build COND; + Rd = ArmPCRelImmed12; +} + +:adr^COND pc,ArmPCRelImmed12 is $(AMODE) & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd=15 & pc & ArmPCRelImmed12 +{ + build COND; + dest:4 = ArmPCRelImmed12; + ALUWritePC(dest); + goto [pc]; +} + + +:add^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + addflags(rn,shift1); + Rd = rn + shift1; + resultflags(Rd); + build SBIT_CZNO; +} + +:add^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + addflags(rn,shift2); + Rd = rn + shift2; + resultflags(Rd); + build SBIT_CZNO; +} + +:add^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + addflags(rn,shift3); + Rd = rn + shift3; + resultflags(Rd); + build SBIT_CZNO; +} + +:add^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + addflags(rn,shift1); + dest:4 = rn + shift1; + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:add^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + addflags(rn,shift2); + dest:4 = rn + shift2; + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:add^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + addflags(rn,shift3); + dest:4 = rn + shift3; + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:and^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + Rd = rn & shift1; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:and^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + Rd = rn & shift2; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:and^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + Rd = rn & shift3; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:and^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + dest:4 = rn & shift1; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:and^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + dest:4 = rn & shift2; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:and^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + dest:4 = rn & shift3; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +# must match first! 
before conditional goto + +:b Addr24 is $(AMODE) & cond=14 & c2527=5 & L24=0 & Addr24 +{ + goto Addr24; +} + +:b^cc Addr24 is $(AMODE) & cc & c2527=5 & L24=0 & Addr24 +{ + if (cc) goto Addr24; +} + + + +@if defined(VERSION_6T2) + +:bfc^COND Rd,lsbImm,bitWidth is $(AMODE) & COND & c2127=0x3e & msbImm & Rd & lsbImm & bitWidth & c0006=0x1f { + build COND; + build lsbImm; + build msbImm; + build bitWidth; + clearMask:4 = (-1 << (msbImm + 1)) | (-1 >> (32 - lsbImm)); + Rd = Rd & clearMask; +} + +:bfi^COND Rd,Rm,lsbImm,bitWidth is $(AMODE) & COND & c2127=0x3e & msbImm & Rd & Rm & lsbImm & bitWidth & c0406=1 { + build COND; + build lsbImm; + build msbImm; + build bitWidth; + clearMask:4 = (-1 << (msbImm + 1)) | (-1 >> (32 - lsbImm)); + bits:4 = (Rm << lsbImm) & ~clearMask; + Rd = (Rd & clearMask) | bits; +} + +@endif # VERSION_6T2 + +:bic^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + Rd = rn&(~shift1); + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:bic^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + Rd = rn&(~shift2); + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:bic^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + Rd = rn&(~shift3); + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:bic^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + dest:4 = rn&(~shift1); + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:bic^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + dest:4 = rn&(~shift2); + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:bic^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + dest:4 = rn&(~shift3); + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +# bl used as a PIC instruction to get at current PC in lr +:bl Addr24 is $(AMODE) & cond=14 & c2527=5 & L24=1 & immed24=0xffffff & Addr24 +{ + lr = inst_next; + goto Addr24; +} + +:bl Addr24 is $(AMODE) & cond=14 & c2527=5 & L24=1 & Addr24 +{ + lr = inst_next; + call Addr24; +} + +:bl^COND Addr24 is $(AMODE) & CALLoverride=0 & COND & c2527=5 & L24=1 & Addr24 +{ + build COND; + build Addr24; + lr = inst_next; + call Addr24; +} + +:bl^COND Addr24 is $(AMODE) & CALLoverride=1 & COND & c2527=5 & L24=1 & Addr24 +{ + build COND; + build Addr24; + lr = inst_next; + goto Addr24; +} + +# blx(1) instruction +@if defined(T_VARIANT) && defined(VERSION_5) + +# Two forms of blx needed to distinguish from b +:blx HAddr24 is $(AMODE) & CALLoverride=0 & cond=15 & c2527=5 & H24=0 & HAddr24 +{ + lr = inst_next; + SetThumbMode(1); + call HAddr24; + # don't do causes decompiler trouble TB = 0; +} # Always changes to THUMB mode + +:blx HAddr24 is $(AMODE) & CALLoverride=1 & cond=15 & c2527=5 & H24=0 & HAddr24 +{ + lr = inst_next; + SetThumbMode(1); + goto HAddr24; +} # Always changes to THUMB mode + + +:blx HAddr24 is $(AMODE) & CALLoverride=0 & cond=15 & c2527=5 & H24=1 & HAddr24 +{ + lr = 
inst_next; + SetThumbMode(1); + call HAddr24; + # don't do causes decompiler trouble TB = 0; +} # Always changes to THUMB mode + +:blx HAddr24 is $(AMODE) & CALLoverride=1 & cond=15 & c2527=5 & H24=1 & HAddr24 +{ + lr = inst_next; + SetThumbMode(1); + goto HAddr24; +} # Always changes to THUMB mode + +@endif # T_VARIANT && VERSION_5 + +@if defined(VERSION_5) + +:blx^COND rm is $(AMODE) & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm +{ + build COND; + build rm; + BXWritePC(rm); + lr=inst_next; + call [pc]; +# don't do causes decompiler trouble TB = 0; +} # Optional THUMB + +:blx^COND rm is $(AMODE) & CALLoverride=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm +{ + build COND; + build rm; + BXWritePC(rm); + lr=inst_next; + goto [pc]; +} # Optional THUMB + +@endif # VERSION_5 + +@if defined(VERSION_5_or_T) + +# if branching using lr, assume return +:bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm=14 +{ + build COND; + build rm; + BXWritePC(rm); + return [pc]; +} # Optional change to THUMB + +:bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm +{ + build COND; + build rm; + BXWritePC(rm); + goto [pc]; +} # Optional change to THUMB + +# if lr has just been set, assume call +:bx^COND rm is $(AMODE) & REToverride=0 & LRset=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm +{ + build COND; + build rm; + BXWritePC(rm); + call [pc]; +} # Optional change to THUMB + +:bx^COND rm is $(AMODE) & REToverride=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm +{ + build COND; + build rm; + BXWritePC(rm); + goto [pc]; +} # Optional change to THUMB + +#:bx^COND lr is $(AMODE) & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & Rm=14 & lr +#{ +# build COND; +# TB=(lr&0x00000001)!=0; +# tmp = lr & 0xfffffffe; +# return [tmp]; +#} # Optional change to THUMB + +@endif # VERSION_5_or_T + +@if defined(VERSION_6) + +# bxj behaves like bx except that Jazelle state is enabled if available (added with Version-5 J-variant) + +:bxj^COND rm is $(AMODE) & REToverride=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm +{ + build COND; + build rm; + success:1 = jazelle_branch(); + if (success) goto ; + BXWritePC(rm); + return [pc]; + +} # Optional change to THUMB + +# if branching using "ip" then is a goto +:bxj^COND rm is $(AMODE) & REToverride=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm & Rm=12 +{ + build COND; + build rm; + success:1 = jazelle_branch(); + if (success) goto ; + BXWritePC(rm); + goto [pc]; + +} # Optional change to THUMB + +:bxj^COND rm is $(AMODE) & REToverride=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm +{ + build COND; + build rm; + success:1 = jazelle_branch(); + if (success) goto ; + BXWritePC(rm); + goto [pc]; + +} # Optional change to THUMB + +@endif # VERSION_6 + +@if defined(VERSION_5) + +:cdp2 cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & cond=15 & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm +{ + t_cpn:4 = cpn; + t_op1:4 = opcode1; + t_op2:4 = opcode2; + coprocessor_function2(t_cpn,t_op1,t_op2,CRd,CRn,CRm); +} + +@endif # VERSION_5 + +:cdp^COND cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & COND & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm +{ + build COND; + t_cpn:4 = cpn; + t_op1:4 = opcode1; + t_op2:4 = opcode2; + coprocessor_function(t_cpn,t_op1,t_op2,CRd,CRn,CRm); 
+} + +@if defined(VERSION_6K) || defined(VERSION_7) + +:clrex is $(AMODE) & c0031=0xf57ff01f { + ClearExclusiveLocal(); +} + +@endif # VERSION_6K + +@if defined(VERSION_5) + +:clz^COND Rd,rm is $(AMODE) & COND & c2027=22 & c1619=15 & Rd & c0811=15 & c0407=1 & rm +{ + build COND; + build rm; + Rd = count_leading_zeroes(rm); +} + +@endif # VERSION_5 + +:cmn^COND rn,shift1 is $(AMODE) & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + addflags(rn,shift1); + local tmp = rn + shift1; + resultflags(tmp); + affectflags(); +} + +:cmn^COND rn,shift2 is $(AMODE) & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + addflags(rn,shift2); + local tmp = rn + shift2; + resultflags(tmp); + affectflags(); +} + +:cmn^COND rn,shift3 is $(AMODE) & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + addflags(rn,shift3); + local tmp = rn + shift3; + resultflags(tmp); + affectflags(); +} + +:cmp^COND rn,shift1 is $(AMODE) & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + subflags(rn,shift1); + local tmp = rn - shift1; + resultflags(tmp); + affectflags(); +} + +:cmp^COND rn,shift2 is $(AMODE) & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + subflags(rn,shift2); + local tmp = rn - shift2; + resultflags(tmp); + affectflags(); +} + +:cmp^COND rn,shift3 is $(AMODE) & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + subflags(rn,shift3); + local tmp = rn - shift3; + resultflags(tmp); + affectflags(); +} + +@if defined(VERSION_6) + +# cpy is a pre-UAL synonym for mov +:cpy^COND pc,rm is $(AMODE) & COND & pc & c2027=0x1a & c1619=0 & c0411=0 & Rd=15 & rm +{ + build COND; + build rm; + BXWritePC(rm); + goto [pc]; +} + +:cpy^COND lr,rm is $(AMODE) & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd=14 & lr & rm & Rm2=15 + [ LRset=1; globalset(inst_next,LRset); ] +{ + build COND; + lr = rm; +} + +:cpy^COND Rd,rm is $(AMODE) & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd & rm +{ + build COND; + build rm; + Rd = rm; +} + +@endif # VERSION_6 + +@if defined(VERSION_6K) || defined(VERSION_6T2) + +:dbg^COND optionImm is $(AMODE) & COND & c0427=0x320f0f & optionImm { +@if defined(VERSION_7) + build COND; + build optionImm; + HintDebug(optionImm); +@endif # VERSION_7 +} + +@endif # VERSION_6K || VERSION_6T2 + +@if defined(VERSION_7) + + + +:dmb dbOption is $(AMODE) & c0431=0xf57ff05 & dbOption { + DataMemoryBarrier(dbOption:1); +} + +:dsb dbOption is $(AMODE) & c0431=0xf57ff04 & dbOption { + DataSynchronizationBarrier(dbOption:1); +} + +@endif # VERSION_7 + +:eor^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + Rd = rn^shift1; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:eor^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + Rd = rn^shift2; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:eor^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + Rd = rn^shift3; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:eor^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + 
build shift1; + dest:4 = rn^shift1; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:eor^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + dest:4 = rn^shift2; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:eor^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + dest:4 = rn^shift3; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +@if defined(VERSION_7) + + +:isb ibOption is $(AMODE) & c0431=0xf57ff06 & ibOption { + InstructionSynchronizationBarrier(ibOption:1); +} + +@endif # VERSION_7 + +### These must come first, because of cond=15 match +@if defined(VERSION_5) + +:ldc2 cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 +{ + t_cpn:4 = cpn; + coprocessor_load2(t_cpn,CRd,addrmode5); +} + +:ldc2l cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 +{ + t_cpn:4 = cpn; + coprocessor_loadlong2(t_cpn,CRd,addrmode5); +} + +@endif # VERSION_5 +######## cond=15 match + +:ldc^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 +{ + build COND; + build addrmode5; + t_cpn:4 = cpn; + coprocessor_load(t_cpn,CRd,addrmode5); +} + +:ldcl^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 +{ + build COND; + build addrmode5; + t_cpn:4 = cpn; + coprocessor_loadlong(t_cpn,CRd,addrmode5); +} + +:ldm^mdir^COND reglist is $(AMODE) & COND & c2527=4 & mdir & L20=1 & c1515=0 & reglist +{ + build COND; + build reglist; +} + +:ldm^mdir^COND reglist is $(AMODE) & COND & c2527=4 & mdir & L20=1 & c1515=1 & reglist +{ + build COND; + build reglist; + LoadWritePC(pc); + return [pc]; +} + +#:ldr^COND Rd,addrmode2 is $(AMODE) & COND & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +#{ +# build COND; +# build addrmode2; +# tmp:4=addrmode2&0xfffffffc; +# tmp2:4=(addrmode2&3)<<3; +# Rd=*tmp; +# Rd = (Rd >> tmp2) | (Rd << (32-tmp2)); +#} + +# The following form of ldr assumes alignment checking is on +:ldr^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + Rd = *addrmode2; +} + +# Two forms of ldr with destination=pc needed to distinguish from ldrt +:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 + [ LRset=0; globalset(inst_next,LRset); ] +{ + build COND; + build addrmode2; + dest:4=*addrmode2; + SetThumbMode((dest&0x00000001)!=0); + pc=dest&0xfffffffe; + call [pc]; + SetThumbMode(0); +} # No unaligned address + +:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 + [ LRset=0; globalset(inst_next,LRset); ] +{ + build COND; + build addrmode2; + dest:4=*addrmode2; + SetThumbMode((dest&0x00000001)!=0); + pc=dest&0xfffffffe; + call [pc]; + SetThumbMode(0); +} # No unaligned address + +# Two forms of ldr with destination=pc needed to distinguish from ldrt +:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + dest:4=*addrmode2; + 
BXWritePC(dest); + goto [pc]; +} # No unaligned address + +:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + dest:4=*addrmode2; + BXWritePC(dest); + goto [pc]; +} # No unaligned address + +:ldrb^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=1 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + Rd = zext( *:1 addrmode2); +} + +:ldrbt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=1 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + Rd = zext( *:1 addrmode2); +} + +@if defined(VERSION_5E) + +:ldrd^COND Rd,Rd2,addrmode3 is $(AMODE) & COND & c2527=0 & c0407=13 & c1212=0 & L20=0 & Rd & Rd2 & addrmode3 +{ + build COND; + build addrmode3; + Rd = *(addrmode3); + Rd2 = *(addrmode3+4); +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:ldrex^COND Rd,[Rn] is $(AMODE) & COND & c2027=0x19 & Rn & Rd & c0011=0xf9f +{ + build COND; + Rd = *Rn; +} + +@endif # VERSION_6 + +@if defined(VERSION_6K) + +:ldrexb^COND Rd,[Rn] is $(AMODE) & COND & c2027=0x1d & Rn & Rd & c0011=0xf9f +{ + build COND; + Rd = zext(*:1 Rn); +} + +:ldrexd^COND Rd,Rd2,[Rn] is $(AMODE) & COND & c2027=0x1b & Rn & Rd & Rd2 & c0011=0xf9f +{ + build COND; + Rd = *(Rn); + Rd2 = *(Rn + 4); +} + +:ldrexh^COND Rd,[Rn] is $(AMODE) & COND & c2027=0x1f & Rn & Rd & c0011=0xf9f +{ + build COND; + Rd = zext(*:2 Rn); +} + +@endif # VERSION_6K + +:ldrh^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=1 & c0407=11 & Rd & addrmode3 +{ + build COND; + build addrmode3; + Rd = zext( *:2 addrmode3); +} + +@if defined(VERSION_6T2) + +:ldrht^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=11 & Rd & addrmode3 { + build COND; + build addrmode3; + Rd = zext( *:2 addrmode3); +} + +@endif # VERSION_6T2 + +:ldrsb^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=1 & c0407=13 & Rd & addrmode3 +{ + build COND; + build addrmode3; + Rd = sext( *:1 addrmode3); +} + +@if defined(VERSION_6T2) + +:ldrsbt^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=13 & Rd & addrmode3 { + build COND; + build addrmode3; + Rd = sext( *:1 addrmode3); +} + +@endif # VERSION_6T2 + +:ldrsh^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=1 & c0407=15 & Rd & addrmode3 +{ + build COND; + build addrmode3; + Rd = sext( *:2 addrmode3); +} + +@if defined(VERSION_6T2) + +:ldrsht^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=15 & Rd & addrmode3 { + build COND; + build addrmode3; + Rd = sext( *:2 addrmode3); +} + +@endif # VERSION_6T2 + +# The following form of ldr assumes alignment checking is on +:ldrt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + Rd = *addrmode2; +} + + +###### must come first cond=15 +@if defined(VERSION_5) +:mcr2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & cond=15 & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +{ + t_cpn:4 = cpn; + t_op1:4 = opc1; + t_op2:4 = opc2; + coprocessor_moveto(t_cpn,t_op1,t_op2,Rd,CRn,CRm); +} +@endif # VERSION_5 +###### must come first cond=15 + + +# ===== START mcr + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Main_ID(Rd); +} + + +:mcr^COND mcrOperands is 
+ $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Cache_Type(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_TCM_Status(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_TLB_Type(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Control(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Auxiliary_Control(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Coprocessor_Access_Control(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Secure_Configuration(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Secure_Debug_Enable(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_NonSecure_Access_Control(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Translation_table_base_0(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Translation_table_base_1(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Translation_table_control(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Domain_Access_Control(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Data_Fault_Status(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Instruction_Fault(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Fault_Address(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Instruction_Fault(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + 
mcrOperands +{ + build COND; + coproc_moveto_Wait_for_interrupt(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Entire_Instruction(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Instruction_Cache_by_MVA(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Flush_Prefetch_Buffer(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Entire_Data_cache(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Entire_Data_by_MVA(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Entire_Data_by_Index(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Clean_Entire_Data_Cache(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Clean_Data_Cache_by_MVA(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Clean_Data_Cache_by_Index(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Data_Synchronization(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Data_Memory_Barrier(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Entire_Data_Cache(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_Data_Cache_by_MVA(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_unified_TLB_unlocked(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_unified_TLB_by_MVA(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Invalidate_unified_TLB_by_ASID_match(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & 
Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_FCSE_PID(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Context_ID(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_User_RW_Thread_and_Process_ID(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_User_R_Thread_and_Process_ID(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Privileged_only_Thread_and_Process_ID(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + coproc_moveto_Peripherial_Port_Memory_Remap(Rd); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opt2:4=opc2; + coproc_moveto_Feature_Identification(Rd,t_opt2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opt2:4=opc2; + coproc_moveto_ISA_Feature_Identification(Rd,t_opt2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=2 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + coproc_moveto_Peripheral_Port_Memory_Remap(Rd,t_opc2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + coproc_moveto_Control_registers(Rd, t_opc2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + coproc_moveto_Security_world_control(Rd, t_opc2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + coproc_moveto_Translation_table(Rd,t_opc2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + coproc_moveto_Instruction_cache(Rd,t_opc2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + coproc_moveto_Data_cache_operations(Rd,t_opc2); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; t_crm:4 = CRm; + coproc_moveto_Identification_registers(Rd,t_opc2,t_crm); +} + + +:mcr^COND mcrOperands is + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; t_crm:4 = CRm; t_op1:4 = opc1; + coproc_moveto_Peripheral_System(Rd,t_opc2,t_crm,t_op1); +} + + +# ===== END 
mcr + +:mcr^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & COND & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +{ + build COND; + t_cpn:4 = cpn; + t_op1:4 = opc1; + t_op2:4 = opc2; + coprocessor_moveto(t_cpn,t_op1,t_op2,Rd,CRn,CRm); +} + +##### must come first cond=15 +@if defined(VERSION_6) +:mcrr2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & cond=15 & c2027=0xc4 & cpn & opcode3 & Rd & Rn & CRm +{ + t_cpn:4 = cpn; + t_op:4 = opcode3; + coprocessor_moveto2(t_cpn,t_op,Rd,Rn,CRm); +} + +:mrrc2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & cond=15 & c2027=0xc5 & cpn & opcode3 & Rd & Rn & CRm +{ + t_cpn:4 = cpn; + t_op:4 = opcode3; + Rd = coprocessor_movefromRt(t_cpn,t_op,CRm); + Rn = coprocessor_movefromRt2(t_cpn,t_op,CRm); +} +@endif # VERSION_6 +##### must come first cond=15 + + +@if defined(VERSION_5E) + +:mcrr^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc4 & COND & cpn & opcode3 & Rd & Rn & CRm +{ + build COND; + t_cpn:4 = cpn; + t_op:4 = opcode3; + coprocessor_moveto2(t_cpn,t_op,Rd,Rn,CRm); +} + +:mrrc^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc5 & COND & cpn & opcode3 & Rd & Rn & CRm +{ + build COND; + t_cpn:4 = cpn; + t_op:4 = opcode3; + Rd = coprocessor_movefromRt(t_cpn,t_op,CRm); + Rn = coprocessor_movefromRt2(t_cpn,t_op,CRm); +} + +@endif # VERSION_5E + +:mla^COND^SBIT_ZN Rn,Rm,Rs,Rd is $(AMODE) & COND & c2527=0 & c2124=1 & SBIT_ZN & Rn & Rd & Rs & c0407=9 & Rm +{ + build COND; + Rn = Rm*Rs + Rd; + resultflags(Rn); + build SBIT_ZN; +} + +@if defined(VERSION_6T2) + +:mls^COND Rn,Rm,Rs,Rd is $(AMODE) & COND & c2027=0x06 & Rn & Rd & Rs & c0407=9 & Rm { + build COND; + Rn = Rd - Rm*Rs; +} + +@endif # VERSION_6T2 + +:mov^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 +{ + build COND; + build shift1; + Rd = shift1; + resultflags(Rd); + logicflags(); + build SBIT_CZNO; +} + +:mov^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 +{ + build COND; + build shift2; + Rd = shift2; + resultflags(Rd); + logicflags(); + build SBIT_CZNO; +} + +:mov lr,pc is $(AMODE) & c0031=0xe1a0e00f & lr & pc + [ LRset=1; globalset(inst_next,LRset); ] +{ + lr = inst_next + 4; + resultflags(lr); + logicflags(); +} + +:mov^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 +{ + build COND; + build shift3; + Rd = shift3; + resultflags(Rd); + logicflags(); + build SBIT_CZNO; +} + +:mov^COND^SBIT_CZNO pc,shift1 is $(AMODE) & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift1 +{ + build COND; + build shift1; + SetThumbMode((shift1&0x00000001)!=0); + local tmp=shift1&0xfffffffe; + resultflags(tmp); + logicflags(); + build SBIT_CZNO; + ALUWritePC(tmp); + goto [pc]; +} + +:mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 +{ + build COND; + build shift2; + SetThumbMode((shift2&0x00000001)!=0); + local tmp=shift2&0xfffffffe; + resultflags(tmp); + logicflags(); + build SBIT_CZNO; + ALUWritePC(tmp); + goto [pc]; +} +:mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & LRset=1 & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 +{ + build COND; + build shift2; + SetThumbMode((shift2&0x00000001)!=0); + local tmp=shift2&0xfffffffe; + resultflags(tmp); + logicflags(); + build SBIT_CZNO; + ALUWritePC(tmp); + call [pc]; +} + +:mov^COND^SBIT_CZNO pc,shift3 is $(AMODE) & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift3 +{ + build COND; + build shift3; + 
SetThumbMode((shift3&0x00000001)!=0); + local tmp=shift3&0xfffffffe; + resultflags(tmp); + logicflags(); + build SBIT_CZNO; + ALUWritePC(tmp); + goto [pc]; +} + +:mov lr,rm is $(AMODE) & cond=15 & c2527=0 & S20=0 & c2124=13 & c1619=0 & rm & Rm2=15 & sftimm=0 & c0406=0 & Rd=14 & lr + [ LRset=1; globalset(inst_next,LRset); ] +{ + lr = rm; +} + +:mov^COND pc,lr is $(AMODE) & pc & COND & c2527=0 & S20=0 & c2124=13 & c1619=0 & Rd=15 & sftimm=0 & c0406=0 & Rm=14 & lr +{ + build COND; + dest:4 = lr; + ALUWritePC(dest); + return [pc]; +} + +@if defined(VERSION_6T2) + +:movw^COND Rd,"#"^val is $(AMODE) & COND & c2027=0x30 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { + build COND; + Rd = val; +} + +:movt^COND Rd,"#"^val is $(AMODE) & COND & c2027=0x34 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { + build COND; + Rd = (val << 16) | (Rd & 0xffff); +} + +@endif # VERSION_6T2 + +###### must come before next instruction because cond=15 +@if defined(VERSION_5) + + +:mrc2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & cond=15 & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +{ + t_cpn:4 = cpn; + t_op1:4 = opc1; + t_op2:4 = opc2; + Rd = coprocessor_movefromRt(t_cpn,t_op1,t_op2,CRn,CRm); +} +@endif # VERSION_5 + +# ===== Start mrc + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Main_ID(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Cache_Type(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_TCM_Status(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_TLB_Type(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Control(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Auxiliary_Control(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Coprocessor_Access_Control(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Secure_Configuration(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Secure_Debug_Enable(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_NonSecure_Access_Control(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Translation_table_base_0(); +} + + + 
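+# As with the mcr constructors above, each mrc form in this block binds one CP15 register, selected by the CRn/CRm/opc1/opc2 fields, to a named pcodeop; bit 20 (c2020=1) is the L bit of the coprocessor register-transfer encoding and marks the transfer as a read into Rd.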
+:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Translation_table_base_1(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Translation_table_control(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Domain_Access_Control(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Instruction_Fault_Status(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Data_Fault_Status(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Fault_Address(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Instruction_Fault_Address(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Wait_for_interrupt(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Entire_Instruction(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Instruction_Cache_by_MVA(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Flush_Prefetch_Buffer(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Entire_Data_cache(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Entire_Data_by_MVA(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Entire_Data_by_Index(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Clean_Entire_Data_Cache(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Clean_Data_Cache_by_MVA(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & 
c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Clean_Data_Cache_by_Index(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Data_Synchronization(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Data_Memory_Barrier(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Entire_Data_Cache(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_Data_Cache_by_MVA(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_unified_TLB_unlocked(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_unified_TLB_by_MVA(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Invalidate_unified_TLB_by_ASID_match(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_FCSE_PID(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Context_ID(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_User_RW_Thread_and_Process_ID(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_User_R_Thread_and_Process_ID(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Privileged_only_Thread_and_Process_ID(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + Rd = coproc_movefrom_Peripherial_Port_Memory_Remap(); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opt2:4=opc2; + Rd = coproc_movefrom_Feature_Identification(t_opt2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_ISA_Feature_Identification(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=2 & 
c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_Peripheral_Port_Memory_Remap(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_Control_registers(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_Security_world_control(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_Translation_table(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_Instruction_cache(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; + Rd = coproc_movefrom_Data_cache_operations(t_opc2); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; t_crm:4 = CRm; + Rd = coproc_movefrom_Identification_registers(t_opc2,t_crm); +} + + + +:mrc^COND mcrOperands is + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1 & c2427=14 & COND & + mcrOperands +{ + build COND; + t_opc2:4 = opc2; t_crm:4 = CRm; t_op1:4 = opc1; + Rd = coproc_movefrom_Peripheral_System(t_opc2,t_crm,t_op1); +} + + + +# ===== End mrc + +:mrc^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & COND & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +{ + build COND; + t_cpn:4 = cpn; + t_op1:4 = opc1; + t_opc2:4 = opc2; + Rd = coprocessor_movefromRt(t_cpn,t_op1,t_opc2,CRn,CRm); +} + + +:mrs^COND Rd,cpsr is $(AMODE) & COND & c2027=16 & c1619=15 & Rd & offset_12=0 & cpsr +{ +# TODO: GE bits have not been included + build COND; + Rd = zext( (NG<<4) | (ZR<<3) | (CY<<2) | (OV<<1) | (Q) ) << 27; +} + +:mrs^COND Rd,spsr is $(AMODE) & COND & c2027=20 & c1619=15 & Rd & offset_12=0 & spsr +{ + build COND; + Rd = spsr; +} + +:msr^COND cpsrmask,shift1 is $(AMODE) & COND & c2027=50 & cpsrmask & c1215=15 & c2627=0 & shift1 +{ + build COND; + build cpsrmask; + build shift1; + cpsr = (cpsr& ~cpsrmask) | (shift1 & cpsrmask); +} + +:msr^COND cpsrmask,rm is $(AMODE) & COND & c2027=18 & cpsrmask & c1215=15 & c0811=0 & c0407=0 & rm +{ +# TODO: GE bits have not been included + build COND; + build cpsrmask; + cpsr = (cpsr& ~cpsrmask) | (rm & cpsrmask); + local tmp = cpsr >> 27 & 0x1f; + Q = ((tmp ) & 0x1) != 0; + OV = ((tmp >> 1) & 0x1) != 0; + CY = ((tmp >> 2) & 0x1) != 0; + ZR = ((tmp >> 3) & 0x1) != 0; + NG = ((tmp >> 4) & 0x1) != 0; +} + +:msr^COND spsrmask,shift1 is $(AMODE) & COND & c2027=54 & spsrmask & c1215=15 & c2627=0 & shift1 +{ + build COND; + build spsrmask; + build shift1; + spsr = (spsr& ~spsrmask) | (shift1 & spsrmask); +} + +:msr^COND spsrmask,rm is $(AMODE) & COND & c2027=22 & spsrmask & c1215=15 & c0811=0 & c0407=0 & rm +{ + build COND; + build spsrmask; + spsr = (spsr& ~spsrmask) | (rm & spsrmask); +} + +:mul^COND^SBIT_ZN rn,rm,rs is $(AMODE) & COND & c2527=0 & c2124=0 & SBIT_ZN & rn & c1215=0 & 
rs & c0407=9 & rm +{ + build COND; + build rm; + build rs; + rn = rm*rs; + resultflags(rn); + build SBIT_ZN; +} + +:mvn^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 +{ + build COND; + build shift1; + Rd=~shift1; + resultflags(Rd); + logicflags(); + build SBIT_CZNO; +} + +:mvn^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 +{ + build COND; + build shift2; + Rd=~shift2; + resultflags(Rd); + logicflags(); + build SBIT_CZNO; +} + +:mvn^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 +{ + build COND; + build shift3; + Rd=~shift3; + resultflags(Rd); + logicflags(); + build SBIT_CZNO; +} + +:mvn^COND^SBIT_ZN pc,shift1 is $(AMODE) & pc & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift1 +{ + build COND; + build shift1; + dest:4 = ~shift1; + resultflags(dest); + build SBIT_ZN; + ALUWritePC(dest); + goto [pc]; +} + +:mvn^COND^SBIT_ZN pc,shift2 is $(AMODE) & pc & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift2 +{ + build COND; + build shift2; + dest:4 = ~shift2; + resultflags(dest); + build SBIT_ZN; + ALUWritePC(dest); + goto [pc]; +} + +:mvn^COND^SBIT_ZN pc,shift3 is $(AMODE) & pc & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift3 +{ + build COND; + build shift3; + dest:4 = ~shift3; + resultflags(dest); + build SBIT_ZN; + ALUWritePC(dest); + goto [pc]; +} + +@if defined(VERSION_6K) || defined(VERSION_6T2) || defined(VERSION_7) + +:nop^COND is $(AMODE) & COND & c0027=0x320f000 { +} + +@endif # VERSION_6K + +:orr^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + Rd = rn|shift1; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:orr^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + Rd = rn|shift2; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:orr^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + Rd = rn|shift3; + logicflags(); + resultflags(Rd); + build SBIT_CZNO; +} + +:orr^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + dest:4 = rn|shift1; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:orr^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + dest:4 = rn|shift2; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:orr^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + dest:4 = rn|shift3; + logicflags(); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +@if defined(VERSION_6) + +:pkhbt^COND Rd,rn,shift4 is $(AMODE) & COND & c2027=0x68 & c0406=1 & Rd & rn & shift4 +{ + build COND; + build rn; + build shift4; + Rd = (rn & 0xffff) + (shift4 & 0xffff0000); +} + +:pkhtb^COND Rd,rn,shift4 is $(AMODE) & COND & c2027=0x68 & c0406=5 & Rd & rn & shift4 +{ + build COND; + build rn; + build shift4; + Rd = (shift4 & 0xffff) + (rn & 0xffff0000); +} + +@endif # VERSION_6 + +@if 
defined(VERSION_5E) + +:qadd^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x10 & Rn & Rd & c0811=0 & c0407=5 & Rm +{ + build COND; + local sum1 = Rm + Rn; + sum1 = SignedSaturate(sum1,32:2); + Q = SignedDoesSaturate(sum1,32:2); + Rd = sum1; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:qadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=1 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + sum1:2 = lRn:2 + lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 + uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +:qadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=9 & Rn & Rd & Rm +{ + build COND; + local rn1 = Rn & 0xff; + local rm1 = Rm & 0xff; + local rn2 = (Rn >> 8) & 0xff; + local rm2 = (Rm >> 8) & 0xff; + local rn3 = (Rn >> 16) & 0xff; + local rm3 = (Rm >> 16) & 0xff; + local rn4 = (Rn >> 24) & 0xff; + local rm4 = (Rm >> 24) & 0xff; + sum1:1 = rn1:1 + rm1:1; + sum1 = SignedSaturate(sum1,8:2); + sum2:1 = rn2:1 + rm2:1; + sum2 = SignedSaturate(sum2,8:2); + sum3:1 = rn3:1 + rm3:1; + sum3 = SignedSaturate(sum3,8:2); + sum4:1 = rn4:1 + rm4:1; + sum4 = SignedSaturate(sum4,8:2); + Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +# qaddsubx +:qasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=3 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + sum1:2 = lRn:2 - lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 + uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:qdadd^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x14 & Rn & Rd & c0811=0 & c0407=5 & Rm +{ + build COND; + tmp:4 = Rn * 2; + tmp = SignedSaturate(tmp,32:2); + Q = SignedDoesSaturate(tmp,32:2); + tmp = tmp + Rm; + tmp = SignedSaturate(tmp,32:2); + Q = Q | SignedDoesSaturate(tmp,32:2); + Rd = tmp; +} + +:qdsub^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x16 & Rn & Rd & c0811=0 & c0407=5 & Rm +{ + build COND; + tmp:4 = Rn * 2; + tmp = SignedSaturate(tmp,32:2); + Q = SignedDoesSaturate(tmp,32:2); + tmp = Rm - tmp; + tmp = SignedSaturate(tmp,32:2); + Q = Q | SignedDoesSaturate(tmp,32:2); + Rd = tmp; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +# qsubaddx +:qsax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=5 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + sum1:2 = lRn:2 + lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 - uRm:2; + sum2 = SignedSaturate(sum2,16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:qsub^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x12 & Rn & Rd & c0811=0 & c0407=5 & Rm +{ + build COND; + tmp:4 = Rm - Rn; + tmp = SignedSaturate(tmp,32:2); + Q = SignedDoesSaturate(tmp,32:2); + Rd = tmp; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:qsub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=7 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + sum1:2 = lRn:2 - lRm:2; + sum1 = SignedSaturate(sum1,16:2); + sum2:2 = uRn:2 - uRm:2; + sum2 = 
SignedSaturate(sum2,16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +:qsub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=15 & Rn & Rd & Rm +{ + build COND; + local rn1 = Rn & 0xff; + local rm1 = Rm & 0xff; + local rn2 = (Rn >> 8) & 0xff; + local rm2 = (Rm >> 8) & 0xff; + local rn3 = (Rn >> 16) & 0xff; + local rm3 = (Rm >> 16) & 0xff; + local rn4 = (Rn >> 24) & 0xff; + local rm4 = (Rm >> 24) & 0xff; + sum1:1 = rn1:1 - rm1:1; + sum1 = SignedSaturate(sum1,8:2); + sum2:1 = rn2:1 - rm2:1; + sum2 = SignedSaturate(sum2,8:2); + sum3:1 = rn3:1 - rm3:1; + sum3 = SignedSaturate(sum3,8:2); + sum4:1 = rn4:1 - rm4:1; + sum4 = SignedSaturate(sum4,8:2); + Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +@endif # VERSION_6 + +@if defined(VERSION_6T2) + +macro BitReverse_arm(val) { + tval:1 = val; + result:1 = 0; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + result = (result << 1) | (tval & 1); + tval = tval >> 1; + val = result; +} + + +:rbit^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=3 & c1619=15 & c0811=15 & Rd & rm +{ + build COND; + build rm; + local t:4 = rm & 0xff; + local b1:1 = t:1; + t = (rm >> 8) & 0xff; + local b2:1 = t:1; + t = (rm >> 16) & 0xff; + local b3:1 = t:1; + t = (rm >> 24) & 0xff; + local b4:1 = t:1; + BitReverse_arm(b1); + BitReverse_arm(b2); + BitReverse_arm(b3); + BitReverse_arm(b4); + Rd = (zext(b1) << 24) | (zext(b2) << 16) | (zext(b3) << 8) | zext(b4); +} + +@endif # VERSION_6T2 + +@if defined(VERSION_6) + +:rev^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=3 & c1619=15 & c0811=15 & Rd & rm +{ + build COND; + build rm; + local tmp1 = rm & 0xff; + local tmp2 = (rm >> 8) & 0xff; + local tmp3 = (rm >> 16) & 0xff; + local tmp4 = (rm >> 24) & 0xff; + Rd = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; +} + +:rev16^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=11 & Rd & rm +{ + build COND; + build rm; + local tmp1 = rm & 0xff; + local tmp2 = (rm >> 8) & 0xff; + local tmp3 = (rm >> 16) & 0xff; + local tmp4 = (rm >> 24) & 0xff; + Rd = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; +} + +:revsh^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=11 & Rd & rm +{ + build COND; + build rm; + local tmp1 = rm & 0xff; + local tmp2 = (rm >> 8) & 0xff; + tmp3:2 = zext(tmp1:1) << 8 | zext(tmp2:1); + Rd = sext(tmp3); +} + +@endif # VERSION_6 + +:rsb^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + subflags(shift1,rn); + Rd = shift1-rn; + resultflags(Rd); + build SBIT_CZNO; +} + +:rsb^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + subflags(shift2,rn); + Rd = shift2-rn; + resultflags(Rd); + build SBIT_CZNO; +} + +:rsb^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + subflags(shift3,rn); + Rd = shift3-rn; + resultflags(Rd); + build SBIT_CZNO; +} + +:rsb^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=3 & SBIT_CZNO & rn & 
Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + subflags(shift1,rn); + dest:4 = shift1-rn; + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:rsb^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + subflags(shift2,rn); + dest:4 = shift2-rn; + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:rsb^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + subflags(shift3,rn); + dest:4 = shift3-rn; + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:rsc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + sub_with_carry_flags(shift1,rn); + Rd=shift1-(rn+zext(!CY)); + resultflags(Rd); + build SBIT_CZNO; +} + +:rsc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + sub_with_carry_flags(shift2,rn); + Rd=shift2-(rn+zext(!CY)); + resultflags(Rd); + build SBIT_CZNO; +} + +:rsc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + sub_with_carry_flags(shift3,rn); + Rd=shift3-(rn+zext(!CY)); + resultflags(Rd); + build SBIT_CZNO; +} + +:rsc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + sub_with_carry_flags(shift1,rn); + local dest:4=shift1-(rn+zext(!CY)); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:rsc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + sub_with_carry_flags(shift2,rn); + local dest:4=shift2-(rn + zext(!CY)); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:rsc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + sub_with_carry_flags(shift3,rn); + local dest:4=shift3-(rn + zext(!CY)); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +@if defined(VERSION_6) + +:sadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=1 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn & 0xffff; + local tmpRm = Rm & 0xffff; + local sum1 = sext(tmpRn:2) + sext(tmpRm:2); + GE1 = sum1 s>= 0; + GE2 = sum1 s>= 0; + tmpRn = (Rn >> 16) & 0xffff; + tmpRm = (Rm >> 16) & 0xffff; + local sum2 = sext(tmpRn:2) + sext(tmpRm:2); + GE3 = sum2 s>= 0; + GE4 = sum2 s>= 0; + Rd = ((sum2 & 0xffff) << 16) | (sum1 & 0xffff); +} + +:sadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=9 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn & 0xff; + local tmpRm = Rm & 0xff; + local sum1 = sext(tmpRn:1) + sext(tmpRm:1); + GE1 = sum1 s>= 0; + tmpRn = (Rn >> 8) & 0xff; + tmpRm = (Rm >> 8) & 0xff; + local sum2 = sext(tmpRn:1) + sext(tmpRm:1); + GE2 = sum2 s>= 0; + tmpRn = (Rn >> 16) & 0xff; + tmpRm = (Rm >> 16) & 0xff; + local sum3 = sext(tmpRn:1) + sext(tmpRm:1); + GE3 = sum3 s>= 0; + tmpRn = (Rn >> 24) & 0xff; + tmpRm = (Rm >> 24) & 0xff; + local sum4 = sext(tmpRn:1) + sext(tmpRm:1); + GE4 = sum4 s>= 0; + Rd = ((sum4 & 0xff) << 
24) | ((sum3 & 0xff) << 16) | ((sum2 & 0xff) << 8) | (sum1 & 0xff); +} + +# saddsubx +:sasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=3 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + local sum1 = sext(uRn:2) + sext(lRm:2); + GE3 = sum1 s>= 0; + GE4 = sum1 s>= 0; + local diff = sext(lRn:2) - sext(uRm:2); + GE1 = diff s>= 0; + GE2 = diff s>= 0; + + Rd = ((sum1 & 0xffff) << 16) | (diff & 0xffff); +} + +@endif # VERSION_6 + +:sbc^SBIT_CZNO^COND Rd,rn,shift1 is $(AMODE) & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + sub_with_carry_flags(rn,shift1); + Rd = rn-(shift1+zext(!CY)); + resultflags(Rd); + build SBIT_CZNO; +} + +:sbc^SBIT_CZNO^COND Rd,rn,shift2 is $(AMODE) & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + sub_with_carry_flags(rn,shift2); + Rd = rn-(shift2 + zext(!CY)); + resultflags(Rd); + build SBIT_CZNO; +} + +:sbc^SBIT_CZNO^COND Rd,rn,shift3 is $(AMODE) & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + sub_with_carry_flags(rn,shift3); + Rd = rn-(shift3+zext(!CY)); + resultflags(Rd); + build SBIT_CZNO; +} + +:sbc^SBIT_CZNO^COND pc,rn,shift1 is $(AMODE) & pc & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + sub_with_carry_flags(rn,shift1); + local dest:4 = rn-(shift1 + zext(!CY)); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:sbc^SBIT_CZNO^COND pc,rn,shift2 is $(AMODE) & pc & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + sub_with_carry_flags(rn,shift2); + local dest:4 = rn-(shift2+zext(!CY)); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +:sbc^SBIT_CZNO^COND pc,rn,shift3 is $(AMODE) & pc & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + sub_with_carry_flags(rn,shift3); + local dest:4 = rn-(shift3 + zext(!CY)); + resultflags(dest); + build SBIT_CZNO; + ALUWritePC(dest); + goto [pc]; +} + +@if defined(VERSION_6) + +@if defined(VERSION_6T2) + +:sbfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & COND & c2127=0x3d & widthMinus1 & Rd & lsbImm & c0406=5 & Rm +{ + build COND; + build lsbImm; + build widthMinus1; + shift:4 = 31 - (lsbImm + widthMinus1); + Rd = Rm << shift; + shift = 31 - widthMinus1; + Rd = Rd s>> shift; +} + +@endif # VERSION_6T2 + +@if defined(VERSION_7) + +# Warning: note the non-standard use of Rd, Rm, Rn +:sdiv^COND RdHi,RnLo,RmHi is $(AMODE) & COND & c2027=0x71 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo +{ + build COND; + local result = RnLo / RmHi; + RdHi = result; +} + +@endif # VERSION_7 + +:sel^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x68 & Rn & Rd & c0811=15 & c0407=11 & Rm +{ + build COND; + local rD1 = ((zext(GE1) * Rn) + (zext(!GE1) * Rm)) & 0x0ff; + local rD2 = ((zext(GE2) * Rn) + (zext(!GE2) * Rm)) & 0x0ff00; + local rD3 = ((zext(GE3) * Rn) + (zext(!GE3) * Rm)) & 0x0ff0000; + local rD4 = ((zext(GE4) * Rn) + (zext(!GE4) * Rm)) & 0x0ff000000; + Rd = rD1 | rD2 | rD3 | rD4; +} + +@if defined(VERSION_6K) + +:sev^COND is $(AMODE) & COND & c0027=0x320f004 +{ + build COND; + SendEvent(); +} + +@endif # VERSION_6K + +# Hopefully we never encounter this instruction since we can not change the effective endianess of the language 
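+# For reference (an explanatory note, not upstream Ghidra text): the endianness of a
+# SLEIGH language is fixed statically in its .slaspec, e.g.
+#
+#   define endian=little;
+#
+# so the most this spec can do for SETEND is forward the requested state to the
+# setEndianState pseudo-op used below; the decoder's own byte order never changes.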
+armEndianNess: "LE" is c0031=0xf1010000 { export 0:1; } +armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } + +:setend armEndianNess is $(AMODE) & (c0031=0xf1010000 | c0031=0xf1010200) & armEndianNess { setEndianState(armEndianNess); } + + +:shadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=1 & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + sum1:4 = (sext(tmpRn:2) + sext(tmpRm:2)) >> 1; + sum2:4 = ((tmpRn s>> 16) + (tmpRm s>> 16)) >> 1; + Rd = (sum2 << 16) + (sum1 & 0xffff); +} + +:shadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=9 & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + sum1:4 = (sext(tmpRn:1) + sext(tmpRm:1)) >> 1; + local tmpn = tmpRn >> 8; + local tmpm = tmpRm >> 8; + sum2:4 = (sext(tmpn:1) + sext(tmpm:1)) >> 1; + tmpn = tmpRn >> 16; + tmpm = tmpRm >> 16; + sum3:4 = (sext(tmpn:1) + sext(tmpm:1)) >> 1; + tmpn = tmpRn >> 24; + tmpm = tmpRm >> 24; + sum4:4 = (sext(tmpn:1) + sext(tmpm:1)) >> 1; + Rd = (sum4 << 24) + ((sum3 & 0xff) << 16) + ((sum2 & 0xff) << 8) + (sum1 & 0xff); +} + +# shaddsubx +:shasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=3 & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + local diff:4 = sext(tmpRn[ 0,16]) - sext(tmpRm[16,16]); + local sum:4 = sext(tmpRn[16,16]) + sext(tmpRm[ 0,16]); + Rd[0,16] = diff[1,16]; + Rd[16,16] = sum[1,16]; +} + +# shsubbaddx +:shsax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=5 & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + local sum:4 = sext(tmpRn[ 0,16]) + sext(tmpRm[16,16]); + local diff:4 = sext(tmpRn[16,16]) - sext(tmpRm[ 0,16]); + Rd[ 0,16] = sum[1,16]; + Rd[16,16] = diff[1,16]; +} + +:shsub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=7 & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + sum1:4 = (sext(tmpRn:2) - sext(tmpRm:2)) >> 1; + sum2:4 = ((tmpRn s>> 16) - (tmpRm s>> 16)) >> 1; + Rd = (sum2 << 16) + (sum1 & 0xffff); +} + +:shsub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=15 & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + sum1:4 = (sext(tmpRn:1) - sext(tmpRm:1)) >> 1; + local tmpn = tmpRn >> 8; + local tmpm = tmpRm >> 8; + sum2:4 = (sext(tmpn:1) - sext(tmpm:1)) >> 1; + tmpn = tmpRn >> 16; + tmpm = tmpRm >> 16; + sum3:4 = (sext(tmpn:1) - sext(tmpm:1)) >> 1; + tmpn = tmpRn >> 24; + tmpm = tmpRm >> 24; + sum4:4 = (sext(tmpn:1) - sext(tmpm:1)) >> 1; + Rd = (sum4 << 24) + ((sum3 & 0xff) << 16) + ((sum2 & 0xff) << 8) + (sum1 & 0xff); +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:smla^XBIT^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x10 & smRd & smRn & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRa +{ + build COND; + local tmp:4 = sext(XBIT) * sext(YBIT); + Q = scarry(tmp,smRa) || Q; #Q flag is sticky + smRd = tmp+smRa; +} + +@endif + +@if defined(VERSION_6) + +:smlad^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x70 & c0407=1 & smRd & smRa & smRm & smRn +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + local tmpLRn = tmpRn:2; + local tmpURn = tmpRn >> 16; + local tmpLRm = tmpRm:2; + local tmpURm = tmpRm >> 16; + local product1 = sext(tmpLRn) * sext(tmpLRm); + local product2 = sext(tmpURn:2) * sext(tmpURm:2); + local tmpprod = product1 + product2; + Q = scarry(smRa, tmpprod) || Q; #Q is sticky + smRd = smRa + tmpprod; +} + +:smladx^COND smRd, smRn, smRm, smRa is $(AMODE) & COND & 
c2027=0x70 & c0407=3 & smRd & smRn & smRm & smRa +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + local tmpLRn = tmpRn:2; + local tmpURn = tmpRn >> 16; + local tmpLRm = tmpRm:2; + local tmpURm = tmpRm >> 16; + local product1 = sext(tmpLRn) * sext(tmpURm:2); + local product2 = sext(tmpURn:2) * sext(tmpLRm); + local tmpprod = product1 + product2; + Q = scarry(smRa, tmpprod) || Q; #Q is sticky + smRd = smRa + tmpprod; +} + +@endif # VERSION_6 + +:smlal^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2527=0 & c2124=7 & SBIT_ZN & RdLo & RdHi & smRn & c0407=9 & smRm +{ + build COND; + tmp:8 = (zext(RdHi) << 32) | zext(RdLo); + rs64:8 = sext(smRm); + rm64:8 = sext(smRn); + tmp = rs64 * rm64 + tmp; + resultflags(tmp); + RdLo = tmp(0); + RdHi = tmp(4); + build SBIT_ZN; +} + +@if defined(VERSION_5E) + +:smlal^XBIT^YBIT^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x14 & RdLo & RdHi & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn +{ + build COND; + local prod:8 = sext(XBIT) * sext(YBIT); + local result:8 = (zext(RdHi) << 32) | zext(RdLo); + result = result + prod; + RdLo = result(0); + RdHi = result(4); +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:smlald^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=1 & smRn & smRm +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + prod1:8 = sext(tmpRn:2) * sext(tmpRm:2); + rmHi:2 = tmpRm(2); + rnHi:2 = tmpRn(2); + prod2:8 = sext(rmHi) * sext(rnHi); + result:8 = zext(RdLo) + (zext(RdHi) << 32) + prod1 + prod2; + RdLo = result:4; + RdHi = result(4); +} + +:smlaldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=3 & smRn & smRm +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + rmHi:2 = tmpRm(2); + rnHi:2 = tmpRn(2); + prod1:8 = sext(tmpRn:2) * sext(rmHi); + prod2:8 = sext(rnHi) * sext(tmpRm:2); + result:8 = zext(RdLo) + (zext(RdHi) << 32) + prod1 + prod2; + RdLo = result:4; + RdHi = result(4); +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:smlaw^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x12 & smRd & smRn & smRm & c0707=1 & YBIT & x=0 & c0404=0 & smRa +{ + build COND; + local tmp64:6 = sext(smRn) * sext(YBIT); + local tmp32:4 = tmp64(2); + Q = scarry(tmp32, smRa) || Q; #Q flag is sticky + smRd = tmp32 + smRa; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:smlsd^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=0 & c0404=1 & smRm & smRa +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + prod1:4 = sext(tmpRn:2) * sext(tmpRm:2); + rnHi:2 = tmpRn(2); + rmHi:2 = tmpRm(2); + prod2:4 = sext(rnHi) * sext(rmHi); + diff:4 = prod1 - prod2; + Q = scarry(diff, smRa) || Q; #Q is sticky + smRd = smRa + diff; +} + +:smlsdx^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=1 & c0404=1 & smRm & smRa +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + rnHi:2 = tmpRn(2); + rmHi:2 = tmpRm(2); + prod1:4 = sext(tmpRn:2) * sext(rmHi); + prod2:4 = sext(rnHi) * sext(tmpRm:2); + diff:4 = prod1 - prod2; + Q = scarry(diff, smRa) || Q; #Q is sticky + smRd = smRa + diff; +} + +:smlsld^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=0 & c0404=1 & smRn +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + prod1:8 = sext(tmpRn:2) * sext(tmpRm:2); + rnHi:2 = tmpRn(2); + rmHi:2 = tmpRm(2); + prod2:8 = sext(rnHi) * sext(rmHi); + result:8 = zext(RdLo) + (zext(RdHi) << 32) + (prod1 - 
prod2); + RdLo = result:4; + RdHi = result(4); +} + +:smlsldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=1 & c0404=1 & smRn +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + rnHi:2 = tmpRn(2); + rmHi:2 = tmpRm(2); + prod1:8 = sext(tmpRn:2) * sext(rmHi); + prod2:8 = sext(rnHi) * sext(tmpRm:2); + result:8 = zext(RdLo) + (zext(RdHi) << 32) + (prod1 - prod2); + RdLo = result:4; + RdHi = result(4); +} + +:smmla^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=0 & c0404=1 & smRa +{ + build COND; + val:8 = sext(smRn) * sext(smRm); + val = (zext(smRa) << 32) + val; + smRd = val(4); +} + +:smmlar^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=1 & c0404=1 & smRa +{ + build COND; + val:8 = sext(smRn) * sext(smRm); + val = (zext(smRa) << 32) + val + 0x80000000; + smRd = val(4); +} + +:smmls^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=0 & c0404=1 & smRa +{ + build COND; + val:8 = sext(smRn) * sext(smRm); + val = (zext(smRa) << 32) - val; + smRd = val(4); +} + +:smmlsr^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=1 & c0404=1 & smRa +{ + build COND; + val:8 = sext(smRn) * sext(smRm); + val = (zext(smRa) << 32) - val + 0x80000000; + smRd = val(4); +} + +:smmul^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=0 & c0404=1 & smRm +{ + build COND; + val:8 = sext(smRn) * sext(smRm); + smRd = val(4); +} + +:smmulr^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=1 & c0404=1 & smRm +{ + build COND; + val:8 = (sext(smRn) * sext(smRm)) + 0x080000000; + smRd = val(4); +} + +:smuad^COND smRd, smRn, smRm is $(AMODE) & COND & c2027=0x70 & c0407=1 & smRd & c1619=15 & smRn & smRm +{ + build COND; + local tmpRm = smRm; + local tmpRn = smRn; + local tmpLRm = tmpRm:2; + local tmpURm = tmpRm >> 16; + local tmpLRn = tmpRn:2; + local tmpURn = tmpRn >> 16; + local product1 = sext(tmpLRm) * sext(tmpLRn); + local product2 = sext(tmpURm:2) * sext(tmpURn:2); + local tmpprod = product1 + product2; + Q = scarry(product1, product2); + smRd = tmpprod; +} + +:smuadx^COND smRd, smRn, smRm is $(AMODE) & COND & c2027=0x70 & c0407=3 & smRd & c1619=15 & smRn & smRm +{ + build COND; + local tmpRm = smRm; + local tmpRn = smRn; + local tmpLRm = tmpRm:2; + local tmpURm = tmpRm >> 16; + local tmpLRn = tmpRn:2; + local tmpURn = tmpRn >> 16; + local product1 = sext(tmpLRm) * sext(tmpURn:2); + local product2 = sext(tmpURm:2) * sext(tmpLRn); + local tmpprod = product1 + product2; + Q = scarry(product1, product2); + smRd = tmpprod; +} + +@endif # VERSION_6 + +@if defined(VERSION_5E) + +:smul^XBIT^YBIT^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x16 & smRd & c1215=0 & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn +{ + build COND; + tmp:8 = sext(XBIT) * sext(YBIT); + smRd = tmp:4; +} + +@endif # VERSION_5E + +:smull^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2527=0 & c2124=6 & SBIT_ZN & RdHi & RdLo & smRn & c0407=9 & smRm +{ + build COND; + rn64:8 = sext(smRn); + rm64:8 = sext(smRm); + local tmp = rn64 * rm64; + resultflags(tmp); + RdLo = tmp(0); + RdHi = tmp(4); + build SBIT_ZN; +} + +@if defined(VERSION_5E) + +:smulw^YBIT^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x12 & smRd & c1215=0 & smRn & c0707=1 & YBIT & x=1 & c0404=0 & smRm +{ + build COND; + tmp:6 = sext(smRn) * sext(YBIT); + tmp = tmp >> 16; + smRd 
= tmp:4; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:smusd^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=0 & c0404=1 & smRn +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + rmHi:2 = tmpRm(2); + prod1:4 = sext(tmpRn:2) * sext(tmpRm:2); + rnHi:2 = tmpRn(2); + prod2:4 = sext(rnHi) * sext(rmHi); + smRd = prod1 - prod2; +} + +:smusdx^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=1 & c0404=1 & smRn +{ + build COND; + local tmpRn = smRn; + local tmpRm = smRm; + rmHi:2 = tmpRm(2); + rnHi:2 = tmpRn(2); + prod1:4 = sext(tmpRn:2) * sext(rmHi); + prod2:4 = sext(rnHi) * sext(tmpRm:2); + smRd = prod1 - prod2; +} + + +:ssat^COND Rd, sSatImm5, shift4 is $(AMODE) & COND & c2127=0x35 & c0405=1 & sSatImm5 & Rd & shift4 +{ + build COND; + build shift4; + tmp:4 = SignedSaturate(shift4, sSatImm5); + Q = SignedDoesSaturate(shift4, sSatImm5); + Rd = tmp; +} + +:ssat16^COND Rd, sSatImm4, Rm is $(AMODE) & COND & c2027=0x6a & c0811=15 & c0407=0x3 & sSatImm4 & Rd & Rm +{ + build COND; + build sSatImm4; + local tmpl = Rm & 0xffff; + tmpl = SignedSaturate(tmpl, sSatImm4); + local tmpu = Rm >> 16; + tmpu = SignedSaturate(tmpu, sSatImm4); + Q = SignedDoesSaturate(tmpl,sSatImm4) | SignedDoesSaturate(tmpu,sSatImm4); + Rd = ((tmpu & 0xffff) << 16) | (tmpl & 0xffff); +} + +# ssubaddx +:ssax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=5 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + local diff = sext(uRn:2) - sext(lRm:2); + GE3 = diff s>= 0; + GE4 = diff s>= 0; + local sum = sext(lRn:2) + sext(uRm:2); + GE1 = sum s>= 0; + GE2 = sum s>= 0; + Rd = ((diff & 0xffff) << 16) | (sum & 0xffff); +} + +:ssub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=7 & Rn & Rd & Rm +{ + build COND; + local lRn = Rn & 0xffff; + local lRm = Rm & 0xffff; + local uRn = (Rn >> 16) & 0xffff; + local uRm = (Rm >> 16) & 0xffff; + local diffl = sext(lRn:2) - sext(lRm:2); + GE1 = diffl s>= 0; + GE2 = diffl s>= 0; + local diffu = sext(uRn:2) - sext(uRm:2); + GE3 = diffu s>= 0; + GE4 = diffu s>= 0; + Rd = ((diffu & 0xffff) << 16) | (diffl & 0xffff); +} + +:ssub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=15 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn & 0xff; + local tmpRm = Rm & 0xff; + local diff1 = sext(tmpRn:1) - sext(tmpRm:1); + GE1 = diff1 s>= 0; + tmpRn = (Rn >> 8) & 0xff; + tmpRm = (Rm >> 8) & 0xff; + local diff2 = sext(tmpRn:1) - sext(tmpRm:1); + GE2 = diff2 s>= 0; + tmpRn = (Rn >> 16) & 0xff; + tmpRm = (Rm >> 16) & 0xff; + local diff3 = sext(tmpRn:1) - sext(tmpRm:1); + GE3 = diff3 s>= 0; + tmpRn = (Rn >> 24) & 0xff; + tmpRm = (Rm >> 24) & 0xff; + local diff4 = sext(tmpRn:1) - sext(tmpRm:1); + GE4 = diff4 s>= 0; + Rd = ((diff4 & 0xff) << 24) | ((diff3 & 0xff) << 16) | ((diff2 & 0xff) << 8) | (diff1 & 0xff); +} + +@endif # VERSION_6 + +:stc^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 +{ + build COND; + build addrmode5; + t_cpn:4 = cpn; + coprocessor_store(t_cpn,CRd,addrmode5); +} + +:stcl^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0 +{ + build COND; + build addrmode5; + t_cpn:4 = cpn; + coprocessor_storelong(t_cpn,CRd,addrmode5); +} + +:stm^mdir^COND reglist is $(AMODE) & COND & c2527=4 & mdir & L20=0 & reglist +{ + build COND; + build reglist; +} + +#:str^COND 
Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +#{ +# build COND; +# build addrmode2; +# tmp=addrmode2&0xfffffffc; +# *tmp = Rd; +#} + +# The following form of str assumes alignment checking is on +:str^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + *addrmode2 = Rd; +} + +:strb^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + local tmpRd = Rd; + *addrmode2 = tmpRd:1; +} + +:strbt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 &P24=0 & B22=1 & W21=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + local tmpRd = Rd; + *addrmode2 = tmpRd:1; +} + +:strh^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=0 & c0407=11 & Rd & addrmode3 +{ + build COND; + build addrmode3; + local tmpRd = Rd; + *addrmode3 = tmpRd:2; +} + +@if defined(VERSION_5E) + +:strd^COND Rd,Rd2,addrmode3 is $(AMODE) & COND & c2527=0 & c0407=0xf & L20=0 & Rd & Rd2 & addrmode3 +{ + build COND; + build addrmode3; + local addr = addrmode3; + *(addr) = Rd; + addr = addr + 4; + *(addr) = Rd2; +} + +@endif # VERSION_5E + +@if defined(VERSION_6) + +:strex^COND Rd,Rm,[Rn] is $(AMODE) & COND & c2027=0x18 & c0411=0xf9 & Rn & Rd & Rm +{ + build COND; + local tmp = Rn; + access:1 = hasExclusiveAccess(tmp); + Rd = 1; + if (!access) goto inst_next; + Rd = 0; + *tmp = Rm; +} + +@endif # VERSION_6 + +@if defined(VERSION_6K) + +:strexb^COND Rd,Rm,[Rn] is $(AMODE) & COND & c2027=0x1c & c0411=0xf9 & Rn & Rd & Rm +{ + build COND; + local tmp = Rn; + access:1 = hasExclusiveAccess(tmp); + Rd = 1; + if (!access) goto inst_next; + Rd = 0; + local tmpRm = Rm; + *tmp = tmpRm:1; +} + +:strexd^COND Rd,Rm,Rm2,[Rn] is $(AMODE) & COND & c2027=0x1a & Rn & Rd & c0411=0xf9 & c0003 & Rm & Rm2 +{ + build COND; + local addr = Rn; + access:1 = hasExclusiveAccess(addr); + Rd = 1; + if (!access) goto inst_next; + Rd = 0; + *(addr) = Rm; + addr = addr + 4; + *(addr) = Rm2; +} + +:strexh^COND Rd,Rm,[Rn] is $(AMODE) & COND & c2027=0x1e & c0411=0xf9 & Rn & Rd & Rm +{ + build COND; + local tmp = Rn; + access:1 = hasExclusiveAccess(tmp); + Rd = 1; + if (!access) goto inst_next; + Rd = 0; + local tmpRm = Rm; + *tmp = tmpRm:2; +} + +:strht^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=0 & c0407=11 & Rd & addrmode3 { + build COND; + *:2 addrmode3 = Rd; +} + +@endif # VERSION_6K + +#:strt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & addrmode2 +#{ +# build COND; +# build addrmode2; +# tmp=addrmode2&0xfffffffc; +# *tmp = Rd; +#} + +# The following form of str assumes alignment checking is on +:strt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +{ + build COND; + build addrmode2; + *addrmode2 = Rd; +} + +:sub^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + subflags(rn,shift1); + Rd = rn-shift1; + resultflags(Rd); + build SBIT_CZNO; +} + +:sub^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + subflags(rn,shift2); + Rd = rn-shift2; + resultflags(Rd); + build SBIT_CZNO; +} + +:sub^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=2 & SBIT_CZNO & rn & 
Rd & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + subflags(rn,shift3); + Rd = rn-shift3; + resultflags(Rd); + build SBIT_CZNO; +} + +:sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + subflags(rn,shift1); + dest:4 = rn-shift1; + resultflags(dest); + build SBIT_CZNO; + cpsr = spsr; + SetThumbMode( ((cpsr >> 5) & 1) != 0 ); + pc = dest; + goto [pc]; +} + +:sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & Rn=14 & I25=1 & immed=0 & rotate=0 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + subflags(rn,shift1); + dest:4 = rn-shift1; + resultflags(dest); + build SBIT_CZNO; + cpsr = spsr; + ALUWritePC(dest); + return [pc]; +} + +:sub^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + subflags(rn,shift2); + dest:4 = rn-shift2; + resultflags(dest); + build SBIT_CZNO; + cpsr = spsr; + SetThumbMode( ((cpsr >> 5) & 1) != 0 ); + pc = dest; + goto [pc]; +} + +:sub^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + subflags(rn,shift3); + dest:4 = rn-shift3; + resultflags(dest); + build SBIT_CZNO; + cpsr = spsr; + SetThumbMode( ((cpsr >> 5) & 1) != 0 ); + pc = dest; + goto [pc]; +} + +:swi^COND immed24 is $(AMODE) & COND & c2427=15 & immed24 +{ + build COND; + tmp:4 = immed24; + software_interrupt(tmp); +} + +#:swp^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm +#{ +# build COND; +# tmp = Rn & 0xfffffffc; +# tmp2 = (Rn&3)<<3; +# val:4 = *tmp; +# val=(val>>tmp2) | (val << (32-tmp2)); +# *tmp = Rm; +# Rd = val; +#} + +# Assuming alignment checking is enabled +:swp^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm +{ + build COND; + val:4 = *Rn; + *Rn = Rm; + Rd = val; +} + +:swpb^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=20 & Rn & Rd & c0811=0 & c0407=9 & Rm +{ + build COND; + local tmp = *:1 Rn; + local tmpRm = Rm; + *Rn = tmpRm:1; + Rd = zext(tmp); +} + +@if defined(VERSION_6) + +:sxtab^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=2 & c0407=7 & Rd & Rn & ror1 +{ + build COND; + build ror1; + Rd = Rn + sext(ror1:1); +} + +:sxtab16^COND Rd,Rn,ror1 is $(AMODE) & COND & c2027=0x68 & c0407=7 & Rn & Rd & ror1 +{ + build COND; + build ror1; + b:1 = ror1:1; + lo:2 = Rn:2 + sext(b); + b = ror1(2); + hi:2 = Rn(2) + sext(b); + Rd = (zext(hi) << 16) + zext(lo); +} + +:sxtah^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=7 & Rd & Rn & ror1 +{ + build COND; + build ror1; + Rd = Rn + sext(ror1:2); +} + +:sxtb^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=2 & c0407=7 & Rd & c1619=15 & ror1 +{ + build COND; + build ror1; + Rd = sext(ror1:1); +} + +:sxtb16^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=0 & c0407=7 & Rd & c1619=15 & ror1 +{ + build COND; + build ror1; + local tmp1:1 = ror1:1; + local low:2 = sext(tmp1); + local tmp2:1 = ror1(2); + local high:2 = sext(tmp2); + Rd = (zext(high) << 16) | zext(low); +} + +:sxth^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=7 & Rd & c1619=15 & ror1 +{ + build COND; + build ror1; + Rd = sext(ror1:2); +} + +@endif # VERSION_6 + +:teq^COND rn,shift1 is $(AMODE) & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + local tmp = rn^shift1; + 
logicflags(); + resultflags(tmp); + affectflags(); +} + +:teq^COND rn,shift2 is $(AMODE) & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + local tmp = rn^shift2; + logicflags(); + resultflags(tmp); + affectflags(); +} + +:teq^COND rn,shift3 is $(AMODE) & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + local tmp = rn^shift3; + logicflags(); + resultflags(tmp); + affectflags(); +} + +:teq^COND^"p" rn,shift1 is $(AMODE) & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + local tmp = rn^shift1; + logicflags(); + resultflags(tmp); + affectflags(); +} + +:teq^COND^"p" rn,shift2 is $(AMODE) & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + local tmp = rn^shift2; + logicflags(); + resultflags(tmp); + affectflags(); +} + +:teq^COND^"p" rn,shift3 is $(AMODE) & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + local tmp = rn^shift3; + logicflags(); + resultflags(tmp); + affectflags(); +} + + +:tst^COND rn,shift1 is $(AMODE) & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift1 +{ + build COND; + build rn; + build shift1; + local tmp = rn & shift1; + logicflags(); + resultflags(tmp); + affectflags(); +} + +:tst^COND rn,shift2 is $(AMODE) & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift2 +{ + build COND; + build rn; + build shift2; + local tmp = rn & shift2; + logicflags(); + resultflags(tmp); + affectflags(); +} + +:tst^COND rn,shift3 is $(AMODE) & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift3 +{ + build COND; + build rn; + build shift3; + local tmp = rn & shift3; + logicflags(); + resultflags(tmp); + affectflags(); +} + +@if defined(VERSION_6) + +:uadd16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=1 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm:2; + local tcarry = carry(tmp1,tmp2); + GE1 = tcarry; + GE2 = tcarry; + local tmpLow = tmp1 + tmp2; + tmp1 = rn(2); + tmp2 = rm(2); + tcarry = carry(tmp1,tmp2); + GE3 = tcarry; + GE4 = tcarry; + local tmpHigh = tmp1 + tmp2; + Rd = zext(tmpHigh) << 16 | zext(tmpLow); +} + +:uadd8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=9 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:1 = tmpRn:1; + tmp2:1 = tmpRm:1; + GE1 = carry(tmp1,tmp2); + b1:1 = tmp1 + tmp2; + tmp1 = rn(1); + tmp2 = rm(1); + GE2 = carry(tmp1,tmp2); + b2:1 = tmp1 + tmp2; + tmp1 = rn(2); + tmp2 = rm(2); + GE3 = carry(tmp1,tmp2); + b3:1 = tmp1 + tmp2; + tmp1 = rn(3); + tmp2 = rm(3); + GE4 = carry(tmp1,tmp2); + b4:1 = tmp1 + tmp2; + Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1); +} + +# uaddsubx +:uasx^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=3 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm(2); + local tmpLow:4 = zext(tmp1) - zext(tmp2); + GE1 = tmpLow s>= 0; + GE2 = tmpLow s>= 0; + tmp1 = tmpRn(2); + tmp2 = tmpRm:2; + tcarry:1 = carry(tmp1,tmp2); + GE3 = tcarry; + GE4 = tcarry; + local tmpHigh = tmp1 + tmp2; + Rd[0,16] = tmpLow[0,16]; + Rd[16,16] = tmpHigh; + } + +@endif # VERSION_6 + +@if defined(VERSION_6T2) + +:ubfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & COND & c2127=0x3f & widthMinus1 & Rd & lsbImm & c0406=5 & Rm +{ 
+ build COND; + build lsbImm; + build widthMinus1; + shift:4 = 31 - (lsbImm + widthMinus1); + Rd = Rm << shift; + shift = 31 - widthMinus1; + Rd = Rd >> shift; +} + +@endif # VERSION_6T2 + +@if defined(VERSION_7) + +:udiv^COND RdHi,RnLo,RmHi is $(AMODE) & COND & c2027=0x73 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo +{ + build COND; + result:8 = zext(RnLo) / zext(RmHi); + RdHi = result(0); +} + +@endif # VERSION_7 + +@if defined(VERSION_6) + +:uhadd16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=1 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:4 = tmpRn & 0xffff; + tmp2:4 = tmpRm & 0xffff; + local tmpLow = tmp1 + tmp2; + local tmpHigh = (tmpRn >> 16) + (tmpRm >> 16); + Rd[0,16] = tmpLow[1,16]; + Rd[16,16] = tmpHigh[1,16]; +} + +:uhadd8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=9 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:1 = tmpRn:1; + tmp2:1 = tmpRm:1; + b1:2 = (zext(tmp1) + zext(tmp2)) >> 1; + tmp1 = tmpRn(1); + tmp2 = tmpRm(1); + b2:2 = (zext(tmp1) + zext(tmp2)) >> 1; + tmp1 = tmpRn(2); + tmp2 = tmpRm(2); + b3:2 = (zext(tmp1) + zext(tmp2)) >> 1; + tmp1 = tmpRn(3); + tmp2 = tmpRm(3); + b4:2 = (zext(tmp1) + zext(tmp2)) >> 1; + Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1); +} + +# uhaddsubx +:uhasx^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=3 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm(2); + tmpLow:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; + tmp1 = tmpRn(2); + tmp2 = tmpRm:2; + tmpHigh:4 = (zext(tmp1) + zext(tmp2)) >> 1; + Rd = (tmpHigh << 16) | tmpLow; +} + +# uhsubaddx +:uhsax^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=5 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm(2); + tmpLow:4 = (zext(tmp1) + zext(tmp2)) >> 1; + tmp1 = tmpRn(2); + tmp2 = tmpRm:2; + tmpHigh:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; + Rd = (tmpHigh << 16) | tmpLow; +} + +:uhsub16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=7 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm:2; + tmpLow:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; + tmp1 = rn(2); + tmp2 = rm(2); + tmpHigh:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; + Rd = (tmpHigh << 16) | tmpLow; +} + +:uhsub8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=15 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:1 = tmpRn:1; + tmp2:1 = tmpRm:1; + b1:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; + tmp1 = tmpRn(1); + tmp2 = tmpRm(1); + b2:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; + tmp1 = tmpRn(2); + tmp2 = tmpRm(2); + b3:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; + tmp1 = tmpRn(3); + tmp2 = tmpRm(3); + b4:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; + Rd = (b4 << 24) | (b3 << 16) | (b2 << 8) | b1; +} + +:umaal^COND RdLo,RdHi,Rm,Rs is $(AMODE) & COND & c2027=0x04 & RdHi & RdLo & Rs & c0407=9 & Rm +{ + build COND; + result:8 = (zext(Rm) * zext(Rs)) + zext(RdLo) + zext(RdHi); + RdLo = result:4; + RdHi = result(4); +} + +@endif # VERSION_6 + +:umlal^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & COND & c2527=0 & c2124=5 & SBIT_ZN & Rn & Rd & 
rs & c0407=9 & rm +{ + build COND; + build rm; + build rs; + tmp:8 = (zext(Rn) << 32) | zext(Rd); + rs64:8 = zext(rs); + rm64:8 = zext(rm); + tmp = rs64 * rm64 + tmp; + resultflags(tmp); + Rd = tmp(0); + Rn = tmp(4); + build SBIT_ZN; +} + +:umull^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & COND & c2527=0 & c2124=4 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm +{ + build COND; + build rm; + build rs; + rs64:8 = zext(rs); + rm64:8 = zext(rm); + local tmp = rs64 * rm64; + resultflags(tmp); + Rd = tmp(0); + Rn = tmp(4); + build SBIT_ZN; +} + +@if defined(VERSION_6) + +:uqadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=1 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + tmp2Rn:2 = tmpRn:2; + tmp2Rm:2 = tmpRm:2; + sum1:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2); + tmp2Rn = tmpRn(2); + tmp2Rm = tmpRm(2); + sum2:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +:uqadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=9 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + tmp1Rn:1 = tmpRn:1; + tmp1Rm:1 = tmpRm:1; + sum1:1 = UnsignedSaturate(tmp1Rn + tmp1Rm, 16:2); + tmp1Rn = tmpRn(1); + tmp1Rm = tmpRm(1); + sum2:2 = UnsignedSaturate(tmp1Rn + tmp1Rm, 16:2); + tmp1Rn = tmpRn(2); + tmp1Rm = tmpRm(2); + sum3:2 = UnsignedSaturate(tmp1Rn + tmp1Rm, 16:2); + tmp1Rn = tmpRn(3); + tmp1Rm = tmpRm(3); + sum4:2 = UnsignedSaturate(tmp1Rn + tmp1Rm, 16:2); + Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +# uqaddsubx +:uqasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=3 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + tmp2Rn:2 = tmpRn:2; + tmp2Rm:2 = tmpRm(2); + sum1:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2); + tmp2Rn = tmpRn(2); + tmp2Rm = tmpRm:2; + sum2:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +# uqsubaddx +:uqsax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=5 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + tmp2Rn:2 = tmpRn:2; + tmp2Rm:2 = tmpRm(2); + sum1:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2); + tmp2Rn = tmpRn(2); + tmp2Rm = tmpRm:2; + sum2:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +:uqsub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=7 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + tmp2Rn:2 = tmpRn:2; + tmp2Rm:2 = tmpRm:2; + sum1:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2); + tmp2Rn = tmpRn(2); + tmp2Rm = tmpRm(2); + sum2:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2); + Rd = (zext(sum2) << 16) | zext(sum1); +} + +:uqsub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=15 & Rn & Rd & Rm +{ + build COND; + local tmpRn = Rn; + local tmpRm = Rm; + tmp1Rn:1 = tmpRn:1; + tmp1Rm:1 = tmpRm:1; + sum1:1 = UnsignedSaturate(tmp1Rn - tmp1Rm, 16:2); + tmp1Rn = tmpRn(1); + tmp1Rm = tmpRm(1); + sum2:2 = UnsignedSaturate(tmp1Rn - tmp1Rm, 16:2); + tmp1Rn = tmpRn(2); + tmp1Rm = tmpRm(2); + sum3:2 = UnsignedSaturate(tmp1Rn - tmp1Rm, 16:2); + tmp1Rn = tmpRn(3); + tmp1Rm = tmpRm(3); + sum4:2 = UnsignedSaturate(tmp1Rn - tmp1Rm, 16:2); + Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +:usad8^COND Rd, Rm, Rs is $(AMODE) & COND & c2027=0x78 & c1215=15 & c0407=1 & Rd & Rm & Rs +{ + build COND; + local tmpRs = Rs; + local tmpRm = Rm; + tmp1Rs:1 = tmpRs:1; + tmp1Rm:1 = tmpRm:1; + 
sum1:1 = Absolute(tmp1Rs - tmp1Rm); + tmp1Rs = tmpRs(1); + tmp1Rm = tmpRm(1); + sum2:1 = Absolute(tmp1Rs - tmp1Rm); + tmp1Rs = tmpRs(2); + tmp1Rm = tmpRm(2); + sum3:1 = Absolute(tmp1Rs - tmp1Rm); + tmp1Rs = tmpRs(3); + tmp1Rm = tmpRm(3); + sum4:1 = Absolute(tmp1Rs - tmp1Rm); + Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); +} + +:usada8^COND Rd, Rm, Rs, Rn is $(AMODE) & COND & c2027=0x78 & c0407=1 & Rd & Rn& Rm & Rs +{ + build COND; + local tmpRs = Rs; + local tmpRm = Rm; + tmp1Rs:1 = tmpRs:1; + tmp1Rm:1 = tmpRm:1; + sum1:1 = Absolute(tmp1Rs - tmp1Rm); + tmp1Rs = tmpRs(1); + tmp1Rm = tmpRm(1); + sum2:1 = Absolute(tmp1Rs - tmp1Rm); + tmp1Rs = tmpRs(2); + tmp1Rm = tmpRm(2); + sum3:1 = Absolute(tmp1Rs - tmp1Rm); + tmp1Rs = tmpRs(3); + tmp1Rm = tmpRm(3); + sum4:1 = Absolute(tmp1Rs - tmp1Rm); + Rd = Rn + ((zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1)); +} + +:usat^COND Rd, uSatImm5, shift4 is $(AMODE) & COND & c2127=0x37 & c0405=0x1 & uSatImm5 & Rd & shift4 +{ + build COND; + build uSatImm5; + build shift4; + tmp:4 = UnsignedSaturate(shift4, uSatImm5); + Q = UnsignedDoesSaturate(shift4, uSatImm5); + Rd = tmp; +} + +:usat16^COND Rd, uSatImm4, Rm is $(AMODE) & COND & c2027=0x6e & c0811=15 & c0407=0x3 & uSatImm4 & Rd & Rm +{ + build COND; + build uSatImm4; + local tmpl = Rm & 0xffff; + tmpl = UnsignedSaturate(tmpl, uSatImm4); + local tmpu = Rm >> 16; + tmpu = UnsignedSaturate(tmpu, uSatImm4); + Q = UnsignedDoesSaturate(tmpl,uSatImm4) | UnsignedDoesSaturate(tmpu,uSatImm4); + Rd = ((tmpu & 0xffff) << 16) | (tmpl & 0xffff); +} + +# usubaddx +:usax^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=5 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm(2); + local tcarry = carry(tmp2,tmp1); + GE1 = tcarry; + GE2 = tcarry; + local tmpLow = tmp1 + tmp2; + tmp1 = tmpRn(2); + tmp2 = tmpRm:2; + tcarry = tmp2 <= tmp1; + GE3 = tcarry; + GE4 = tcarry; + local tmpHigh = tmp1 - tmp2; + Rd = zext(tmpHigh) << 16 | zext(tmpLow); +} + +:usub16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=7 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:2 = tmpRn:2; + tmp2:2 = tmpRm:2; + local tcarry = tmp2 <= tmp1; + GE1 = tcarry; + GE2 = tcarry; + local tmpLow = tmp1 - tmp2; + tmp1 = tmpRn(2); + tmp2 = tmpRm(2); + tcarry = tmp2 <= tmp1; + GE3 = tcarry; + GE4 = tcarry; + local tmpHigh = tmp1 - tmp2; + Rd = zext(tmpHigh) << 16 | zext(tmpLow); +} + +:usub8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=15 & Rd & rn & rm +{ + build COND; + build rn; + build rm; + local tmpRn = rn; + local tmpRm = rm; + tmp1:1 = tmpRn:1; + tmp2:1 = tmpRm:1; + GE1 = tmp2 <= tmp1; + b1:1 = tmp1 - tmp2; + tmp1 = tmpRn(1); + tmp2 = tmpRm(1); + GE2 = tmp2 <= tmp1; + b2:1 = tmp1 - tmp2; + tmp1 = tmpRn(2); + tmp2 = tmpRm(2); + GE3 = tmp2 <= tmp1; + b3:1 = tmp1 - tmp2; + tmp1 = tmpRn(3); + tmp2 = tmpRm(3); + GE4 = tmp2 <= tmp1; + b4:1 = tmp1 - tmp2; + Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1); +} + +:uxtab^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=6 & c0407=7 & Rd & Rn & ror1 +{ + build COND; + build ror1; + Rd = Rn + zext(ror1:1); +} + +:uxtab16^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=4 & c0407=7 & Rd & Rn & ror1 +{ + build COND; + build ror1; + local tmp1 = ror1 & 0xff; + local tmp2 = (ror1 >> 16) & 0xff; + local tmp1n = (Rn + tmp1) & 
0xffff; + local tmp2n = (Rn >> 16) + tmp2; + Rd = (tmp2n << 16) | tmp1n; +} + +:uxtah^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=7 & Rd & Rn & ror1 +{ + build COND; + build ror1; + Rd = Rn + zext(ror1:2); +} + +:uxtb^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=6 & c0407=7 & Rd & c1619=15 & ror1 +{ + build COND; + build ror1; + Rd = ror1 & 0x0ff; +} + +:uxtb16^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=4 & c0407=7 & Rd & c1619=15 & ror1 +{ + build COND; + build ror1; + Rd = ror1 & 0x0ff00ff; +} + +:uxth^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=7 & Rd & c1619=15 & ror1 +{ + build COND; + build ror1; + Rd = ror1 & 0x0ffff; +} + +@endif # VERSION_6 + +# :v* Advanced SIMD and VFP instructions - see ARMneon.sinc + +@if defined(VERSION_6K) + +:wfe^COND is $(AMODE) & COND & c0027=0x320f002 +{ + build COND; + WaitForEvent(); +} + +:wfi^COND is $(AMODE) & COND & c0027=0x320f003 +{ + build COND; + WaitForInterrupt(); +} + +:yield^COND is $(AMODE) & COND & c0027=0x320f001 +{ + build COND; + HintYield(); +} + +@endif # VERSION_6K + +## Some special pseudo ops for better distinguishing +## indirect calls, and returns + +#:callx rm is $(AMODE) & pref=0xe1a0e00f; cond=14 & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm +#{ +# lr = inst_next + 8; +# TB=(rm&0x00000001)!=0; +# tmp=rm&0xfffffffe; +# call [tmp]; +# TB=0; +#} # Optional change to THUMB + +#:call^COND^SBIT_CZNO shift1 is $(AMODE) & pref=0xe1a0e00f; COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift1 +#{ +# lr = inst_next + 8; +# build COND; +# build shift1; +# pc = shift1; +# resultflags(pc); +# logicflags(); +# build SBIT_CZNO; +# call [pc]; +#} + +#:call^COND^SBIT_CZNO shift2 is $(AMODE) & pref=0xe1a0e00f; COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 +#{ +# lr = inst_next + 8; +# build COND; +# build shift2; +# pc = shift2; +# resultflags(pc); +# logicflags(); +# build SBIT_CZNO; +# call [pc]; +#} + +#:call^COND^SBIT_CZNO shift3 is $(AMODE) & pref=0xe1a0e00f; COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift3 +#{ +# lr = inst_next + 8; +# build COND; +# build shift3; +# pc = shift3; +# resultflags(pc); +# logicflags(); +# build SBIT_CZNO; +# call [pc]; +#} + + +@if defined(VERSION_6T2) || defined(VERSION_7) +} # End with : ARMcondCk=1 +@endif diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMneon.dwarf b/src/third-party/sleigh/processors/ARM/data/languages/ARMneon.dwarf new file mode 100644 index 00000000..fa2ff762 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMneon.dwarf @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMneon.sinc b/src/third-party/sleigh/processors/ARM/data/languages/ARMneon.sinc new file mode 100644 index 00000000..7f834993 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMneon.sinc @@ -0,0 +1,4717 @@ +# Advanced SIMD support / NEON + +# WARNING NOTE: Be very careful taking a subpiece or truncating a register with :# or (#) +# The LEBE hybrid language causes endian issues if you do not assign the register to a temp +# variable and then take a subpiece or truncate. 
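+# A minimal sketch of the safe pattern (illustrative only, using the Dd/Dm
+# subconstructors attached later in this file):
+#
+#   local tmp = Dm;       # copy the register into a temporary first ...
+#   Dd = zext(tmp:4);     # ... then take the subpiece of the temporary
+#
+# rather than truncating the register directly (Dd = zext(Dm:4);), which can read
+# the wrong half under the LEBE hybrid byte ordering.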
+# + +# The RM field is bits 22 and 23 of FPSCR +@define FPSCR_RMODE "fpscr[22,2]" + +@define TMODE_E "TMode=1 & thv_c2831=14" # check for neon instructions in thumb mode +@define TMODE_F "TMode=1 & thv_c2831=15" +@define TMODE_EorF "TMode=1 & thv_c2931=7" + +zero: "#0" is c0000 { export 0:8; } + +@if defined(SIMD) + +attach variables [ thv_Rm ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; + +attach variables [ Qn0 Qd0 Qm0 thv_Qn0 thv_Qd0 thv_Qm0 ] [ q0 _ q1 _ q2 _ q3 _ q4 _ q5 _ q6 _ q7 _ ]; +attach variables [ Qn1 Qd1 Qm1 thv_Qn1 thv_Qd1 thv_Qm1 ] [ q8 _ q9 _ q10 _ q11 _ q12 _ q13 _ q14 _ q15 _ ]; + +Qd: Qd0 is TMode=0 & Qd0 & D22=0 { export Qd0; } +Qd: Qd1 is TMode=0 & Qd1 & D22=1 { export Qd1; } +Qd: thv_Qd0 is TMode=1 & thv_Qd0 & thv_D22=0 { export thv_Qd0; } +Qd: thv_Qd1 is TMode=1 & thv_Qd1 & thv_D22=1 { export thv_Qd1; } + +Qn: Qn0 is TMode=0 & Qn0 & N7=0 { export Qn0; } +Qn: Qn1 is TMode=0 & Qn1 & N7=1 { export Qn1; } +Qn: thv_Qn0 is TMode=1 & thv_Qn0 & thv_N7=0 { export thv_Qn0; } +Qn: thv_Qn1 is TMode=1 & thv_Qn1 & thv_N7=1 { export thv_Qn1; } + +Qm: Qm0 is TMode=0 & Qm0 & M5=0 { export Qm0; } +Qm: Qm1 is TMode=0 & Qm1 & M5=1 { export Qm1; } +Qm: thv_Qm0 is TMode=1 & thv_Qm0 & thv_M5=0 { export thv_Qm0; } +Qm: thv_Qm1 is TMode=1 & thv_Qm1 & thv_M5=1 { export thv_Qm1; } + +@endif # SIMD + +@if defined(SIMD) || defined(VFPv3) || defined(VFPv2) + +attach variables [ Dm_3 thv_Dm_3 ] [ d0 d1 d2 d3 d4 d5 d6 d7 ]; + +attach variables [ Dn0 Dd0 Dm0 Dm_4 thv_Dn0 thv_Dd0 thv_Dm0 thv_Dm_4 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; +attach variables [ thv_Dd_1 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; +attach variables [ thv_Dd_2 ] [ d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ ]; +attach variables [ thv_Dd_3 ] [ d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ ]; +attach variables [ thv_Dd_4 ] [ d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ ]; +attach variables [ thv_Dd_5 ] [ d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ ]; +attach variables [ thv_Dd_6 ] [ d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ ]; +attach variables [ thv_Dd_7 ] [ d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ ]; +attach variables [ thv_Dd_8 ] [ d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_9 ] [ d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_10 ] [ d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_11 ] [ d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_12 ] [ d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_13 ] [ d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_14 ] [ d13 d14 d15 _ _ _ _ _ _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_15 ] [ d14 d15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; +attach variables [ thv_Dd_16 ] [ d15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; + +Dd: Dd0 is TMode=0 & Dd0 & D22=0 { export Dd0; } +Dn: Dn0 is TMode=0 & Dn0 & N7=0 { export Dn0; } +Dm: Dm0 is TMode=0 & Dm0 & M5=0 { export Dm0; } +Dd: thv_Dd0 is TMode=1 & thv_Dd0 & thv_D22=0 { export thv_Dd0; } +Dn: thv_Dn0 is TMode=1 & thv_Dn0 & thv_N7=0 { export thv_Dn0; } +Dm: thv_Dm0 is TMode=1 & thv_Dm0 & thv_M5=0 { export thv_Dm0; } + +Dd2: Dd is Dd { export Dd; } + +@endif # SIMD || VFPv3 || VFPv2 + +@if defined(SIMD) || defined(VFPv3) + +attach variables [ Dn1 Dd1 Dm1 thv_Dn1 thv_Dd1 thv_Dm1 ] [ d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 ]; + +Dd: Dd1 is TMode=0 & Dd1 & D22=1 {
export Dd1; } +Dn: Dn1 is TMode=0 & Dn1 & N7=1 { export Dn1; } +Dm: Dm1 is TMode=0 & Dm1 & M5=1 { export Dm1; } +Dd: thv_Dd1 is TMode=1 & thv_Dd1 & thv_D22=1 { export thv_Dd1; } +Dn: thv_Dn1 is TMode=1 & thv_Dn1 & thv_N7=1 { export thv_Dn1; } +Dm: thv_Dm1 is TMode=1 & thv_Dm1 & thv_M5=1 { export thv_Dm1; } + +@endif # SIMD || VFPv3 + +@if defined(VFPv2) || defined(VFPv3) + +attach variables [ Sn0 Sd0 Sm0 thv_Sn0 thv_Sd0 thv_Sm0 ] [ s0 s2 s4 s6 s8 s10 s12 s14 s16 s18 s20 s22 s24 s26 s28 s30 ]; +attach variables [ Sn1 Sd1 Sm1 thv_Sn1 thv_Sd1 thv_Sm1 ] [ s1 s3 s5 s7 s9 s11 s13 s15 s17 s19 s21 s23 s25 s27 s29 s31 ]; + +attach variables [ Sm0next thv_Sm0next ] [ s1 s3 s5 s7 s9 s11 s13 s15 s17 s19 s21 s23 s25 s27 s29 s31 ]; +attach variables [ Sm1next thv_Sm1next ] [ s2 s4 s6 s8 s10 s12 s14 s16 s18 s20 s22 s24 s26 s28 s30 _ ]; + +# We need to create separate constructors for each register rather than attaching +# directly to a context variable +@if defined (VFPv2) || defined(SIMD) +Sreg: s0 is s0 & regNum=0 { export s0; } +Sreg: s1 is s1 & regNum=1 { export s1; } +Sreg: s2 is s2 & regNum=2 { export s2; } +Sreg: s3 is s3 & regNum=3 { export s3; } +Sreg: s4 is s4 & regNum=4 { export s4; } +Sreg: s5 is s5 & regNum=5 { export s5; } +Sreg: s6 is s6 & regNum=6 { export s6; } +Sreg: s7 is s7 & regNum=7 { export s7; } +Sreg: s8 is s8 & regNum=8 { export s8; } +Sreg: s9 is s9 & regNum=9 { export s9; } +Sreg: s10 is s10 & regNum=10 { export s10; } +Sreg: s11 is s11 & regNum=11 { export s11; } +Sreg: s12 is s12 & regNum=12 { export s12; } +Sreg: s13 is s13 & regNum=13 { export s13; } +Sreg: s14 is s14 & regNum=14 { export s14; } +Sreg: s15 is s15 & regNum=15 { export s15; } +Sreg: s16 is s16 & regNum=16 { export s16; } +Sreg: s17 is s17 & regNum=17 { export s17; } +Sreg: s18 is s18 & regNum=18 { export s18; } +Sreg: s19 is s19 & regNum=19 { export s19; } +Sreg: s20 is s20 & regNum=20 { export s20; } +Sreg: s21 is s21 & regNum=21 { export s21; } +Sreg: s22 is s22 & regNum=22 { export s22; } +Sreg: s23 is s23 & regNum=23 { export s23; } +Sreg: s24 is s24 & regNum=24 { export s24; } +Sreg: s25 is s25 & regNum=25 { export s25; } +Sreg: s26 is s26 & regNum=26 { export s26; } +Sreg: s27 is s27 & regNum=27 { export s27; } +Sreg: s28 is s28 & regNum=28 { export s28; } +Sreg: s29 is s29 & regNum=29 { export s29; } +Sreg: s30 is s30 & regNum=30 { export s30; } +Sreg: s31 is s31 & regNum=31 { export s31; } + +Sreg2: s0 is s0 & reg2Num=0 { export s0; } +Sreg2: s1 is s1 & reg2Num=1 { export s1; } +Sreg2: s2 is s2 & reg2Num=2 { export s2; } +Sreg2: s3 is s3 & reg2Num=3 { export s3; } +Sreg2: s4 is s4 & reg2Num=4 { export s4; } +Sreg2: s5 is s5 & reg2Num=5 { export s5; } +Sreg2: s6 is s6 & reg2Num=6 { export s6; } +Sreg2: s7 is s7 & reg2Num=7 { export s7; } +Sreg2: s8 is s8 & reg2Num=8 { export s8; } +Sreg2: s9 is s9 & reg2Num=9 { export s9; } +Sreg2: s10 is s10 & reg2Num=10 { export s10; } +Sreg2: s11 is s11 & reg2Num=11 { export s11; } +Sreg2: s12 is s12 & reg2Num=12 { export s12; } +Sreg2: s13 is s13 & reg2Num=13 { export s13; } +Sreg2: s14 is s14 & reg2Num=14 { export s14; } +Sreg2: s15 is s15 & reg2Num=15 { export s15; } +Sreg2: s16 is s16 & reg2Num=16 { export s16; } +Sreg2: s17 is s17 & reg2Num=17 { export s17; } +Sreg2: s18 is s18 & reg2Num=18 { export s18; } +Sreg2: s19 is s19 & reg2Num=19 { export s19; } +Sreg2: s20 is s20 & reg2Num=20 { export s20; } +Sreg2: s21 is s21 & reg2Num=21 { export s21; } +Sreg2: s22 is s22 & reg2Num=22 { export s22; } +Sreg2: s23 is s23 & reg2Num=23 { export s23; } +Sreg2: s24 is s24 
& reg2Num=24 { export s24; } +Sreg2: s25 is s25 & reg2Num=25 { export s25; } +Sreg2: s26 is s26 & reg2Num=26 { export s26; } +Sreg2: s27 is s27 & reg2Num=27 { export s27; } +Sreg2: s28 is s28 & reg2Num=28 { export s28; } +Sreg2: s29 is s29 & reg2Num=29 { export s29; } +Sreg2: s30 is s30 & reg2Num=30 { export s30; } +Sreg2: s31 is s31 & reg2Num=31 { export s31; } + +Dreg: d0 is d0 & regNum=0 { export d0; } +Dreg: d1 is d1 & regNum=1 { export d1; } +Dreg: d2 is d2 & regNum=2 { export d2; } +Dreg: d3 is d3 & regNum=3 { export d3; } +Dreg: d4 is d4 & regNum=4 { export d4; } +Dreg: d5 is d5 & regNum=5 { export d5; } +Dreg: d6 is d6 & regNum=6 { export d6; } +Dreg: d7 is d7 & regNum=7 { export d7; } +Dreg: d8 is d8 & regNum=8 { export d8; } +Dreg: d9 is d9 & regNum=9 { export d9; } +Dreg: d10 is d10 & regNum=10 { export d10; } +Dreg: d11 is d11 & regNum=11 { export d11; } +Dreg: d12 is d12 & regNum=12 { export d12; } +Dreg: d13 is d13 & regNum=13 { export d13; } +Dreg: d14 is d14 & regNum=14 { export d14; } +Dreg: d15 is d15 & regNum=15 { export d15; } +Dreg2: d0 is d0 & reg2Num=0 { export d0; } +Dreg2: d1 is d1 & reg2Num=1 { export d1; } +Dreg2: d2 is d2 & reg2Num=2 { export d2; } +Dreg2: d3 is d3 & reg2Num=3 { export d3; } +Dreg2: d4 is d4 & reg2Num=4 { export d4; } +Dreg2: d5 is d5 & reg2Num=5 { export d5; } +Dreg2: d6 is d6 & reg2Num=6 { export d6; } +Dreg2: d7 is d7 & reg2Num=7 { export d7; } +Dreg2: d8 is d8 & reg2Num=8 { export d8; } +Dreg2: d9 is d9 & reg2Num=9 { export d9; } +Dreg2: d10 is d10 & reg2Num=10 { export d10; } +Dreg2: d11 is d11 & reg2Num=11 { export d11; } +Dreg2: d12 is d12 & reg2Num=12 { export d12; } +Dreg2: d13 is d13 & reg2Num=13 { export d13; } +Dreg2: d14 is d14 & reg2Num=14 { export d14; } +Dreg2: d15 is d15 & reg2Num=15 { export d15; } +@if defined(SIMD) || defined(VFPv3) +Dreg: d16 is d16 & regNum=16 { export d16; } +Dreg: d17 is d17 & regNum=17 { export d17; } +Dreg: d18 is d18 & regNum=18 { export d18; } +Dreg: d19 is d19 & regNum=19 { export d19; } +Dreg: d20 is d20 & regNum=20 { export d20; } +Dreg: d21 is d21 & regNum=21 { export d21; } +Dreg: d22 is d22 & regNum=22 { export d22; } +Dreg: d23 is d23 & regNum=23 { export d23; } +Dreg: d24 is d24 & regNum=24 { export d24; } +Dreg: d25 is d25 & regNum=25 { export d25; } +Dreg: d26 is d26 & regNum=26 { export d26; } +Dreg: d27 is d27 & regNum=27 { export d27; } +Dreg: d28 is d28 & regNum=28 { export d28; } +Dreg: d29 is d29 & regNum=29 { export d29; } +Dreg: d30 is d30 & regNum=30 { export d30; } +Dreg: d31 is d31 & regNum=31 { export d31; } +Dreg2: d16 is d16 & reg2Num=16 { export d16; } +Dreg2: d17 is d17 & reg2Num=17 { export d17; } +Dreg2: d18 is d18 & reg2Num=18 { export d18; } +Dreg2: d19 is d19 & reg2Num=19 { export d19; } +Dreg2: d20 is d20 & reg2Num=20 { export d20; } +Dreg2: d21 is d21 & reg2Num=21 { export d21; } +Dreg2: d22 is d22 & reg2Num=22 { export d22; } +Dreg2: d23 is d23 & reg2Num=23 { export d23; } +Dreg2: d24 is d24 & reg2Num=24 { export d24; } +Dreg2: d25 is d25 & reg2Num=25 { export d25; } +Dreg2: d26 is d26 & reg2Num=26 { export d26; } +Dreg2: d27 is d27 & reg2Num=27 { export d27; } +Dreg2: d28 is d28 & reg2Num=28 { export d28; } +Dreg2: d29 is d29 & reg2Num=29 { export d29; } +Dreg2: d30 is d30 & reg2Num=30 { export d30; } +Dreg2: d31 is d31 & reg2Num=31 { export d31; } +@else +# this is just a placeholder so the parse patterns will match correctly. 
+# regNum is 31 when the base pattern matches, and incremented when +# this constructor actually matches +Dreg: d0 is d0 & regNum=31 { export d0; } +Dreg2: d0 is d0 & reg2Num=31 { export d0; } +@endif +@endif + +VRm: Rm is TMode=0 & Rm { export Rm; } +VRm: thv_Rm is TMode=1 & thv_Rm { export thv_Rm; } + +VRn: Rn is TMode=0 & Rn { export Rn; } +VRn: thv_Rn is TMode=1 & thv_Rn { export thv_Rn; } + +VRd: Rd is TMode=0 & Rd { export Rd; } +VRd: thv_Rd is TMode=1 & thv_Rd { export thv_Rd; } + +Sd: Sd0 is TMode=0 & Sd0 & D22=0 { export Sd0; } +Sd: Sd1 is TMode=0 & Sd1 & D22=1 { export Sd1; } +Sd: thv_Sd0 is TMode=1 & thv_Sd0 & thv_D22=0 { export thv_Sd0; } +Sd: thv_Sd1 is TMode=1 & thv_Sd1 & thv_D22=1 { export thv_Sd1; } + +Sn: Sn0 is TMode=0 & Sn0 & N7=0 { export Sn0; } +Sn: Sn1 is TMode=0 & Sn1 & N7=1 { export Sn1; } +Sn: thv_Sn0 is TMode=1 & thv_Sn0 & thv_N7=0 { export thv_Sn0; } +Sn: thv_Sn1 is TMode=1 & thv_Sn1 & thv_N7=1 { export thv_Sn1; } + +Sm: Sm0 is TMode=0 & Sm0 & M5=0 { export Sm0; } +Sm: Sm1 is TMode=0 & Sm1 & M5=1 { export Sm1; } +Sm: thv_Sm0 is TMode=1 & thv_Sm0 & thv_M5=0 { export thv_Sm0; } +Sm: thv_Sm1 is TMode=1 & thv_Sm1 & thv_M5=1 { export thv_Sm1; } + +SmNext: Sm0next is TMode=0 & Sm0next & M5=0 { export Sm0next; } +SmNext: Sm1next is TMode=0 & Sm1next & M5=1 { export Sm1next; } +SmNext: thv_Sm0next is TMode=1 & thv_Sm0next & thv_M5=0 { export thv_Sm0next; } +SmNext: thv_Sm1next is TMode=1 & thv_Sm1next & thv_M5=1 { export thv_Sm1next; } + +Sd2: Sd is Sd { export Sd; } + +@endif # VFPv2 || VFPv3 + +udt: "s" is TMode=0 & c2424=0 { export 0:1; } +udt: "u" is TMode=0 & c2424=1 { export 1:1; } +udt: "s" is TMode=1 & thv_c2828=0 { export 0:1; } +udt: "u" is TMode=1 & thv_c2828=1 { export 1:1; } + +udt7: "s" is TMode=0 & c0707=0 { export 0:1; } +udt7: "u" is TMode=0 & c0707=1 { export 1:1; } +udt7: "s" is TMode=1 & thv_c0707=0 { export 0:1; } +udt7: "u" is TMode=1 & thv_c0707=1 { export 1:1; } + +fdt: "u" is TMode=0 & c0808=0 { export 0:1; } +fdt: "f" is TMode=0 & c0808=1 { export 1:1; } +fdt: "u" is TMode=1 & thv_c0808=0 { export 0:1; } +fdt: "f" is TMode=1 & thv_c0808=1 { export 1:1; } + +esize2021: "8" is TMode=0 & c2021=0 { export 1:4; } +esize2021: "16" is TMode=0 & c2021=1 { export 2:4; } +esize2021: "32" is TMode=0 & c2021=2 { export 4:4; } +esize2021: "64" is TMode=0 & c2021=3 { export 8:4; } +esize2021: "8" is TMode=1 & thv_c2021=0 { export 1:4; } +esize2021: "16" is TMode=1 & thv_c2021=1 { export 2:4; } +esize2021: "32" is TMode=1 & thv_c2021=2 { export 4:4; } +esize2021: "64" is TMode=1 & thv_c2021=3 { export 8:4; } + +esize2021x2: "16" is TMode=0 & c2021=0 { export 2:4; } +esize2021x2: "32" is TMode=0 & c2021=1 { export 4:4; } +esize2021x2: "64" is TMode=0 & c2021=2 { export 8:4; } +esize2021x2: "16" is TMode=1 & thv_c2021=0 { export 2:4; } +esize2021x2: "32" is TMode=1 & thv_c2021=1 { export 4:4; } +esize2021x2: "64" is TMode=1 & thv_c2021=2 { export 8:4; } + +esize1819: "8" is TMode=0 & c1819=0 { export 1:4; } +esize1819: "16" is TMode=0 & c1819=1 { export 2:4; } +esize1819: "32" is TMode=0 & c1819=2 { export 4:4; } +esize1819: "64" is TMode=0 & c1819=3 { export 8:4; } +esize1819: "8" is TMode=1 & thv_c1819=0 { export 1:4; } +esize1819: "16" is TMode=1 & thv_c1819=1 { export 2:4; } +esize1819: "32" is TMode=1 & thv_c1819=2 { export 4:4; } +esize1819: "64" is TMode=1 & thv_c1819=3 { export 8:4; } + +esize1819x2: "16" is TMode=0 & c1819=0 { export 2:4; } +esize1819x2: "32" is TMode=0 & c1819=1 { export 4:4; } +esize1819x2: "64" is TMode=0 & c1819=2 { export 8:4; } 
+esize1819x2: "16" is TMode=1 & thv_c1819=0 { export 2:4; } +esize1819x2: "32" is TMode=1 & thv_c1819=1 { export 4:4; } +esize1819x2: "64" is TMode=1 & thv_c1819=2 { export 8:4; } + +esize1819x3: "8" is TMode=0 & c1819=0 { export 1:4; } +esize1819x3: "16" is TMode=0 & c1819=1 { export 2:4; } +esize1819x3: "32" is TMode=0 & c1819=2 { export 4:4; } +esize1819x3: "8" is TMode=1 & thv_c1819=0 { export 1:4; } +esize1819x3: "16" is TMode=1 & thv_c1819=1 { export 2:4; } +esize1819x3: "32" is TMode=1 & thv_c1819=2 { export 4:4; } + +esize1011: "8" is TMode=0 & c1011=0 { export 1:4; } +esize1011: "16" is TMode=0 & c1011=1 { export 2:4; } +esize1011: "32" is TMode=0 & c1011=2 { export 4:4; } +esize1011: "64" is TMode=0 & c1011=3 { export 8:4; } +esize1011: "8" is TMode=1 & thv_c1011=0 { export 1:4; } +esize1011: "16" is TMode=1 & thv_c1011=1 { export 2:4; } +esize1011: "32" is TMode=1 & thv_c1011=2 { export 4:4; } +esize1011: "64" is TMode=1 & thv_c1011=3 { export 8:4; } + +esize0607: "8" is TMode=0 & c0607=0 { export 1:4; } +esize0607: "16" is TMode=0 & c0607=1 { export 2:4; } +esize0607: "32" is TMode=0 & c0607=2 { export 4:4; } +esize0607: "64" is TMode=0 & c0607=3 { export 8:4; } # see VLD4 (single 4-element structure to all lanes) +esize0607: "8" is TMode=1 & thv_c0607=0 { export 1:4; } +esize0607: "16" is TMode=1 & thv_c0607=1 { export 2:4; } +esize0607: "32" is TMode=1 & thv_c0607=2 { export 4:4; } +esize0607: "64" is TMode=1 & thv_c0607=3 { export 8:4; } # see VLD4 (single 4-element structure to all lanes) + +fesize2021: "16" is TMode=0 & c2020=1 { export 4:4; } +fesize2021: "32" is TMode=0 & c2020=0 { export 2:4; } +fesize2021: "16" is TMode=1 & thv_c2020=1 { export 4:4; } +fesize2021: "32" is TMode=1 & thv_c2020=0 { export 2:4; } + +define pcodeop VFPExpandImmediate; + + + +# float +vfpExpImm_4: imm is TMode=0 & c1919 & c1818 & c1617 & c0003 [ imm = (c1919 << 31) | ((c1818 $xor 1) << 30) | ((c1818 * 0x1f) << 25) | (c1617 << 23) | (c0003 << 19); ] { + export *[const]:4 imm; +} + +# float +vfpExpImm_4: imm is TMode=1 & thv_c1919 & thv_c1818 & thv_c1617 & thv_c0003 [ imm = (thv_c1919 << 31) | ((thv_c1818 $xor 1) << 30) | ((thv_c1818 * 0x1f) << 25) | (thv_c1617 << 23) | (thv_c0003 << 19); ] { + export *[const]:4 imm; +} + + +# double +vfpExpImm_8: imm is TMode=0 & c1919 & c1818 & c1617 & c0003 [ imm = (c1919 << 63) | ((c1818 $xor 1) << 62) | ((c1818 * 0xff) << 54) | (c1617 << 52) | (c0003 << 48); ] { + export *[const]:8 imm; +} + +# double +vfpExpImm_8: imm is TMode=1 & thv_c1919 & thv_c1818 & thv_c1617 & thv_c0003 [ imm = (thv_c1919 << 63) | ((thv_c1818 $xor 1) << 62) | ((thv_c1818 * 0xff) << 54) | (thv_c1617 << 52) | (thv_c0003 << 48); ] { + export *[const]:8 imm; +} + +define pcodeop SIMDExpandImmediate; + +simdExpImm_8: "#0" is TMode=0 & c2424=0 & c1618=0 & c0003=0 { + export 0:8; +} +simdExpImm_8: "simdExpand("^c0505^","^cmode^","^val^")" is TMode=0 & c2424 & c1618 & c0505 & c0003 & cmode [ val = (c2424 << 7) | (c1618 << 4) | c0003; ] { + imm64:8 = SIMDExpandImmediate(c0505:1, cmode:1, val:1); + export imm64; +} +simdExpImm_8: "#0" is TMode=1 & thv_c2828=0 & thv_c1618=0 & thv_c0003=0 { + export 0:8; +} +simdExpImm_8: "simdExpand("^thv_c0505^","^thv_cmode^","^val^")" is TMode=1 & thv_c2828 & thv_c1618 & thv_c0505 & thv_c0003 & thv_cmode [ val = (thv_c2828 << 7) | (thv_c1618 << 4) | thv_c0003; ] { + imm64:8 = SIMDExpandImmediate(thv_c0505:1, thv_cmode:1, val:1); + export imm64; +} + +simdExpImm_16: "#0" is TMode=0 & c2424=0 & c1618=0 & c0003=0 { + tmp:8 = 0; + tmp1:16 = zext(tmp); + export 
tmp1; +} +simdExpImm_16: "simdExpand("^c0505^","^cmode^","^val^")" is TMode=0 & c2424 & c1618 & c0505 & c0003 & cmode [ val = (c2424 << 7) | (c1618 << 4) | c0003; ] { + imm128:16 = SIMDExpandImmediate(c0505:1, cmode:1, val:1); + export imm128; +} +simdExpImm_16: "#0" is TMode=1 & thv_c2828=0 & thv_c1618=0 & thv_c0003=0 { + tmp:8 = 0; + tmp1:16 = zext(tmp); + export tmp1; +} +simdExpImm_16: "simdExpand("^thv_c0505^","^thv_cmode^","^val^")" is TMode=1 & thv_c2828 & thv_c1618 & thv_c0505 & thv_c0003 & thv_cmode [ val = (thv_c2828 << 7) | (thv_c1618 << 4) | thv_c0003; ] { + imm128:16 = SIMDExpandImmediate(thv_c0505:1, thv_cmode:1, val:1); + export imm128; +} + +simdExpImmDT: "i32" is TMode=0 & c0911=0 { } +simdExpImmDT: "i32" is TMode=0 & c0911=1 { } +simdExpImmDT: "i32" is TMode=0 & c0911=2 { } +simdExpImmDT: "i32" is TMode=0 & c0911=3 { } +simdExpImmDT: "i16" is TMode=0 & c0911=4 { } +simdExpImmDT: "i16" is TMode=0 & c0911=5 { } +simdExpImmDT: "i32" is TMode=0 & c0811=12 { } +simdExpImmDT: "i32" is TMode=0 & c0811=13 { } +simdExpImmDT: "i8" is TMode=0 & c0811=14 & c0505=0 { } +simdExpImmDT: "i64" is TMode=0 & c0811=14 & c0505=1 { } +simdExpImmDT: "f32" is TMode=0 & c0811=15 & c0505=0 { } + +simdExpImmDT: "i32" is TMode=1 & thv_c0911=0 { } +simdExpImmDT: "i32" is TMode=1 & thv_c0911=1 { } +simdExpImmDT: "i32" is TMode=1 & thv_c0911=2 { } +simdExpImmDT: "i32" is TMode=1 & thv_c0911=3 { } +simdExpImmDT: "i16" is TMode=1 & thv_c0911=4 { } +simdExpImmDT: "i16" is TMode=1 & thv_c0911=5 { } +simdExpImmDT: "i32" is TMode=1 & thv_c0811=12 { } +simdExpImmDT: "i32" is TMode=1 & thv_c0811=13 { } +simdExpImmDT: "i8" is TMode=1 & thv_c0811=14 & thv_c0505=0 { } +simdExpImmDT: "i64" is TMode=1 & thv_c0811=14 & thv_c0505=1 { } +simdExpImmDT: "f32" is TMode=1 & thv_c0811=15 & thv_c0505=0 { } + +macro replicate1to8(bytes, dest) { + local val:8 = zext(bytes); + val = val | (val << 8); + val = val | (val << 16); + dest = val | (val << 32); +} + +macro replicate2to8(bytes, dest) { + local val:8 = zext(bytes); + val = val | (val << 16); + dest = val | (val << 32); +} + +macro replicate4to8(bytes, dest) { + local val:8 = zext(bytes); + dest = val | (val << 32); +} + +define pcodeop VectorAbsoluteDifferenceAndAccumulate; +define pcodeop VectorAbsoluteDifference; +define pcodeop FloatVectorAbsoluteDifference; +define pcodeop VectorAbsolute; +define pcodeop FloatVectorAbsolute; + +@if defined(SIMD) + +# TODO: watch out for c2021=3 + +:vaba.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=1 ) ) & Dm & Dn & Dd & udt & esize2021 +{ + Dd = VectorAbsoluteDifferenceAndAccumulate(Dn,Dm,esize2021,udt); +} + +:vaba.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=1 & thv_c0404=1 ) ) & Qd & Qn & Qm & udt & esize2021 +{ + Qd = VectorAbsoluteDifferenceAndAccumulate(Qn,Qm,esize2021,udt); +} + +:vabal.^udt^esize2021 Qd,Dn,Dm is (($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=5 & Q6=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=5 & thv_c0606=0 & thv_c0404=0 ) ) & Qd & Dm & Dn & udt & esize2021 +{ + Qd = VectorAbsoluteDifferenceAndAccumulate(Dn,Dm,esize2021,udt); +} + +:vabd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & 
thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dn & Dd & udt & esize2021 +{ + Dd = VectorAbsoluteDifference(Dn,Dm,esize2021,udt); +} + +:vabd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_Q6=1 & thv_c0404=0 ) ) & Qd & Qn & Qm & udt & esize2021 +{ + Qd = VectorAbsoluteDifference(Qn,Qm,esize2021,udt); +} + +:vabdl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=7 & Q6=0 & c0404=0 ) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 +{ + Qd = VectorAbsoluteDifference(Dn,Dm,esize2021,udt); +} + +:vabd.f32 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=13 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=13 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & Dn +{ + Dd = FloatVectorAbsoluteDifference(Dn,Dm,2:1,32:1); +} + +:vabd.f32 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=13 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=13 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & Qn +{ + Qd = FloatVectorAbsoluteDifference(Qn,Qm,2:1,32:1); +} + +:vabs.s^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=6 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 +{ + Dd = VectorAbsolute(Dm,esize1819); +} + +:vabs.s^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=6 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 +{ + Qd = VectorAbsolute(Qm,esize1819); +} + + +:vabs.f32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xe & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xe & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dd +{ + Dd = FloatVectorAbsolute(Dm,2:1,32:1); +} + +:vabs.f32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xe & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xe & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm +{ + Qd = FloatVectorAbsolute(Qm,2:1,32:1); +} + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) + +:vabs^COND^".f32" Sd,Sm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x2b & c0404=0 ) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x2b & thv_c0404=0 ) ) & Sm & Sd +{ + build COND; + build Sd; + build Sm; + Sd = abs(Sm); +} + +:vabs^COND^".f64" Dd,Dm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x2f & c0404=0 ) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x2f & thv_c0404=0 ) ) & Dd & Dm +{ + build COND; + build Dd; + build Dm; + Dd = abs(Dm); +} + +@endif # VFPv2 || VFPv3 + +define pcodeop FloatCompareGE; +define pcodeop FloatCompareGT; +define pcodeop VectorAdd; +define pcodeop VectorSub; +define pcodeop FloatVectorAdd; +define pcodeop VectorPairwiseAdd; +define pcodeop VectorPairwiseMin; +define pcodeop VectorPairwiseMax; +define pcodeop FloatVectorPairwiseAdd; +define pcodeop FloatVectorPairwiseMin; +define pcodeop FloatVectorPairwiseMax; +define pcodeop VectorPairwiseAddLong; +define 
pcodeop VectorPairwiseAddAccumulateLong; + +@if defined(SIMD) + +:vacge.f32 Dd,Dn,Dm is $(AMODE) & cond=15 & c2327=6 & c2021=0 & Dn & Dd & c0811=14 & Q6=0 & c0404=1 & Dm +{ + Dd = FloatCompareGE(Dn,Dm,2:1,32:1); +} + +:vacge.f32 Qd,Qn,Qm is $(AMODE) & cond=15 & c2327=6 & c2021=0 & Qn & Qd & c0811=14 & Q6=1 & c0404=1 & Qm +{ + Qd = FloatCompareGE(Qn,Qm,2:1,32:1); +} + +:vacgt.f32 Dd,Dn,Dm is $(AMODE) & cond=15 & c2327=6 & c2021=2 & Dn & Dd & c0811=14 & Q6=0 & c0404=1 & Dm +{ + Dd = FloatCompareGT(Dn,Dm,2:1,32:1); +} + +:vacgt.f32 Qd,Qn,Qm is $(AMODE) & cond=15 & c2327=6 & c2021=2 & Qn & Qd & c0811=14 & Q6=1 & c0404=1 & Qm +{ + Qd = FloatCompareGT(Qn,Qm,2:1,32:1); +} + +:vadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dn & Dd & Dm +{ + Dd = VectorAdd(Dn,Dm,esize2021); +} + +:vadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=1 & thv_c0404=0) ) & esize2021 & Qm & Qn & Qd +{ + Qd = VectorAdd(Qn,Qm,esize2021); +} + +:vadd.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=13 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & Dm& Dn & Dd +{ + Dd = FloatVectorAdd(Dn,Dm,2:1,32:1); +} + +:vadd.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=13 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & Qn & Qd & Qm +{ + Qd = FloatVectorAdd(Qn,Qm,2:1,32:1); +} + + +:vpadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=11 & Q6=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=11 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm +{ + Dd = VectorPairwiseAdd(Dn,Dm,esize2021); +} + +:vpadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=11 & Q6=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=11 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd +{ + Qd = VectorPairwiseAdd(Qn,Qm,esize2021); +} + +:vpadd.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm& Dn & Dd +{ + Dd = FloatVectorPairwiseAdd(Dn,Dm,fesize2021:1); +} + + +:vpmax.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=0)) & udt & esize2021 & Dn & Dd & Dm +{ + Dd = VectorPairwiseMax(Dn,Dm,esize2021,udt); +} + + +:vpmax.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=0 & c0811=15 & Q6=0 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=0 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +{ + Dd = FloatVectorPairwiseMax(Dn,Dm,fesize2021:1); +} + +:vpmin.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm +{ + Dd = VectorPairwiseMin(Dn,Dm,esize2021,udt); +} + +:vpmin.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=1 & c0811=15 & Q6=0 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=1 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +{ + Dd = 
FloatVectorPairwiseMin(Dn,Dm,fesize2021:1); +} + +:vpadal.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=6 & thv_Q6=0 & thv_c0404=0)) & udt7 & esize1819 & Dd & Dm +{ + Dd = VectorPairwiseAddAccumulateLong(Dm,esize1819); +} + +:vpadal.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=6 & thv_Q6=1 & thv_c0404=0)) & udt7 & esize1819 & Qd & Qm +{ + Qd = VectorPairwiseAddAccumulateLong(Qm,esize1819); +} + +:vpaddl.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=2 & thv_Q6=0 & thv_c0404=0)) & udt7 & esize1819 & Dd & Dm +{ + Dd = VectorPairwiseAddLong(Dm,esize1819); +} + +:vpaddl.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=2 & thv_Q6=1 & thv_c0404=0)) & udt7 & esize1819 & Qd & Qm +{ + Qd = VectorPairwiseAddLong(Qm,esize1819); +} + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) + +:vadd^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=10 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & Sm & Sd & Sn +{ + build COND; + build Sd; + build Sm; + build Sn; + Sd = Sn f+ Sm; +} + +:vadd^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=11 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & Dm & Dd & Dn +{ + build COND; + build Dd; + build Dm; + build Dn; + Dd = Dn f+ Dm; +} + +@endif # VFPv2 || VFPv3 + +define pcodeop VectorAddReturnHigh; +define pcodeop VectorBitwiseInsertIfFalse; +define pcodeop VectorBitwiseInsertIfTrue; +define pcodeop VectorBitwiseSelect; +define pcodeop VectorCompareEqual; +define pcodeop FloatVectorCompareEqual; +define pcodeop VectorCompareGreaterThanOrEqual; +define pcodeop FloatVectorCompareGreaterThanOrEqual; +define pcodeop VectorCompareGreaterThan; +define pcodeop FloatVectorCompareGreaterThan; +define pcodeop VectorCountLeadingSignBits; +define pcodeop VectorCountLeadingZeros; + +@if defined(SIMD) + +:vaddhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=5 & c2021<3 & c0811=4 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=4 & thv_c0606=0 & thv_c0404=0) ) & esize2021x2 & Qn & Dd & Qm +{ + Dd = VectorAddReturnHigh(Qn,Qm,esize2021x2); +} + +:vaddl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0 & c0606=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Dn & Qd & Dm +{ + Qd = VectorAdd(Dn,Dm,esize2021,udt); +} + +:vaddw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=1 & c0606=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=1 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Qn & Qd & Dm +{ + Qd = VectorAdd(Qn,Dm,esize2021,udt); +} + + +:vand Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm +{ + Dd = 
Dn & Dm; +} + +:vand Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qn & Qd & Qm +{ + Qd = Qn & Qm; +} + +:vbic.i16 Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=3 ) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=3) ) & Dd & simdExpImm_8 +{ + Dd = Dd & ~simdExpImm_8; +} + +:vbic.i32 Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=7 ) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=7) ) & Qd & simdExpImm_16 +{ + Qd = Qd & ~simdExpImm_16; +} + +:vbic Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1) ) & Dm & Dn & Dd +{ + Dd = Dn & ~Dm; +} + +:vbic Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd +{ + Qd = Qn & ~Qm; +} + +:vbif Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=0 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1) ) & Dm & Dn & Dd +{ + Dd = VectorBitwiseInsertIfFalse(Dd,Dn,Dm); +} + +:vbif Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=1 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd +{ + Qd = VectorBitwiseInsertIfFalse(Qd,Qn,Qm); +} + +:vbit Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=0 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dm & Dn & Dd +{ + Dd = VectorBitwiseInsertIfTrue(Dd,Dn,Dm); +} + +:vbit Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=1 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd +{ + Qd = VectorBitwiseInsertIfTrue(Qd,Qn,Qm); +} + +:vbsl Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dm & Dn & Dd +{ + Dd = VectorBitwiseSelect(Dd,Dn,Dm); +} + +:vbsl Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd +{ + Qd = VectorBitwiseSelect(Qd,Qn,Qm); +} + +:vceq.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=8 & thv_Q6=0 & thv_c0404=1) ) & esize2021 & Dm & Dn & Dd +{ + Dd = VectorCompareEqual(Dn,Dm,esize2021); +} + +:vceq.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=8 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd +{ + Qd = VectorCompareEqual(Qn,Qm,esize2021); +} + +:vceq.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=14 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Dd +{ + Dd = FloatVectorCompareEqual(Dn,Dm,2:1,32:1); +} + +:vceq.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=14 & Q6=1 & c0404=0) | + 
($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & Qm & Qn & Qd +{ + Qd = FloatVectorCompareEqual(Qn,Qm,2:1,32:1); +} + +:vceq.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=2 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero +{ + Dd = VectorCompareEqual(Dm,zero,esize1819); +} + +:vceq.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=2 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero +{ + Qd = VectorCompareEqual(Qm,zero,esize1819); +} + +:vceq.f32 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=10 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=10 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero +{ + Dd = FloatVectorCompareEqual(Dm,zero,2:1,32:1); +} + +:vceq.f32 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=10 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=10 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero +{ + Qd = FloatVectorCompareEqual(Qm,zero,2:1,32:1); +} + +:vcge.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=0 & thv_c0404=1) ) & udt & esize2021 & Dm & Dn & Dd +{ + Dd = VectorCompareGreaterThanOrEqual(Dn,Dm,esize2021,udt); +} + +:vcge.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd +{ + Qd = VectorCompareGreaterThanOrEqual(Qn,Qm,esize2021,udt); +} + +:vcge.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=0 & c0811=14 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Dd +{ + Dd = FloatVectorCompareGreaterThanOrEqual(Dn,Dm,2:1,32:1); +} + +:vcge.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=0 & c0811=14 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & Qm & Qn & Qd +{ + Qd = FloatVectorCompareGreaterThanOrEqual(Qn,Qm,2:1,32:1); +} + +:vcge.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=1 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero +{ + Dd = VectorCompareGreaterThanOrEqual(Dm,zero,esize1819); +} + +:vcge.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=1 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero +{ + Qd = VectorCompareGreaterThanOrEqual(Qm,zero,esize1819); +} + +:vcge.f32 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=9 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=9 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero +{ + Dd = 
FloatVectorCompareGreaterThanOrEqual(Dm,zero,2:1,32:1); +} + +:vcge.f32 Qd,Qm,zero is( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=9 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=9 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero +{ + Qd = FloatVectorCompareGreaterThanOrEqual(Qm,zero,2:1,32:1); +} + +:vcgt.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=0 & thv_c0404=0) ) & udt & esize2021 & Dm & Dn & Dd +{ + Dd = VectorCompareGreaterThan(Dn,Dm,esize2021); +} + +:vcgt.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=1 & thv_c0404=0) ) & udt & esize2021 & Qm & Qn & Qd +{ + Qd = VectorCompareGreaterThan(Qn,Qm,esize2021); +} + +:vcgt.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=14 & Q6=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Dd +{ + Dd = FloatVectorCompareGreaterThan(Dn,Dm,2:1,32:1); +} + +:vcgt.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=14 & Q6=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & Qm & Qn & Qd +{ + Qd = FloatVectorCompareGreaterThan(Qn,Qm,2:1,32:1); +} + +:vcgt.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0 & thv_Q6=0 & thv_c0404=0 ) ) & esize1819 & Dd & Dm & zero +{ + Dd = VectorCompareGreaterThan(Dm,zero,esize1819); +} + +:vcgt.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0 & thv_Q6=1 & thv_c0404=0 ) ) & esize1819 & Qd & Qm & zero +{ + Qd = VectorCompareGreaterThan(Qm,zero,esize1819); +} + +:vcgt.f32 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=8 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=8 & thv_Q6=0 & thv_c0404=0 ) ) & esize1819 & Dd & Dm & zero +{ + Dd = FloatVectorCompareGreaterThan(Dm,zero,2:1,32:1); +} + +:vcgt.f32 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=8 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=8 & thv_Q6=1 & thv_c0404=0 ) ) & esize1819 & Qd & Qm & zero +{ + Qd = FloatVectorCompareGreaterThan(Qm,zero,2:1,32:1); +} + +:vcle.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=0 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=3 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm & zero +{ + Dd = VectorCompareGreaterThanOrEqual(zero,Dm,esize1819); +} + +:vcle.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=1 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=3 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm & zero +{ + Qd = VectorCompareGreaterThanOrEqual(zero,Qm,esize1819); +} + +:vcle.f32 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & 
c2021=3 & c1819<3 & c1617=1 & c0711=0xb & Q6=0 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0xb & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm & zero +{ + Dd = FloatVectorCompareGreaterThanOrEqual(zero,Dm,2:1,32:1); +} + +:vcle.f32 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0xb & Q6=1 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0xb & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm & zero +{ + Qd = FloatVectorCompareGreaterThanOrEqual(zero,Qm,2:1,32:1); +} + +:vcls.s^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=0 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=8 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm +{ + Dd = VectorCountLeadingSignBits(Dm,esize1819); +} + +:vcls.s^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=1 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=8 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm +{ + Qd = VectorCountLeadingSignBits(Qm,esize1819); +} + +:vclt.s^esize1819 Dd,Dm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=0 & c0404=0) | + ( $(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=4 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero +{ + Dd = VectorCompareGreaterThan(zero,Dm,esize1819); +} + +:vclt.s^esize1819 Qd,Qm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=1 & c0404=0) | + ( $(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=4 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero +{ + Qd = VectorCompareGreaterThan(zero,Qm,esize1819); +} + +:vclt.f32 Dd,Dm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=12 & Q6=0 & c0404=0) | + ( $(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=12 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero +{ + Dd = FloatVectorCompareGreaterThan(zero,Dm,2:1,32:1); +} + +:vclt.f32 Qd,Qm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=12 & Q6=1 & c0404=0) | + ( $(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=12 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero +{ + Qd = FloatVectorCompareGreaterThan(zero,Qm,2:1,32:1); +} + +:vclz.i^esize1819 Dd,Dm is $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & esize1819 & c1617=0 & Dd & c0711=9 & Q6=0 & c0404=0 & Dm +{ + Dd = VectorCountLeadingZeros(Dm,esize1819); +} + +:vclz.i^esize1819 Qd,Qm is $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & esize1819 & c1617=0 & Qd & c0711=9 & Q6=1 & c0404=0 & Qm +{ + Qd = VectorCountLeadingZeros(Qm,esize1819); +} + +@endif # SIMD + +# set float register flags correctly for comparison +macro FloatVectorCompare(op1,op2,nanx) { + local tNG = op1 f< op2; + local tZR = op1 f== op2; + local tCY = op2 f<= op1; + tOV:1 = nan(op1) | nan(op2); # this is really a comparison with NAN and may also raise an exception when NAN + + fpscr = (fpscr & 0x0fffffff) | (zext(tNG) << 31) | (zext(tZR) << 30) | (zext(tCY) << 29) | (zext(tOV) << 28); +} + +@if defined(VFPv2) || defined(VFPv3) + +nanx: "e" is c0707=1 { export 1:1; } +nanx: is c0707=0 { export 0:1; } + +:vcmp^nanx^COND^".f32" Sd,Sm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & 
c2021=3 & c1619=4 & c0811=0b1010 & c0606=1 & c0404=0) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=4 & thv_c0811=0b1010 & thv_c0606=1 & thv_c0404=0) ) & Sd & nanx & Sm +{ + build COND; + build Sd; + build Sm; + + FloatVectorCompare(Sd,Sm,nanx); +} + +:vcmp^nanx^COND^".f64" Dd,Dm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=4 & c0811=0b1011 & c0606=1 & c0404=0) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=4 & thv_c0811=0b1011 & thv_c0606=1 & thv_c0404=0) ) & Dd & nanx & Dm +{ + build COND; + build Dd; + build Dm; + FloatVectorCompare(Dd,Dm,nanx); +} + +:vcmp^nanx^COND^".f32" Sd,zero is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=5 & c0811=0b1010 & c0006=0b1000000 ) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=5 & thv_c0811=0b1010 & thv_c0006=0b1000000 ) ) & Sd & nanx & zero +{ + build COND; + build Sd; + local Zero:4 = 0; + FloatVectorCompare(Sd,Zero,nanx); +} + +:vcmp^nanx^COND^".f64" Dd,zero is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=5 & c0811=0b1011 & c0006=0b1000000 ) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=5 & thv_c0811=0b1011 & thv_c0006=0b1000000 ) ) & Dd & nanx & zero +{ + build COND; + build Dd; + local Zero:8 = 0; + FloatVectorCompare(Dd,Zero,nanx); +} + +@endif # VFPv2 || VFPv3 + +define pcodeop VectorCountOneBits; + + +@ifndef VERSION_8 +#second arg to conversion function indicates rounding mode (see RMODE bits of FPSCR) +define pcodeop VectorFloatToSigned; +define pcodeop VectorFloatToUnsigned; +define pcodeop VectorSignedToFloat; +define pcodeop VectorUnsignedToFloat; +@endif # VERSION_8 + +@if defined(SIMD) +####### +# VCVT (between floating-point and integer, Advanced SIMD) +# + +:vcnt.8 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0711=10 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0711=10 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm +{ + Dd = VectorCountOneBits(Dm,8:1,8:1); +} + +:vcnt.8 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=0 & c1617=0 & c0711=10 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0711=10 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm +{ + Qd = VectorCountOneBits(Qm,8:1,8:1); +} + +@ifndef VERSION_8 +:vcvt.s32.f32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm + +{ + Dd = VectorFloatToSigned(Dm,3:1); +} + +:vcvt.u32.f32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm + +{ + Dd = VectorFloatToUnsigned(Dm,3:1); +} + +:vcvt.f32.s32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm +{ + Dd = VectorSignedToFloat(Dm,0:1); +} + +:vcvt.f32.u32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm + +{ + Dd = VectorUnsignedToFloat(Dm,0:1); +} + +:vcvt.s32.f32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & 
thv_c1621=0x3b & thv_c0911=3 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm + +{ + Qd = VectorFloatToSigned(Qm,3:1); +} + +:vcvt.u32.f32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm + +{ + Qd = VectorFloatToUnsigned(Qm,3:1); +} + +:vcvt.f32.s32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm + +{ + Qd = VectorSignedToFloat(Qm,0:1); +} + +:vcvt.f32.u32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm + +{ + Qd = VectorUnsignedToFloat(Qm,0:1); +} + +@endif # ! VERSION_8 +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) + +@ifndef VERSION_8 +####### +# VCVT (between floating-point and integer, VFP) +# + +roundMode: "r" is TMode=0 & c0707=0 { tmp:1 = $(FPSCR_RMODE); export tmp; } +roundMode: is TMode=0 & c0707=1 { export 3:1; } # Round towards zero +roundMode: "r" is TMode=1 & thv_c0707=0 { tmp:1 = $(FPSCR_RMODE); export tmp; } +roundMode: is TMode=1 & thv_c0707=1 { export 3:1; } # Round towards zero + +:vcvt^roundMode^COND^".s32.f32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=5 & c0911=5 & c0808=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=5 & thv_c0911=5 & thv_c0808=0 & thv_c0606=1 & thv_c0404=0) ) & Sd & Sm & roundMode +{ + build COND; + build Sd; + build Sm; + build roundMode; + Sd = VectorFloatToSigned(Sm,roundMode); +} + +:vcvt^roundMode^COND^".s32.f64" Sd,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=5 & c0911=5 & c0808=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=5 & thv_c0911=5 & thv_c0808=1 & thv_c0606=1 & thv_c0404=0) ) & Sd & roundMode & Dm +{ + build COND; + build Sd; + build Dm; + build roundMode; + Sd = VectorFloatToSigned(Dm,roundMode); +} + +:vcvt^roundMode^COND^".u32.f32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=4 & c0911=5 & c0808=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=4 & thv_c0911=5 & thv_c0808=0 & thv_c0606=1 & thv_c0404=0) ) & roundMode & Sd & Sm +{ + build COND; + build Sd; + build Sm; + build roundMode; + Sd = VectorFloatToUnsigned(Sm,roundMode); +} + +:vcvt^roundMode^COND^".u32.f64" Sd,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=4 & c0911=5 & c0808=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=4 & thv_c0911=5 & thv_c0808=1 & thv_c0606=1 & thv_c0404=0)) & roundMode & Sd & Dm +{ + build COND; + build Sd; + build Dm; + build roundMode; + Sd = VectorFloatToUnsigned(Dm,roundMode); +} + +:vcvt^COND^".f32.s32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=0 & c0707=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & Sd & Sm +{ + build COND; + build Sd; + build Sm; + mode:1 = $(FPSCR_RMODE); + Sd = VectorSignedToFloat(Sm,mode); +} + +:vcvt^COND^".f64.s32" Dd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=1 & c0707=1 & c0606=1 &
c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & Dd & Sm +{ + build COND; + build Dd; + build Sm; + mode:1 = $(FPSCR_RMODE); + Dd = VectorSignedToFloat(Sm,mode); +} + +:vcvt^COND^".f32.u32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=0 & c0707=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & Sd & Sm +{ + build COND; + build Sd; + build Sm; + mode:1 = $(FPSCR_RMODE); + Sd = VectorUnsignedToFloat(Sm,mode); +} + +:vcvt^COND^".f64.u32" Dd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=1 & c0707=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) )& Dd & Sm +{ + build COND; + build Dd; + build Sm; + mode:1 = $(FPSCR_RMODE); + Dd = VectorUnsignedToFloat(Sm,mode); +} +@endif # ! VERSION_8 +@endif # VFPv2 || VFPv3 + +@if defined(SIMD) +@ifndef VERSION_8 +define pcodeop VectorFloatToSignedFixed; +define pcodeop VectorFloatToUnsignedFixed; +define pcodeop VectorSignedFixedToFloat; +define pcodeop VectorUnsignedFixedToFloat; + +####### +# VCVT (between floating-point and fixed-point, Advanced SIMD) +# + +fbits: "#"val is c1620 [ val = 32 - c1620; ] { tmp:1 = val; export tmp; } + +:vcvt.s32.f32 Dd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & fbits & Dd & c0911=7 & c0808=1 & c0707=0 & Q6=0 & c0404=1 & Dm +{ + Dd = VectorFloatToSignedFixed(Dm,fbits); +} + +:vcvt.u32.f32 Dd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & fbits & Dd & c0911=7 & c0808=1 & c0707=0 & Q6=0 & c0404=1 & Dm +{ + Dd = VectorFloatToUnsignedFixed(Dm,fbits); +} + +:vcvt.f32.s32 Dd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & fbits & Dd & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1 & Dm +{ + Dd = VectorSignedFixedToFloat(Dm,fbits); +} + +:vcvt.f32.u32 Dd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & fbits & Dd & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1 & Dm +{ + Dd = VectorUnsignedFixedToFloat(Dm,fbits); +} + +:vcvt.s32.f32 Qd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & fbits & Qd & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1 & Dm +{ + Qd = VectorFloatToSignedFixed(Dm,fbits); +} + +:vcvt.u32.f32 Qd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & fbits & Qd & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1 & Dm +{ + Qd = VectorFloatToUnsignedFixed(Dm,fbits); +} + +:vcvt.f32.s32 Qd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & fbits & Qd & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1 & Dm +{ + Qd = VectorSignedFixedToFloat(Dm,fbits); +} + +:vcvt.f32.u32 Qd,Dm,fbits is $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & fbits & Qd & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1 & Dm +{ + Qd = VectorUnsignedFixedToFloat(Dm,fbits); +} + +@endif # ! 
VERSION_8 + +@endif # SIMD + +@if defined(VFPv3) + +@ifndef VERSION_8 + +####### +# VCVT (between floating-point and fixed-point, VFP) +# + +fbits16: "#"^val is TMode=0 & c0505 & c0003 [ val = 16 - ((c0505 << 4) + c0003); ] { tmp:1 = val; export tmp; } +fbits32: "#"^val is TMode=0 & c0505 & c0003 [ val = 32 - ((c0505 << 4) + c0003); ] { tmp:1 = val; export tmp; } +fbits16: "#"^val is TMode=1 & thv_c0505 & thv_c0003 [ val = 16 - ((thv_c0505 << 4) + thv_c0003); ] { tmp:1 = val; export tmp; } +fbits32: "#"^val is TMode=1 & thv_c0505 & thv_c0003 [ val = 32 - ((thv_c0505 << 4) + thv_c0003); ] { tmp:1 = val; export tmp; } + +:vcvt^COND^".s16.f32" Sd,Sd2,fbits16 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c0911=5 & c0808=0 & c0707=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c0911=5 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0)) & Sd & Sd2 & fbits16 +{ + build COND; + build Sd; + build Sd2; + build fbits16; + Sd = VectorFloatToSignedFixed(Sd2,16:1,fbits16); +} + +:vcvt^COND^".u16.f32" Sd,Sd2,fbits16 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c0911=5 & c0808=0 & c0707=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c0911=5 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & Sd & Sd2 & fbits16 +{ + build COND; + build Sd; + build Sd2; + build fbits16; + Sd = VectorFloatToUnsignedFixed(Sd2,16:1,fbits16); +} + +:vcvt^COND^".s32.f32" Sd,Sd2,fbits32 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c0911=5 & c0808=0 & c0707=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c0911=5 & thv_c0808=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & Sd & Sd2 & fbits32 +{ + build COND; + build Sd; + build Sd2; + build fbits32; + Sd = VectorFloatToSignedFixed(Sd2,32:1,fbits32); +} + +:vcvt^COND^".u32.f32" Sd,Sd2,fbits32 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c0911=5 & c0808=0 & c0707=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c0911=5 & thv_c0808=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & Sd & Sd2 & fbits32 +{ + build COND; + build Sd; + build Sd2; + build fbits32; + Sd = VectorFloatToUnsignedFixed(Sd2,32:1,fbits32); +} + +:vcvt^COND^".s16.f64" Dd,Dd2,fbits16 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c0911=5 & c0808=1 & c0707=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c0911=5 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & Dd & Dd2 & fbits16 +{ + build COND; + build Dd; + build Dd2; + build fbits16; + Dd = VectorFloatToSignedFixed(Dd2,16:1,fbits16); +} + +:vcvt^COND^".u16.f64" Dd,Dd2,fbits16 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c0911=5 & c0808=1 & c0707=0 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c0911=5 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & Dd & Dd2 & fbits16 +{ + build COND; + build Dd; + build Dd2; + build fbits16; + Dd = VectorFloatToUnsignedFixed(Dd2,16:1,fbits16); +} + +:vcvt^COND^".s32.f64" Dd,Dd2,fbits32 is COND & ( ($(AMODE) & ARMcond=1 & 
c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c0911=5 & c0808=1 & c0707=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c0911=5 & thv_c0808=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & Dd & Dd2 & fbits32 +{ + build COND; + build Dd; + build Dd2; + build fbits32; + Dd = VectorFloatToSignedFixed(Dd2,32:1,fbits32); +} + +:vcvt^COND^".u32.f64" Dd,Dd2,fbits32 is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c0911=5 & c0808=1 & c0707=1 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c0911=5 & thv_c0808=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & Dd & Dd2 & fbits32 +{ + build COND; + build Dd; + build Dd2; + build fbits32; + Dd = VectorFloatToUnsignedFixed(Dd2,32:1,fbits32); +} + +:vcvt^COND^".f32.s16" Sd,Sd2,fbits16 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & Sd & Sd2 & c0911=5 & c0808=0 & c0707=0 & c0606=1 & c0404=0 & fbits16 +{ + build COND; + build Sd; + build Sd2; + build fbits16; + Sd = VectorSignedFixedToFloat(Sd2,32:1,fbits16); +} + +:vcvt^COND^".f32.u16" Sd,Sd2,fbits16 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & Sd & Sd2 & c0911=5 & c0808=0 & c0707=0 & c0606=1 & c0404=0 & fbits16 +{ + build COND; + build Sd; + build Sd2; + build fbits16; + Sd = VectorUnsignedFixedToFloat(Sd2,32:1,fbits16); +} + +:vcvt^COND^".f32.s32" Sd,Sd2,fbits32 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & Sd & Sd2 & c0911=5 & c0808=0 & c0707=1 & c0606=1 & c0404=0 & fbits32 +{ + build COND; + build Sd; + build Sd2; + build fbits32; + Sd = VectorSignedFixedToFloat(Sd2,32:1,fbits32); +} + +:vcvt^COND^".f32.u32" Sd,Sd2,fbits32 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & Sd & Sd2 & c0911=5 & c0808=0 & c0707=1 & c0606=1 & c0404=0 & fbits32 +{ + build COND; + build Sd; + build Sd2; + build fbits32; + Sd = VectorUnsignedFixedToFloat(Sd2,32:1,fbits32); +} + +:vcvt^COND^".f64.s16" Dd,Dd2,fbits16 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & Dd & Dd2 & c0911=5 & c0808=1 & c0707=0 & c0606=1 & c0404=0 & fbits16 +{ + build COND; + build Dd; + build Dd2; + build fbits16; + Dd = VectorSignedFixedToFloat(Dd2,64:1,fbits16); +} + +:vcvt^COND^".f64.u16" Dd,Dd2,fbits16 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & Dd & Dd2 & c0911=5 & c0808=1 & c0707=0 & c0606=1 & c0404=0 & fbits16 +{ + build COND; + build Dd; + build Dd2; + build fbits16; + Dd = VectorUnsignedFixedToFloat(Dd2,64:1,fbits16); +} + +:vcvt^COND^".f64.s32" Dd,Dd2,fbits32 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & Dd & Dd2 & c0911=5 & c0808=1 & c0707=1 & c0606=1 & c0404=0 & fbits32 +{ + build COND; + build Dd; + build Dd2; + build fbits32; + Dd = VectorSignedFixedToFloat(Dd2,64:1,fbits32); +} + +:vcvt^COND^".f64.u32" Dd,Dd2,fbits32 is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & Dd & Dd2 & c0911=5 & c0808=1 & c0707=1 & c0606=1 & c0404=0 & fbits32 +{ + build COND; + build Dd; + build Dd2; + build fbits32; + Dd = VectorUnsignedFixedToFloat(Dd2,64:1,fbits32); +} + +@endif # ! 
VERSION_8 + +@endif # VFPv3 + +define pcodeop VectorFloatDoubleToSingle; +define pcodeop VectorFloatSingleToDouble; + +@if defined(VFPv2) || defined(VFPv3) + +@ifndef VERSION_8 +####### +# VCVT (between double-precision and single-precision) +# + +:vcvt^COND^".f32.f64" Sd,Dm is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x37 & Sd & c0911=5 & c0808=1 & c0607=3 & c0404=0 & Dm +{ + build COND; + build Sd; + build Dm; + Sd = float2float(Dm); +} + +:vcvt^COND^".f64.f32" Dd,Sm is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x37 & Dd & c0911=5 & c0808=0 & c0607=3 & c0404=0 & Sm +{ + build COND; + build Dd; + build Sm; + Dd = float2float(Sm); +} + +@endif # ! VERSION_8 +@endif # VFPv2 || VFPv3 + +@if defined(SIMD) + +@ifndef VERSION_8 +define pcodeop VectorFloatSingleToHalf; +define pcodeop VectorFloatHalfToSingle; + +####### +# VCVT (between half-precision and single-precision) +# +:vcvt.f16.f32 Dd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=0 & thv_c0607=0 & thv_c0404=0 ) ) & Dd & Qm + +{ + Dd = VectorFloatSingleToHalf(Qm, 4:1, 16:1); +} + +:vcvt.f16.f32 Qd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=1 & c0607=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & Qd & Dm + +{ + Qd = VectorFloatHalfToSingle(Dm, 4:1, 16:1); +} + +@endif # ! VERSION_8 +@endif # SIMD + +@if defined(VFPv3) + +@ifndef VERSION_8 +:vcvtb^COND^".f32.f16" Sd,Sm is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x32 & Sd & c0611=0x29 & c0404=0 & Sm +{ + build COND; + build Sd; + build Sm; + Sd = float2float(Sm:2); +} + +:vcvtb^COND^".f16.f32" Sd,Sm is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x33 & Sd & c0611=0x29 & c0404=0 & Sm +{ + build COND; + build Sd; + build Sm; + w:2 = float2float(Sm); + Sd[0,16] = w; +} + +:vcvtt^COND^".f32.f16" Sd,Sm is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x32 & Sd & c0611=0x2b & c0404=0 & Sm +{ + build COND; + build Sd; + build Sm; + w:2 = Sm(2); + Sd = float2float(w); +} + +:vcvtt^COND^".f16.f32" Sd,Sm is $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x33 & Sd & c0611=0x2b & c0404=0 & Sm +{ + build COND; + build Sd; + build Sm; + w:2 = float2float(Sm); + Sd[16,16] = w; +} +@endif # ! 
VERSION_8
+
+@endif # VFPv3
+
+@if defined(VFPv2) || defined(VFPv3)
+
+:vdiv^COND^".f32" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=0 & c0811=10 & c0606=0 & c0404=0 ) |
+ ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0 ) ) & Sn & Sd & Sm
+{
+ build COND;
+ build Sd;
+ build Sm;
+ build Sn;
+ Sd = Sn f/ Sm;
+}
+
+:vdiv^COND^".f64" Dd,Dn,Dm is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=0 & c0811=11 & c0606=0 & c0404=0 ) |
+ ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0 ) ) & Dn & Dd & Dm
+{
+ build COND;
+ build Dd;
+ build Dm;
+ build Dn;
+ Dd = Dn f/ Dm;
+}
+
+@endif # VFPv2 || VFPv3
+
+define pcodeop VectorHalvingAdd;
+define pcodeop VectorHalvingSubtract;
+define pcodeop VectorRoundHalvingAdd;
+define pcodeop VectorRoundAddAndNarrow;
+
+@if defined(SIMD)
+
+#######
+# VDUP (scalar)
+#
+
+vdupIndex: c1719 is TMode=0 & c1616=1 & c1719 { tmp:4 = c1719; export tmp; }
+vdupIndex: c1819 is TMode=0 & c1617=2 & c1819 { tmp:4 = c1819; export tmp; }
+vdupIndex: c1919 is TMode=0 & c1618=4 & c1919 { tmp:4 = c1919; export tmp; }
+
+vdupIndex: thv_c1719 is TMode=1 & thv_c1616=1 & thv_c1719 { tmp:4 = thv_c1719; export tmp; }
+vdupIndex: thv_c1819 is TMode=1 & thv_c1617=2 & thv_c1819 { tmp:4 = thv_c1819; export tmp; }
+vdupIndex: thv_c1919 is TMode=1 & thv_c1618=4 & thv_c1919 { tmp:4 = thv_c1919; export tmp; }
+
+vdupSize: 8 is TMode=0 & c1616=1 { }
+vdupSize: 16 is TMode=0 & c1617=2 { }
+vdupSize: 32 is TMode=0 & c1618=4 { }
+vdupSize: 8 is TMode=1 & thv_c1616=1 { }
+vdupSize: 16 is TMode=1 & thv_c1617=2 { }
+vdupSize: 32 is TMode=1 & thv_c1618=4 { }
+
+vdupDm: Dm^"["^vdupIndex^"]" is Dm & vdupIndex & ((TMode=0 & c1616=1) | (TMode=1 & thv_c1616=1))
+{
+ ptr:4 = &Dm + vdupIndex;
+ val:8 = 0;
+ replicate1to8(*[register]:1 ptr, val);
+ export val;
+}
+vdupDm: Dm^"["^vdupIndex^"]" is Dm & vdupIndex & ((TMode=0 & c1617=2) | (TMode=1 & thv_c1617=2))
+{
+ ptr:4 = &Dm + (2 * vdupIndex);
+ val:8 = 0;
+ replicate2to8(*[register]:2 ptr, val);
+ export val;
+}
+vdupDm: Dm^"["^vdupIndex^"]" is Dm & vdupIndex & ((TMode=0 & c1618=4) | (TMode=1 & thv_c1618=4))
+{
+ ptr:4 = &Dm + (4 * vdupIndex);
+ val:8 = 0;
+ replicate4to8(*[register]:4 ptr, val);
+ export val;
+}
+
+vdupDm16: vdupDm is vdupDm
+{
+ val:16 = zext(vdupDm);
+ val = val | (val << 64);
+ export val;
+}
+
+:vdup.^vdupSize Dd,vdupDm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=0 & c0404=0 ) |
+ ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c0711=0x18 & thv_Q6=0 & thv_c0404=0 ) ) & Dd & vdupDm
+{
+ Dd = vdupDm;
+}
+
+:vdup.^vdupSize Qd,vdupDm16 is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=1 & c0404=0 ) |
+ ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c0711=0x18 & thv_Q6=1 & thv_c0404=0) ) & Qd & vdupDm16
+{
+ Qd = vdupDm16;
+}
+
+#######
+# VDUP (ARM core register)
+#
+
+vdupSize2: 8 is c2222=1 & c0505=0 { }
+vdupSize2: 16 is c2222=0 & c0505=1 { }
+vdupSize2: 32 is c2222=0 & c0505=0 { }
+
+vdupRd8: Rd is Rd & c2222=1 & c0505=0
+{
+ val:8 = 0;
+ local tmpRd = Rd;
+ replicate1to8(tmpRd:1, val);
+ export val;
+}
+vdupRd8: Rd is Rd & c2222=0 & c0505=1
+{
+ val:8 = 0;
+ local tmpRd = Rd;
+ replicate2to8(tmpRd:2, val);
+ export val;
+}
+vdupRd8: Rd is Rd & c2222=0 & c0505=0
+{
+ val:8 = 0;
+ local tmpRd = Rd;
+ replicate4to8(tmpRd:4, val);
+ export val;
+}
+
+vdupRd16: vdupRd8 is vdupRd8
+{
+ val:16 = zext(vdupRd8);
+ val = val | (val << 64);
+ export val;
+}
+
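+# NOTE (informative, not part of the upstream Ghidra spec): vdupRd8 exports
+# a 64-bit value whose lanes are all copies of the low 8/16/32 bits of Rd
+# (as the replicate1to8/2to8/4to8 helpers' names suggest), and vdupRd16
+# widens that to 128 bits by OR-ing the pattern into both halves. E.g. with
+# vdupSize2=16 and Rd[0,16]=0x1234, vdupRd8 yields 0x1234123412341234 and
+# vdupRd16 repeats that 64-bit pattern in both halves of the Q register.
+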
+:vdup^COND^"."^vdupSize2 Dn,vdupRd8 is COND & (( $(AMODE) & ARMcond=1 & c2327=0x1d & c2121=0 & c2020=0 & c0811=11 & c0606=0 & c0004=0x10) |
+ ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=11 & thv_c0606=0 & thv_c0004=0x10 ) ) & Dn & vdupSize2 & vdupRd8
+{
+ build COND;
+ build vdupRd8;
+ build Dn;
+ Dn = vdupRd8;
+}
+
+:vdup^COND^"."^vdupSize2 Qn,vdupRd16 is COND & (( $(AMODE) & ARMcond=1 & c2327=0x1d & c2121=1 & c2020=0 & c0811=11 & c0606=0 & c0004=0x10) |
+ ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c0811=11 & thv_c0606=0 & thv_c0004=0x10 ) ) & Qn & vdupSize2 & vdupRd16
+{
+ build COND;
+ build vdupRd16;
+ build Qn;
+ Qn = vdupRd16;
+}
+
+:veor Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=0 & c0404=1) |
+ ($(TMODE_F) &thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm
+
+{
+ Dd = Dn ^ Dm;
+}
+
+:veor Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=1 & c0404=1) |
+ ($(TMODE_F) &thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm
+{
+ Qd = Qn ^ Qm;
+}
+
+extImm: "#"^c0811 is TMode=0 & c0811 { tmp:1 = c0811; export tmp; }
+extImm: "#"^thv_c0811 is TMode=1 & thv_c0811 { tmp:1 = thv_c0811; export tmp; }
+
+:vext.8 Dd,Dn,Dm,extImm is ( ( $(AMODE) & cond=15 & c2327=5 & c2021=3 & c0606=0 & c0404=0 ) |
+ ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=3 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dn & Dm & extImm
+{
+ val:16 = (zext(Dm) << 64) | zext(Dn);
+ local shift = extImm * 8;
+ val = val >> shift;
+ Dd = val:8;
+}
+
+:vext.8 Qd,Qn,Qm,extImm is ( ( $(AMODE) & cond=15 & c2327=5 & c2021=3 & c0606=1 & c0404=0 ) |
+ ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=3 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qn & Qm & extImm
+{
+ val:32 = (zext(Qm) << 128) | zext(Qn);
+ local shift = extImm * 8;
+ val = val >> shift;
+ Qd = val:16;
+}
+
+:vhadd.^udt^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=0 & c0404=0) |
+ ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=0 & thv_Q6=0 & thv_c0404=0) ) & udt & Dm & esize2021 & Dn & Dd
+{
+ Dd = VectorHalvingAdd(Dn,Dm,esize2021,udt);
+}
+
+:vhadd.^udt^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=1 & c0404=0) |
+ ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=0 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd
+{
+ Qd = VectorHalvingAdd(Qn,Qm,esize2021,udt);
+}
+
+
+:vraddhn.i^esize2021x2 Dd,Qn,Qm is (($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=4 & Q6=0 & c0404=0) |
+ ($(TMODE_F) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=4 & thv_Q6=0 & thv_c0404=0) ) & Qm & esize2021x2 & Qn & Dd
+{
+ Dd = VectorRoundAddAndNarrow(Qn,Qm,esize2021x2);
+}
+
+:vrhadd.^udt^esize2021 Dd,Dn,Dm is (($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=0 & c0404=0) |
+ ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=0) ) & udt & Dm & esize2021 & Dn & Dd
+{
+ Dd = VectorRoundHalvingAdd(Dn,Dm,esize2021,udt);
+}
+
+:vrhadd.^udt^esize2021 Qd,Qn,Qm is (($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=1 & c0404=0) |
+ ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd
+{
+ Qd = VectorRoundHalvingAdd(Qn,Qm,esize2021,udt);
+}
+
+:vhsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=0 & c0404=0) |
+ ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=2 & thv_Q6=0 & thv_c0404=0) ) & udt & esize2021 & Dn
& Dd & Dm +{ + Dd = VectorHalvingSubtract(Dn,Dm,esize2021,udt); +} + +:vhsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=2 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd +{ + Qd = VectorHalvingSubtract(Qn,Qm,esize2021,udt); +} + +####### +# VLD1 (multiple single elements) +# + +buildVld1DdList: Dreg is Dreg & counter=1 [ counter=0; regNum=regNum+1; ] +{ + Dreg = * mult_addr; +} +buildVld1DdList: Dreg,buildVld1DdList is Dreg & buildVld1DdList [ counter=counter-1; regNum=regNum+1; ] +{ + Dreg = * mult_addr; + mult_addr = mult_addr + 8; + build buildVld1DdList; +} + +vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=7 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=1; ] { export 1:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=10 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=2; ] { export 2:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=6 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=3; ] { export 3:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=2 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=4; ] { export 4:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=7 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=1; ] { export 1:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=10 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=2; ] { export 2:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=6 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=3; ] { export 3:4; } +vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=2 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=4; ] { export 4:4; } + +@define Vld1DdList "(c0811=2 | c0811=6 | c0811=7 | c0811=10)" +@define thv_Vld1DdList "(thv_c0811=2 | thv_c0811=6 | thv_c0811=7 | thv_c0811=10)" + +vldAlign45: is TMode=0 & c0405=0 { } +vldAlign45: "@64" is TMode=0 & c0405=1 { } +vldAlign45: "@128" is TMode=0 & c0405=2 { } +vldAlign45: "@256" is TMode=0 & c0405=3 { } +vldAlign45: is TMode=1 & thv_c0405=0 { } +vldAlign45: "@64" is TMode=1 & thv_c0405=1 { } +vldAlign45: "@128" is TMode=1 & thv_c0405=2 { } +vldAlign45: "@256" is TMode=1 & thv_c0405=3 { } + +RnAligned45: "["^VRn^vldAlign45^"]" is TMode=0 & VRn & vldAlign45 { export VRn; } +RnAligned45: "["^VRn^vldAlign45^"]" is TMode=1 & VRn & vldAlign45 { export VRn; } + + +:vld1.^esize0607 vld1DdList,RnAligned45 is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0003=15 & $(Vld1DdList)) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0003=15 & $(thv_Vld1DdList)) ) & esize0607 & RnAligned45 & vld1DdList +{ + mult_addr = RnAligned45; + build vld1DdList; +} + +:vld1.^esize0607 vld1DdList,RnAligned45^"!" 
is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0003=13 & $(Vld1DdList)) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0003=13 & $(thv_Vld1DdList)) ) & esize0607 & RnAligned45 & vld1DdList +{ + mult_addr = RnAligned45; + build vld1DdList; + RnAligned45 = RnAligned45 + (8 * vld1DdList); +} + +:vld1.^esize0607 vld1DdList,RnAligned45,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & $(Vld1DdList)) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & $(thv_Vld1DdList)) ) & VRm & esize0607 & RnAligned45 & vld1DdList +{ + mult_addr = RnAligned45; + build vld1DdList; + RnAligned45 = RnAligned45 + VRm; +} + + +####### +# VLD1 (single element to one lane) +# + +vld1Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; } +vld1Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; } + +vld1DdElement2: Dd^"["^vld1Index^"]" is Dd & vld1Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) +{ + ptr:4 = &Dd + vld1Index; + *[register]:1 ptr = *:1 mult_addr; +} +vld1DdElement2: Dd^"["^vld1Index^"]" is Dd & vld1Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) +{ + ptr:4 = &Dd + (2 * vld1Index); + *[register]:2 ptr = *:2 mult_addr; +} +vld1DdElement2: Dd^"["^vld1Index^"]" is Dd & vld1Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) +{ + ptr:4 = &Dd + (4 * vld1Index); + *[register]:4 ptr = *:4 mult_addr; +} + +@define Vld1DdElement2 "((c1011=0 & c0404=0) | (c1011=1 & c0505=0) | (c1011=2 & (c0406=0 | c0406=3)))" +@define T_Vld1DdElement2 "((thv_c1011=0 & thv_c0404=0) | (thv_c1011=1 & thv_c0505=0) | (thv_c1011=2 & (thv_c0406=0 | thv_c0406=3)))" + + +vld1Align2: is TMode=0 & c0404=0 { } +vld1Align2: "@16" is TMode=0 & c1011=1 & c0404=1 { } +vld1Align2: "@32" is TMode=0 & c1011=2 & c0404=1 { } +vld1Align2: is TMode=1 & thv_c0404=0 { } +vld1Align2: "@16" is TMode=1 & thv_c1011=1 & thv_c0404=1 { } +vld1Align2: "@32" is TMode=1 & thv_c1011=2 & thv_c0404=1 { } + +RnAligned2: "["^VRn^vld1Align2^"]" is VRn & vld1Align2 { export VRn; } + +:vld1.^esize1011 vld1DdElement2,RnAligned2 is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=15 & $(Vld1DdElement2) ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & thv_c0003=15 & $(T_Vld1DdElement2) ) ) & RnAligned2 & esize1011 & vld1DdElement2 +{ + mult_addr = RnAligned2; + build vld1DdElement2; +} + +:vld1.^esize1011 vld1DdElement2,RnAligned2^"!" 
is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=13 & $(Vld1DdElement2) ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & thv_c0003=13 & $(T_Vld1DdElement2) ) ) & RnAligned2 & esize1011 & vld1DdElement2 +{ + mult_addr = RnAligned2; + build vld1DdElement2; + RnAligned2 = RnAligned2 + esize1011; +} + +:vld1.^esize1011 vld1DdElement2,RnAligned2,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0809=0 & $(Vld1DdElement2) ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & $(T_Vld1DdElement2) ) ) & VRm & RnAligned2 & esize1011 & vld1DdElement2 +{ + mult_addr = RnAligned2; + build vld1DdElement2; + RnAligned2 = RnAligned2 + VRm; +} + + +####### +# VLD1 (single element to all lanes) +# + +vld1RnReplicate: is Rn & c0607=0 +{ + val:8 = 0; + replicate1to8(*:1 Rn, val); + export val; +} +vld1RnReplicate: is Rn & c0607=1 +{ + val:8 = 0; + replicate2to8(*:2 Rn, val); + export val; +} +vld1RnReplicate: is Rn & c0607=2 +{ + val:8 = 0; + replicate4to8(*:4 Rn, val); + export val; +} + +vld1Dd3: Dreg^"[]" is Dreg { export Dreg; } + +buildVld1DdList3: is counter=0 { } +buildVld1DdList3: vld1Dd3 is counter=1 & vld1Dd3 [ counter=0; regNum=regNum+1; ] +{ + vld1Dd3 = mult_dat8; +} +buildVld1DdList3: vld1Dd3,buildVld1DdList3 is vld1Dd3 & buildVld1DdList3 [ counter=counter-1; regNum=regNum+1; ] +{ + vld1Dd3 = mult_dat8; + build buildVld1DdList3; +} + +vld1DdList3: "{"^buildVld1DdList3^"}" is c0505=0 & D22 & c1215 & buildVld1DdList3 [ regNum=(D22<<4)+c1215-1; counter=1; ] { export 1:4; } +vld1DdList3: "{"^buildVld1DdList3^"}" is c0505=1 & D22 & c1215 & buildVld1DdList3 [ regNum=(D22<<4)+c1215-1; counter=2; ] { export 2:4; } + +vld1Align3: is c0404=0 { } +vld1Align3: "@16" is c0404=1 & c0607=1 { } +vld1Align3: "@32" is c0404=1 & c0607=2 { } + +RnAligned3: "["^Rn^vld1Align3^"]" is Rn & vld1Align3 { export Rn; } + +@define vld1Constrain "((c0607=0 & c0404=0) | c0607=1 | c0607=2)" + +:vld1.^esize0607 vld1DdList3,RnAligned3 is $(AMODE) & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & c0003=15 & $(vld1Constrain) +{ + mult_dat8 = vld1RnReplicate; + build vld1DdList3; +} + +:vld1.^esize0607 vld1DdList3,RnAligned3^"!" 
is $(AMODE) & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & c0003=13 & $(vld1Constrain) +{ + mult_dat8 = vld1RnReplicate; + build vld1DdList3; + RnAligned3 = RnAligned3 + vld1DdList3; +} + +:vld1.^esize0607 vld1DdList3,RnAligned3,VRm is $(AMODE) & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & VRm & $(vld1Constrain) +{ + mult_dat8 = vld1RnReplicate; + build vld1DdList3; + RnAligned3 = RnAligned3 + VRm; +} + +thv_vld1RnReplicate: is VRn & thv_c0607=0 +{ + val:8 = 0; + replicate1to8(*:1 VRn, val); + export val; +} +thv_vld1RnReplicate: is VRn & thv_c0607=1 +{ + val:8 = 0; + replicate2to8(*:2 VRn, val); + export val; +} +thv_vld1RnReplicate: is VRn & thv_c0607=2 +{ + val:8 = 0; + replicate4to8(*:4 VRn, val); + export val; +} + +thv_vld1DdList3: "{"^buildVld1DdList3^"}" is thv_c0505=0 & thv_D22 & thv_c1215 & buildVld1DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; counter=1; ] { export 1:4; } +thv_vld1DdList3: "{"^buildVld1DdList3^"}" is thv_c0505=1 & thv_D22 & thv_c1215 & buildVld1DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; counter=2; ] { export 2:4; } + +thv_vld1Align3: is thv_c0404=0 { } +thv_vld1Align3: "@16" is thv_c0404=1 & thv_c0607=1 { } +thv_vld1Align3: "@32" is thv_c0404=1 & thv_c0607=2 { } + +VRnAligned3: "["^VRn^thv_vld1Align3^"]" is VRn & thv_vld1Align3 { export VRn; } + +@define T_vld1Constrain "((thv_c0607=0 & thv_c0404=0) | thv_c0607=1 | thv_c0607=2)" + +:vld1.^esize0607 thv_vld1DdList3,VRnAligned3 is $(TMODE_F) &thv_c2327=19 & thv_c2021=2 & VRnAligned3 & thv_vld1RnReplicate & thv_vld1DdList3 & thv_c0811=12 & esize0607 & thv_c0003=15 & $(T_vld1Constrain) +{ + mult_dat8 = thv_vld1RnReplicate; + build thv_vld1DdList3; +} + +:vld1.^esize0607 thv_vld1DdList3,VRnAligned3^"!" 
is $(TMODE_F) &thv_c2327=19 & thv_c2021=2 & VRnAligned3 & thv_vld1RnReplicate & thv_vld1DdList3 & thv_c0811=12 & esize0607 & thv_c0003=13 & $(T_vld1Constrain)
+{
+ mult_dat8 = thv_vld1RnReplicate;
+ build thv_vld1DdList3;
+ VRnAligned3 = VRnAligned3 + thv_vld1DdList3;
+}
+
+:vld1.^esize0607 thv_vld1DdList3,VRnAligned3,VRm is $(TMODE_F) &thv_c2327=19 & thv_c2021=2 & VRnAligned3 & thv_vld1RnReplicate & thv_vld1DdList3 & thv_c0811=12 & esize0607 & VRm & $(T_vld1Constrain)
+{
+ mult_dat8 = thv_vld1RnReplicate;
+ build thv_vld1DdList3;
+ VRnAligned3 = VRnAligned3 + VRm;
+}
+
+#######
+# VLD2 (multiple 2-element structures)
+#
+
+vld2Dd: Dreg is (($(AMODE) & c0607=0) | ($(TMODE_F) & thv_c0607=0)) & Dreg & regInc
+{
+ ptr1:4 = &Dreg;
+@if ENDIAN == "little"
+ ptr2:4 = &Dreg + (regInc * 8);
+@else # ENDIAN == "big"
+ ptr2:4 = &Dreg - (regInc * 8);
+@endif # ENDIAN = "big"
+ mult_dat8 = 8;
+ <loop>
+ *[register]:1 ptr1 = *:1 mult_addr;
+ mult_addr = mult_addr + 1;
+ *[register]:1 ptr2 = *:1 mult_addr;
+ mult_addr = mult_addr + 1;
+ mult_dat8 = mult_dat8 - 1;
+ if(mult_dat8 == 0) goto <done>;
+ ptr1 = ptr1 + 1;
+ ptr2 = ptr2 + 1;
+ goto <loop>;
+ <done>
+}
+vld2Dd: Dreg is (($(AMODE) & c0607=1) | ($(TMODE_F) & thv_c0607=1)) & Dreg & regInc
+{
+ ptr1:4 = &Dreg;
+@if ENDIAN == "little"
+ ptr2:4 = &Dreg + (regInc * 8);
+@else # ENDIAN == "big"
+ ptr2:4 = &Dreg - (regInc * 8);
+@endif # ENDIAN = "big"
+ mult_dat8 = 4;
+ <loop>
+ *[register]:2 ptr1 = *:2 mult_addr;
+ mult_addr = mult_addr + 2;
+ *[register]:2 ptr2 = *:2 mult_addr;
+ mult_addr = mult_addr + 2;
+ mult_dat8 = mult_dat8 - 1;
+ if(mult_dat8 == 0) goto <done>;
+ ptr1 = ptr1 + 2;
+ ptr2 = ptr2 + 2;
+ goto <loop>;
+ <done>
+}
+vld2Dd: Dreg is (($(AMODE) & c0607=2) | ($(TMODE_F) & thv_c0607=2)) & Dreg & regInc
+{
+ ptr1:4 = &Dreg;
+@if ENDIAN == "little"
+ ptr2:4 = &Dreg + (regInc * 8);
+@else # ENDIAN == "big"
+ ptr2:4 = &Dreg - (regInc * 8);
+@endif # ENDIAN = "big"
+ mult_dat8 = 2;
+ <loop>
+ *[register]:4 ptr1 = *:4 mult_addr;
+ mult_addr = mult_addr + 4;
+ *[register]:4 ptr2 = *:4 mult_addr;
+ mult_addr = mult_addr + 4;
+ mult_dat8 = mult_dat8 - 1;
+ if(mult_dat8 == 0) goto <done>;
+ ptr1 = ptr1 + 4;
+ ptr2 = ptr2 + 4;
+ goto <loop>;
+ <done>
+}
+
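+# NOTE (informative, not part of the upstream Ghidra spec): each vld2Dd
+# variant above de-interleaves one VLD2 structure load. For 16-bit elements
+# (c0607=1) the loop runs mult_dat8=4 times, alternately storing one 2-byte
+# element into Dd (ptr1) and the next into Dd+regInc (ptr2), so eight
+# consecutive halfwords at mult_addr end up as {e0,e2,e4,e6} in the first
+# register and {e1,e3,e5,e7} in the second.
+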
+buildVld2DdListA: is counter=0 { }
+buildVld2DdListA: vld2Dd,buildVld2DdListA is vld2Dd & buildVld2DdListA & esize0607 [ counter=counter-1; regNum=regNum+1; ]
+{
+ build vld2Dd;
+ build buildVld2DdListA;
+}
+
+buildVld2DdListB: is counter2=0 { }
+buildVld2DdListB: Dreg2 is Dreg2 & counter2=1 & esize0607 [ counter2=0; reg2Num=reg2Num+1; ] { }
+buildVld2DdListB: Dreg2,buildVld2DdListB is Dreg2 & buildVld2DdListB & esize0607 [ counter2=counter2-1; reg2Num=reg2Num+1; ] { }
+
+vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=0 & c0811=8 & D22 & c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(D22<<4)+c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
+vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=0 & c0811=9 & D22 & c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
+vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=0 & c0811=3 & D22 & c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVld2DdListA; build buildVld2DdListB; export 4:4; }
+vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=8 & thv_D22 & thv_c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
+vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=9 & thv_D22 & thv_c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
+vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=3 & thv_D22 & thv_c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVld2DdListA; build buildVld2DdListB; export 4:4; }
+
+@define Vld2DdList "(c0811=3 | c0811=8 | c0811=9)"
+@define thv_Vld2DdList "(thv_c0811=3 | thv_c0811=8 | thv_c0811=9)"
+
+:vld2.^esize0607 vld2DdList,RnAligned45 is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=15 & $(Vld2DdList) ) |
+ ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003=15 & $(thv_Vld2DdList) ) ) & RnAligned45 & esize0607 & vld2DdList
+{
+ mult_addr = RnAligned45;
+ build vld2DdList;
+}
+
+:vld2.^esize0607 vld2DdList,RnAligned45^"!" is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=13 & $(Vld2DdList) ) |
+ ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003=13 & $(thv_Vld2DdList) ) ) & RnAligned45 & esize0607 & vld2DdList
+{
+ mult_addr = RnAligned45;
+ build vld2DdList;
+ RnAligned45 = RnAligned45 + (8 * vld2DdList);
+}
+
+:vld2.^esize0607 vld2DdList,RnAligned45,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003 & $(Vld2DdList) ) |
+ ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003 & $(thv_Vld2DdList) ) ) & VRm & RnAligned45 & esize0607 & vld2DdList
+{
+ mult_addr = RnAligned45;
+ build vld2DdList;
+ RnAligned45 = RnAligned45 + VRm;
+}
+
+#######
+# VLD2 (single 2-element structure to one lane)
+#
+
+vld2Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; }
+vld2Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; }
+
+vld2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index
+{
+}
+
+vld2Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { }
+vld2Align2: "@16" is TMode=0 & c1011=0 & c0404=1 { }
+vld2Align2: "@32" is TMode=0 & c1011=1 & c0404=1 { }
+vld2Align2: "@64" is TMode=0 & c1011=2 & c0405=1 { }
+vld2Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { }
+vld2Align2: "@16" is TMode=1 & thv_c1011=0 & thv_c0404=1 { }
+vld2Align2: "@32" is TMode=1 & thv_c1011=1 & thv_c0404=1 { }
+vld2Align2: "@64" is TMode=1 & thv_c1011=2 & thv_c0405=1 { }
+
+vld2RnAligned2: "["^VRn^vld2Align2^"]" is VRn & vld2Align2 { export VRn; }
+
+buildVld2DdList2: is counter=0 { }
+buildVld2DdList2: vld2DdElement2 is counter=1 & vld2DdElement2 [ counter=0; regNum=regNum+regInc; ] { }
+buildVld2DdList2: vld2DdElement2,buildVld2DdList2 is vld2DdElement2 & buildVld2DdList2 [ counter=counter-1; regNum=regNum+regInc; ] { }
+
+vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=0 & D22 & c1215 & buildVld2DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=2; ] { } # Single
+vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVld2DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=2; ] { } # Double
+vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 &
buildVld2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single +vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVld2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double + + +:vld2.^esize1011 vld2DdList2,vld2RnAligned2 is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=15 ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003=15 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2 + unimpl + +:vld2.^esize1011 vld2DdList2,vld2RnAligned2^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=13 ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003=13 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2 + unimpl + +:vld2.^esize1011 vld2DdList2,vld2RnAligned2,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003 ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2 + unimpl + +####### +# VLD2 (single 2-element structure to all lanes) +# + +vld2Align3: is TMode=0 & c0404=0 { } +vld2Align3: "@16" is TMode=0 & c0404=1 & c0607=0 { } +vld2Align3: "@32" is TMode=0 & c0404=1 & c0607=1 { } +vld2Align3: "@64" is TMode=0 & c0404=1 & c0607=2 { } +vld2Align3: is TMode=1 & thv_c0404=0 { } +vld2Align3: "@16" is TMode=1 & thv_c0404=1 & thv_c0607=0 { } +vld2Align3: "@32" is TMode=1 & thv_c0404=1 & thv_c0607=1 { } +vld2Align3: "@64" is TMode=1 & thv_c0404=1 & thv_c0607=2 { } + +vld2RnAligned3: "["^VRn^vld2Align3^"]" is VRn & vld2Align3 { export VRn; } + +buildVld2DdList3: is counter=0 { } +buildVld2DdList3: Dreg^"[]" is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { } +buildVld2DdList3: Dreg^"[]",buildVld2DdList3 is Dreg & buildVld2DdList3 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=0 & c0505=0 & D22 & c1215 & buildVld2DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=2; ] { } # Single +vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & buildVld2DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=2; ] { } # Double +vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld2DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single +vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld2DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double + +:vld2.^esize0607 vld2DdList3,vld2RnAligned3 is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=15 ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003=15 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3 + unimpl + +:vld2.^esize0607 vld2DdList3,vld2RnAligned3^"!" 
is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=13 ) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003=13 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3 + unimpl + +:vld2.^esize0607 vld2DdList3,vld2RnAligned3,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3 + unimpl + +####### +# VLD3 (multiple 3-element structures) +# + +vld3Align: is TMode=0 & c0404=0 { } +vld3Align: "@64" is TMode=0 & c0404=1 { } +vld3Align: is TMode=1 & thv_c0404=0 { } +vld3Align: "@64" is TMode=1 & thv_c0404=1 { } + + +vld3RnAligned: "["^VRn^vld3Align^"]" is VRn & vld3Align { export VRn; } + +buildVld3DdList: is counter=0 { } +buildVld3DdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { } +buildVld3DdList: Dreg,buildVld3DdList is buildVld3DdList & Dreg [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld3DdList: "{"^buildVld3DdList^"}" is TMode=0 & c0811=4 & D22 & c1215 & buildVld3DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single +vld3DdList: "{"^buildVld3DdList^"}" is TMode=0 & c0811=5 & D22 & c1215 & buildVld3DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double +vld3DdList: "{"^buildVld3DdList^"}" is TMode=1 & thv_c0811=4 & thv_D22 & thv_c1215 & buildVld3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single +vld3DdList: "{"^buildVld3DdList^"}" is TMode=1 & thv_c0811=5 & thv_D22 & thv_c1215 & buildVld3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double + +:vld3.^esize0607 vld3DdList,vld3RnAligned is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=15 ) | + ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 & thv_c0003=15) ) & vld3RnAligned & esize0607 & vld3DdList unimpl + +:vld3.^esize0607 vld3DdList,vld3RnAligned^"!" 
is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=13 ) | + ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 & thv_c0003=13) ) & vld3RnAligned & esize0607 & vld3DdList unimpl + +:vld3.^esize0607 vld3DdList,vld3RnAligned,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 ) | + ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 ) ) & VRm & vld3RnAligned & esize0607 & vld3DdList unimpl + +####### +# VLD3 (single 3-element structure to one lane) +# + +vld3Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; } +vld3Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; } + +vld3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index +{ +} + +vld3Rn: "["^VRn^"]" is VRn { export VRn; } + +buildVld3DdList2: is counter=0 { } +buildVld3DdList2: vld3DdElement2 is counter=1 & vld3DdElement2 [ counter=0; regNum=regNum+regInc; ] { } +buildVld3DdList2: vld3DdElement2,buildVld3DdList2 is vld3DdElement2 & buildVld3DdList2 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=0 & D22 & c1215 & buildVld3DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single +vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=0 & ((c1011=1 & c0405=2) | (c1011=2 & c0406=4)) & D22 & c1215 & buildVld3DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double +vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVld3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single +vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0405=2) | (thv_c1011=2 & thv_c0406=4)) & thv_D22 & thv_c1215 & buildVld3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double + + +:vld3.^esize1011 vld3DdList2,vld3Rn is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=15) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2 & thv_c0003=15) ) & vld3Rn & esize1011 & vld3DdList2 unimpl + +:vld3.^esize1011 vld3DdList2,vld3Rn^"!" 
is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=13) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2 & thv_c0003=13) ) & vld3Rn & esize1011 & vld3DdList2 unimpl + +:vld3.^esize1011 vld3DdList2,vld3Rn,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2) ) & VRm & vld3Rn & esize1011 & vld3DdList2 unimpl + +####### +# VLD3 (single 3-element structure to all lanes) +# + +buildVld3DdList3: is counter=0 { } +buildVld3DdList3: Dreg^"[]" is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { } +buildVld3DdList3: Dreg^"[]",buildVld3DdList3 is Dreg & buildVld3DdList3 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=0 & c0505=0 & D22 & c1215 & buildVld3DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single +vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & buildVld3DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double +vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld3DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single +vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld3DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double + +:vld3.^esize0607 vld3DdList3,vld3Rn is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=15) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0 & thv_c0003=15) ) & vld3Rn & esize0607 & vld3DdList3 unimpl + +:vld3.^esize0607 vld3DdList3,vld3Rn^"!" is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=13) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0 & thv_c0003=13) ) & vld3Rn & esize0607 & vld3DdList3 unimpl + +:vld3.^esize0607 vld3DdList3,vld3Rn,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0) ) & VRm & vld3Rn & esize0607 & vld3DdList3 unimpl + + +####### +# VLD4 (single 4-element structure to one lane) +# + +vld4Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; } +vld4Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; } + +vld4DdElement2: Dreg^"["^vld4Index^"]" is Dreg & vld4Index +{ +} + +vld4Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { } +vld4Align2: "@32" is TMode=0 & c1011=0 & c0404=1 { } +vld4Align2: "@64" is TMode=0 & (c1011=1 & c0404=1) | (c1011=2 & c0405=1) { } +vld4Align2: "@128" is TMode=0 & c1011=2 & c0405=2 { } +vld4Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { } +vld4Align2: "@32" is TMode=1 & thv_c1011=0 & thv_c0404=1 { } +vld4Align2: "@64" is TMode=1 & ((thv_c1011=1 & thv_c0404=1) | (thv_c1011=2 & thv_c0405=1)) { } +vld4Align2: "@128" is TMode=1 & thv_c1011=2 & thv_c0405=2 { } + +vld4RnAligned2: "["^Rn^vld4Align2^"]" is Rn & vld4Align2 { export Rn; } + +buildVld4DdList2: is counter=0 { } +buildVld4DdList2: vld4DdElement2 is counter=1 & vld4DdElement2 [ counter=0; regNum=regNum+regInc; ] { } +buildVld4DdList2: vld4DdElement2,buildVld4DdList2 is vld4DdElement2 & buildVld4DdList2 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=0 & D22 & c1215 & buildVld4DdList2 [ 
regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single +vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVld4DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double +vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVld4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single +vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVld4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double + + +:vld4.^esize1011 vld4DdList2,vld4RnAligned2 is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=15) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003=15 ) ) & esize1011 & vld4RnAligned2 & vld4DdList2 +unimpl + +:vld4.^esize1011 vld4DdList2,vld4RnAligned2^"!" is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=13) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003=13 ) ) & esize1011 & vld4RnAligned2 & vld4DdList2 +unimpl + +:vld4.^esize1011 vld4DdList2,vld4RnAligned2,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003 ) ) & esize1011 & VRm & vld4RnAligned2 & vld4DdList2 + unimpl + +####### +# VLD4 (single 4-element structure to all lanes) +# + +vld4Align3: is c0404=0 { } +vld4Align3: "@32" is c0404=1 & c0607=0 { } +vld4Align3: "@64" is c0404=1 & (c0607=1 | c0607=2) { } +vld4Align3: "@128" is c0404=1 & c0607=3 { } + +vld4RnAligned3: "["^Rn^vld4Align3^"]" is Rn & vld4Align3 { export Rn; } + +buildVld4DdList3: is counter=0 { } +buildVld4DdList3: Dreg^"[]" is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { } +buildVld4DdList3: Dreg^"[]",buildVld4DdList3 is Dreg & buildVld4DdList3 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld4DdList3: "{"^buildVld4DdList3^"}" is c0505=0 & D22 & c1215 & buildVld4DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single +vld4DdList3: "{"^buildVld4DdList3^"}" is c0505=1 & D22 & c1215 & buildVld4DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double + +:vld4.^esize0607 vld4DdList3,vld4RnAligned3 is $(AMODE) & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & c0003=15 & vld4DdList3 unimpl +#thv_2327=0x12 + +:vld4.^esize0607 vld4DdList3,vld4RnAligned3^"!" 
is $(AMODE) & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & c0003=13 & vld4DdList3 unimpl + +:vld4.^esize0607 vld4DdList3,vld4RnAligned3,VRm is $(AMODE) & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & VRm & vld4DdList3 unimpl + +####### +# VLD4 (multiple 4-element structures) +# + +vld4Align: is TMode=0 & c0405=0 { } +vld4Align: "@64" is TMode=0 & c0405=1 { } +vld4Align: "@128" is TMode=0 & c0405=2 { } +vld4Align: "@256" is TMode=0 & c0405=3 { } +vld4Align: is TMode=1 & thv_c0405=0 { } +vld4Align: "@64" is TMode=1 & thv_c0405=1 { } +vld4Align: "@128" is TMode=1 & thv_c0405=2 { } +vld4Align: "@256" is TMode=1 & thv_c0405=3 { } + +vld4RnAligned: "["^VRn^vld4Align^"]" is VRn & vld4Align { export VRn; } + +buildVld4DdList: is counter=0 { } +buildVld4DdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { } +buildVld4DdList: Dreg,buildVld4DdList is buildVld4DdList & Dreg [ counter=counter-1; regNum=regNum+regInc; ] { } + +vld4DdList: "{"^buildVld4DdList^"}" is TMode=0 & c0808=0 & D22 & c1215 & buildVld4DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single +vld4DdList: "{"^buildVld4DdList^"}" is TMode=0 & c0808=1 & D22 & c1215 & buildVld4DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double +vld4DdList: "{"^buildVld4DdList^"}" is TMode=1 & thv_c0808=0 & thv_D22 & thv_c1215 & buildVld4DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single +vld4DdList: "{"^buildVld4DdList^"}" is TMode=1 & thv_c0808=1 & thv_D22 & thv_c1215 & buildVld4DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double + +:vld4.^esize0607 vld4DdList,vld4RnAligned is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=15 ) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 & thv_c0003=15 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList + unimpl + +:vld4.^esize0607 vld4DdList,vld4RnAligned^"!" is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=13 ) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 & thv_c0003=13 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList + unimpl + +:vld4.^esize0607 vld4DdList,vld4RnAligned,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList + unimpl + + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) || defined(SIMD) + +####### +# VLDM (A1) +# + +vldmRn: Rn is TMode=0 & Rn & c2121=0 { export Rn; } +vldmRn: Rn^"!" is TMode=0 & Rn & c2121=1 { export Rn; } +vldmRn: thv_Rn is TMode=1 & thv_Rn & thv_c2121=0 { export thv_Rn; } +vldmRn: thv_Rn^"!" 
is TMode=1 & thv_Rn & thv_c2121=1 { export thv_Rn; } +vldmOffset: value is $(AMODE) & immed [ value= immed << 2; ] { export *[const]:4 value; } +vldmOffset: value is TMode=1 & thv_immed [ value= thv_immed << 2; ] { export *[const]:4 value; } +vldmUpdate: immed is TMode=0 & vldmRn & c2121=0 & immed { } +vldmUpdate: immed is TMode=0 & vldmRn & c2121=1 & immed { vldmRn = vldmRn + (immed << 2); } +vldmUpdate: thv_immed is TMode=1 & vldmRn & thv_c2121=0 & thv_immed { } +vldmUpdate: thv_immed is TMode=1 & vldmRn & thv_c2121=1 & thv_immed { vldmRn = vldmRn + (thv_immed << 2); } + +buildVldmDdList: is counter=0 { } +buildVldmDdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+1; ] +{ + Dreg = *mult_addr; + mult_addr = mult_addr + 8; +} + +buildVldmDdList: Dreg,buildVldmDdList is Dreg & buildVldmDdList [ counter=counter-1; regNum=regNum+1; ] +{ + Dreg = *mult_addr; + mult_addr = mult_addr + 8; + build buildVldmDdList; +} + +vldmDdList: "{"^buildVldmDdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVldmDdList [ regNum=(D22<<4)+c1215 - 1; counter=c0007>>1; ] { } +vldmDdList: "{"^buildVldmDdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVldmDdList [ regNum=(thv_D22<<4)+thv_c1215 - 1; counter=thv_c0007>>1; ] { } + +:vldmia^COND vldmRn,vldmDdList is ( ($(AMODE) & COND & c2327=0x19 & c2121 & c2020=1 & c0811=11 & c0000=0) | + ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=1 & thv_c0811=11 & thv_c0000=0) ) & vldmRn & vldmDdList & vldmOffset & vldmUpdate +{ + mult_addr = vldmRn; + build vldmDdList; + build vldmUpdate; +} + +:vldmdb^COND vldmRn,vldmDdList is ( ($(AMODE) & COND & c2327=0x1a & c2121=1 & c2020=1 & c0811=11 & c0000=0) | + ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=1 & thv_c0811=11 & thv_c0000=0 ) ) & vldmRn & vldmDdList & vldmOffset +{ + local start_addr = vldmRn - vldmOffset; + mult_addr = start_addr; + build vldmDdList; + vldmRn = start_addr; +} + +@endif # VFPv2 | VFPv3 | SIMD + +@if defined(VERSION_8) + +fldmSet1: thv_Dd_1 is TMode=1 & thv_Rn & thv_Dd_1 { thv_Dd_1 = * thv_Rn; } +fldmSet2: thv_Dd_2 is TMode=1 & thv_Rn & thv_Dd_2 & fldmSet1 { build fldmSet1; thv_Dd_2 = *:8 (thv_Rn + 8:4); } +fldmSet3: thv_Dd_3 is TMode=1 & thv_Rn & thv_Dd_3 & fldmSet2 { build fldmSet2; thv_Dd_3 = *:8 (thv_Rn + 16:4); } +fldmSet4: thv_Dd_4 is TMode=1 & thv_Rn & thv_Dd_4 & fldmSet3 { build fldmSet3; thv_Dd_4 = *:8 (thv_Rn + 24:4); } +fldmSet5: thv_Dd_5 is TMode=1 & thv_Rn & thv_Dd_5 & fldmSet4 { build fldmSet4; thv_Dd_5 = *:8 (thv_Rn + 32:4); } +fldmSet6: thv_Dd_6 is TMode=1 & thv_Rn & thv_Dd_6 & fldmSet5 { build fldmSet5; thv_Dd_6 = *:8 (thv_Rn + 40:4); } +fldmSet7: thv_Dd_7 is TMode=1 & thv_Rn & thv_Dd_7 & fldmSet6 { build fldmSet6; thv_Dd_7 = *:8 (thv_Rn + 48:4); } +fldmSet8: thv_Dd_8 is TMode=1 & thv_Rn & thv_Dd_8 & fldmSet7 { build fldmSet7; thv_Dd_8 = *:8 (thv_Rn + 56:4); } +fldmSet9: thv_Dd_9 is TMode=1 & thv_Rn & thv_Dd_9 & fldmSet8 { build fldmSet8; thv_Dd_9 = *:8 (thv_Rn + 64:4); } +fldmSet10: thv_Dd_10 is TMode=1 & thv_Rn & thv_Dd_10 & fldmSet9 { build fldmSet9; thv_Dd_10 = *:8 (thv_Rn + 72:4); } +fldmSet11: thv_Dd_11 is TMode=1 & thv_Rn & thv_Dd_11 & fldmSet10 { build fldmSet10; thv_Dd_11 = *:8 (thv_Rn + 80:4); } +fldmSet12: thv_Dd_12 is TMode=1 & thv_Rn & thv_Dd_12 & fldmSet11 { build fldmSet11; thv_Dd_12 = *:8 (thv_Rn + 88:4); } +fldmSet13: thv_Dd_13 is TMode=1 & thv_Rn & thv_Dd_13 & fldmSet12 { build fldmSet12; thv_Dd_13 = *:8 (thv_Rn + 96:4); } +fldmSet14: thv_Dd_14 is TMode=1 & thv_Rn & thv_Dd_14 & fldmSet13 { build fldmSet13; thv_Dd_14 = *:8 (thv_Rn + 
104:4); } +fldmSet15: thv_Dd_15 is TMode=1 & thv_Rn & thv_Dd_15 & fldmSet14 { build fldmSet14; thv_Dd_15 = *:8 (thv_Rn + 112:4); } +fldmSet16: thv_Dd_16 is TMode=1 & thv_Rn & thv_Dd_16 & fldmSet15 { build fldmSet15; thv_Dd_16 = *:8 (thv_Rn + 120:4); } + +fldmSet: "{"^thv_Dd_1^"}" is TMode=1 & thv_Dd_1 & thv_c0007=3 & fldmSet1 { build fldmSet1; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet2^"}" is TMode=1 & thv_Dd_1 & thv_c0007=5 & fldmSet2 { build fldmSet2; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet3^"}" is TMode=1 & thv_Dd_1 & thv_c0007=7 & fldmSet3 { build fldmSet3; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet4^"}" is TMode=1 & thv_Dd_1 & thv_c0007=9 & fldmSet4 { build fldmSet4; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet5^"}" is TMode=1 & thv_Dd_1 & thv_c0007=11 & fldmSet5 { build fldmSet5; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet6^"}" is TMode=1 & thv_Dd_1 & thv_c0007=13 & fldmSet6 { build fldmSet6; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet7^"}" is TMode=1 & thv_Dd_1 & thv_c0007=15 & fldmSet7 { build fldmSet7; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet8^"}" is TMode=1 & thv_Dd_1 & thv_c0007=17 & fldmSet8 { build fldmSet8; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet9^"}" is TMode=1 & thv_Dd_1 & thv_c0007=19 & fldmSet9 { build fldmSet9; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet10^"}" is TMode=1 & thv_Dd_1 & thv_c0007=21 & fldmSet10 { build fldmSet10; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet11^"}" is TMode=1 & thv_Dd_1 & thv_c0007=23 & fldmSet11 { build fldmSet11; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet12^"}" is TMode=1 & thv_Dd_1 & thv_c0007=25 & fldmSet12 { build fldmSet12; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet13^"}" is TMode=1 & thv_Dd_1 & thv_c0007=27 & fldmSet13 { build fldmSet13; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet14^"}" is TMode=1 & thv_Dd_1 & thv_c0007=29 & fldmSet14 { build fldmSet14; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet15^"}" is TMode=1 & thv_Dd_1 & thv_c0007=31 & fldmSet15 { build fldmSet15; } +fldmSet: "{"^thv_Dd_1^"-"^fldmSet16^"}" is TMode=1 & thv_Dd_1 & thv_c0007=33 & fldmSet16 { build fldmSet16; } + +fldmWback: thv_Rn^"!" is thv_bit21=1 & thv_bit23=1 & thv_c0007 & thv_Rn { thv_Rn = thv_Rn + (4 * thv_c0007:4); } +fldmWback: thv_Rn^"!" 
is thv_bit21=1 & thv_bit23=0 & thv_c0007 & thv_Rn { thv_Rn = thv_Rn - (4 * thv_c0007:4); } +fldmWback: thv_Rn is thv_bit21=0 & thv_Rn { } + +:fldmdbx^ItCond fldmWback, fldmSet is TMode=1 & ItCond & thv_c2331=0b111011010 & thv_bit21=1 & thv_bit20=1 & thv_c0811=0b1011 & fldmWback & fldmSet +{ + build fldmWback; + build fldmSet; +} + +:fldmiax^ItCond fldmWback, fldmSet is TMode=1 & ItCond & thv_c2331=0b111011001 & thv_bit20=1 & thv_c0811=0b1011 & fldmWback & fldmSet +{ + build fldmSet; + build fldmWback; +} + +@endif + +@if defined(VFPv2) || defined(VFPv3) + +####### +# VLDM (A2) +# + +buildVldmSdList: is counter=0 { } +buildVldmSdList: Sreg is counter=1 & Sreg [ counter=0; regNum=regNum+1; ] +{ + Sreg = *mult_addr; + mult_addr = mult_addr + 4; +} +buildVldmSdList: Sreg,buildVldmSdList is Sreg & buildVldmSdList [ counter=counter-1; regNum=regNum+1; ] +{ + Sreg = *mult_addr; + mult_addr = mult_addr + 4; + build buildVldmSdList; +} + +vldmSdList: "{"^buildVldmSdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVldmSdList [ regNum=(c1215<<1) + D22 - 1; counter=c0007; ] { } +vldmSdList: "{"^buildVldmSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVldmSdList [ regNum=(thv_c1215<<1) + thv_D22 - 1; counter=thv_c0007; ] { } + +:vldmia^COND vldmRn,vldmSdList is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x19 & c2121 & c2020=1 & c0811=10 ) | + ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=1 & thv_c0811=10 ) ) & vldmRn & vldmSdList & vldmOffset & vldmUpdate +{ + mult_addr = vldmRn; + build vldmSdList; + build vldmUpdate; +} + +:vldmdb^COND vldmRn,vldmSdList is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x1a & c2121=1 & c2020=1 & c0811=10 ) | + ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=1 & thv_c0811=10 ) ) & vldmRn & vldmSdList & vldmOffset +{ + local start_addr = vldmRn - vldmOffset; + mult_addr = start_addr; + build vldmSdList; + vldmRn = start_addr; +} + +####### +# VLDR +# + +vldrRn: "["^Rn^"]" is TMode=0 & Rn & immed=0 & c2323=0 { ptr:4 = Rn; export ptr; } +vldrRn: "["^Rn^"]" is TMode=0 & Rn & immed=0 & c2323=1 { ptr:4 = Rn; export ptr; } +vldrRn: "["^Rn^",#-"^vldrImm^"]" is TMode=0 & Rn & immed & c2323=0 [ vldrImm = immed * 4; ] { ptr:4 = Rn - vldrImm; export ptr; } +vldrRn: "["^Rn^",#"^vldrImm^"]" is TMode=0 & Rn & immed & c2323=1 [ vldrImm = immed * 4; ] { ptr:4 = Rn + vldrImm; export ptr; } +vldrRn: "["^pc^",#-"^vldrImm^"]" is TMode=0 & Rn=15 & pc & immed & c2323=0 [ vldrImm = immed * 4; ] { ptr:4 = ((inst_start + 8) & 0xfffffffc) - vldrImm; export ptr; } +vldrRn: "["^pc^",#"^vldrImm^"]" is TMode=0 & Rn=15 & pc & immed & c2323=1 [ vldrImm = immed * 4; ] { ptr:4 = ((inst_start + 8) & 0xfffffffc) + vldrImm; export ptr; } +vldrRn: "["^VRn^"]" is TMode=1 & VRn & thv_immed=0 & thv_c2323=0 { ptr:4 = VRn; export ptr; } +vldrRn: "["^VRn^"]" is TMode=1 & VRn & thv_immed=0 & thv_c2323=1 { ptr:4 = VRn; export ptr; } +vldrRn: "["^VRn^",#-"^vldrImm^"]" is TMode=1 & VRn & thv_immed & thv_c2323=0 [ vldrImm = thv_immed * 4; ] { ptr:4 = VRn - vldrImm; export ptr; } +vldrRn: "["^VRn^",#"^vldrImm^"]" is TMode=1 & VRn & thv_immed & thv_c2323=1 [ vldrImm = thv_immed * 4; ] { ptr:4 = VRn + vldrImm; export ptr; } +vldrRn: "["^pc^",#-"^vldrImm^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed & thv_c2323=0 [ vldrImm = thv_immed * 4; ] { ptr:4 = ((inst_start + 4) & 0xfffffffc) - vldrImm; export ptr; } +vldrRn: "["^pc^",#"^vldrImm^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed & thv_c2323=1 [ vldrImm = thv_immed * 4; ] { ptr:4 = ((inst_start + 4) & 0xfffffffc) + vldrImm; export ptr; } + 
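+# NOTE (informative, not part of the upstream Ghidra spec): the vldrRn
+# subconstructors above implement VLDR's literal-pool addressing. For a
+# PC-relative load with U=1 (c2323=1) and immed=3, the address computed is
+#   ptr = ((inst_start + 8) & 0xfffffffc) + 3*4   in ARM mode
+#   ptr = ((inst_start + 4) & 0xfffffffc) + 3*4   in Thumb mode
+# i.e. the word-aligned PC value plus the byte-scaled imm8, which the
+# :vldr constructors below dereference as an 8-byte (.64) or 4-byte (.32)
+# load.
+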
+:vldr^COND^".64" Dd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=1 & c0811=11) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=1 & thv_c0811=11)) & Dd & vldrRn +{ + Dd = *:8 vldrRn; +} + +:vldr^COND^".32" Sd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=1 & c0811=10) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=1 & thv_c0811=10)) & Sd & vldrRn +{ + Sd = *:4 vldrRn; +} + +@endif # VFPv2 | VFPv3 + +define pcodeop VectorMin; +define pcodeop VectorMax; +define pcodeop FloatVectorMin; +define pcodeop FloatVectorMax; +define pcodeop VectorMultiplyAccumulate; +define pcodeop VectorMultiplySubtract; +define pcodeop VectorMultiplySubtractLong; +define pcodeop VectorDoubleMultiplyHighHalf; +define pcodeop VectorRoundDoubleMultiplyHighHalf; +define pcodeop VectorDoubleMultiplyLong; +define pcodeop VectorDoubleMultiplyAccumulateLong; +define pcodeop VectorDoubleMultiplySubtractLong; +define pcodeop FloatVectorMultiplyAccumulate; +define pcodeop FloatVectorMultiplySubtract; +define pcodeop VectorGetElement; + +@if defined(SIMD) + +:vmax.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=0 ) | + ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & udt & Dm & Dn & Dd +{ + Dd = VectorMax(Dn,Dm,esize2021,udt); +} + +:vmax.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=0 ) | + ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & udt & Qm & Qn & Qd +{ + Qd = VectorMax(Qn,Qm,esize2021,udt); +} + +:vmax.f32 Dd,Dn,Dm is (($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0)) & Dm & Dn & Dd +{ + Dd = FloatVectorMax(Dn,Dm,2:4,32:1); +} + +:vmax.f32 Qd,Qn,Qm is (($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=15 & thv_Q6=1 & thv_c0404=0)) & Qm & Qn & Qd +{ + Qd = FloatVectorMax(Qn,Qm,2:4,32:1); +} + +:vmin.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=1 ) | + ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=0 & thv_c0404=1 ) ) & esize2021 & udt & Dm & Dn & Dd + +{ + Dd = VectorMin(Dn,Dm,esize2021,udt); +} + +:vmin.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=1 ) | + ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=1 & thv_c0404=1 ) ) & esize2021 & udt & Qm & Qn & Qd + +{ + Qd = VectorMin(Qn,Qm,esize2021,udt); +} + +:vmin.f32 Dd,Dn,Dm is (($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0)) & Dm & Dn & Dd +{ + Dd = FloatVectorMin(Dn,Dm,2:4,32:1); +} + +:vmin.f32 Qd,Qn,Qm is (($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=15 & thv_Q6=1 & thv_c0404=0)) & Qm & Qn & Qd +{ + Qd = FloatVectorMin(Qn,Qm,2:4,32:1); +} + +:vmla.i^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=0 & c0404=0 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd +{ + Dd = VectorMultiplyAccumulate(Dn,Dm,esize2021,0:1); +} + +:vmla.i^esize2021 
+:vmla.i^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=0 & c0404=0 ) |
+                                ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd
+{
+  Dd = VectorMultiplyAccumulate(Dn,Dm,esize2021,0:1);
+}
+
+:vmla.i^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=1 & c0404=0) |
+                                ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd
+{
+  Qd = VectorMultiplyAccumulate(Qn,Qm,esize2021,0:1);
+}
+
+:vmls.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=0 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd
+{
+  Dd = VectorMultiplySubtract(Dn,Dm,esize2021,0:1);
+}
+
+:vmls.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=1 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd
+{
+  Qd = VectorMultiplySubtract(Qn,Qm,esize2021,0:1);
+}
+
+:vmlal.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=8 & Q6=0 & c0404=0) |
+                                    ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021
+{
+  Qd = VectorMultiplyAccumulate(Dn,Dm,esize2021,udt);
+}
+
+:vmlsl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=10 & Q6=0 & c0404=0) |
+                                    ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021
+{
+  Qd = VectorMultiplySubtractLong(Dn,Dm,esize2021,udt);
+}
+
+:vmla.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=1) |
+                                 ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2021 & Dn & Dd & Dm
+{
+  Dd = FloatVectorMultiplyAccumulate(Dn,Dm,fesize2021,8:1);
+}
+
+:vmla.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=1) |
+                                 ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2021 & Qn & Qd & Qm
+{
+  Qd = FloatVectorMultiplyAccumulate(Qn,Qm,fesize2021,16:1);
+}
+
+:vmls.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=1) |
+                                 ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2021 & Dn & Dd & Dm
+{
+  Dd = FloatVectorMultiplySubtract(Dn,Dm,fesize2021,8:1);
+}
+
+:vmls.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=1) |
+                                 ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2021 & Qn & Qd & Qm
+{
+  Qd = FloatVectorMultiplySubtract(Qn,Qm,fesize2021,16:1);
+}
+
+@endif # SIMD
+
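The scalar VFP multiply-accumulate constructors in the block that follows spell their semantics out directly in p-code, e.g. Sd = Sd f+ (Sn f* Sm): a multiply and an add that each round separately, deliberately not a fused multiply-add. A small, self-contained Python sketch of why that distinction is observable; the f32 helper and the register values are assumptions of this sketch, standing in for 4-byte varnodes:

    # Illustrative only: two separately rounded IEEE ops, as in the p-code below.
    import struct

    def f32(x: float) -> float:
        """Round to IEEE-754 single precision, like p-code f* / f+ on 4-byte varnodes."""
        return struct.unpack('<f', struct.pack('<f', x))[0]

    s0, s1, s2 = f32(1.0), f32(1e-8), f32(1e-8)
    prod = f32(s1 * s2)        # first rounding:  Sn f* Sm
    s0 = f32(s0 + prod)        # second rounding: Sd f+ (...)
    print(s0)                  # 1.0 -- the ~1e-16 product vanishes in the add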
+@if defined(VFPv2) || defined(VFPv3)
+
+:vmla^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=10 & c0606=0 & c0404=0 ) |
+                                ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd
+{
+  Sd = Sd f+ (Sn f* Sm);
+}
+
+:vmla^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=11 & c0606=0 & c0404=0) |
+                                ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0)) & Dm & Dn & Dd
+{
+  Dd = Dd f+ (Dn f* Dm);
+}
+
+:vmls^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=10 & c0606=1 & c0404=0) |
+                                ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd
+{
+  Sd = Sd f- (Sn f* Sm);
+}
+
+:vmls^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=11 & c0606=1 & c0404=0 ) |
+                                ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0)) & Dm & Dn & Dd
+{
+  Dd = Dd f- (Dn f* Dm);
+}
+
+@endif # VFPv2 || VFPv3
+
+@if defined(SIMD)
+
+#####
+# VML* (by scalar) (A1)
+#
+
+vmlDm: Dm_3^"["^index^"]" is TMode=0 & c2021=1 & Dm_3 & M5 & c0303 [ index = (M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; }
+vmlDm: Dm_4^"["^M5^"]" is TMode=0 & c2021=2 & Dm_4 & M5 { el:4 = VectorGetElement(Dm_4, M5:1, 4:1, 0:1); export el; }
+vmlDm: thv_Dm_3^"["^index^"]" is TMode=1 & thv_c2021=1 & thv_Dm_3 & thv_M5 & thv_c0303 [ index = (thv_M5 << 1) + thv_c0303; ] { el:4 = VectorGetElement(thv_Dm_3, index:1, 2:1, 0:1); export el; }
+vmlDm: thv_Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & thv_Dm_4 & thv_M5 { el:4 = VectorGetElement(thv_Dm_4, thv_M5:1, 4:1, 0:1); export el; }
+
+
+:vmla.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) |
+                                   ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Dn & Dd & vmlDm
+{
+  Dd = VectorMultiplyAccumulate(Dn,vmlDm,esize2021);
+}
+
+:vmla.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) |
+                                   ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Qn & Qd & vmlDm
+{
+  Qd = VectorMultiplyAccumulate(Qn,vmlDm,esize2021);
+}
+
+:vmla.f32 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) |
+                           ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=1 & thv_c0606=1 & thv_c0404=0)) & Dn & Dd & vmlDm
+{
+  Dd = FloatVectorMultiplyAccumulate(Dn,vmlDm,2:4,32:1);
+}
+
+:vmla.f32 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) |
+                           ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=1 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & vmlDm
+{
+  Qd = FloatVectorMultiplyAccumulate(Qn,vmlDm,2:4,32:1);
+}
+
+:vmls.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=4 & c0606=1 & c0404=0) |
+                                   ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=4 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Dn & Dd & vmlDm
+{
+  Dd = VectorMultiplySubtract(Dn,vmlDm,esize2021);
+}
+
+:vmls.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2)& c0811=4 & c0606=1 & c0404=0) |
+                                   ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=4 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Qn & Qd & vmlDm
+{
+  Qd = VectorMultiplySubtract(Qn,vmlDm,esize2021);
+}
+
+:vmls.f32 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) |
+                           ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=5 & thv_c0606=1 & thv_c0404=0)) & Dn & Dd & vmlDm
+{
+  Dd = FloatVectorMultiplySubtract(Dn,vmlDm,2:4,32:1);
+}
+
+:vmls.f32 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) |
+                           ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=5 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & vmlDm
+{
+  Qd = FloatVectorMultiplySubtract(Qn,vmlDm,2:4,32:1);
+}
+
+#####
+# VML* (by scalar) (A2)
+#
+
+
+
+:vmlal.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=2 & Q6=1 & c0404=0) |
+
($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=2 & thv_Q6=1 & thv_c0404=0 ) ) & udt & esize2021 & Dn & Qd & vmlDm +{ + Qd = VectorMultiplyAccumulate(Dn,vmlDm,esize2021,udt); +} + +:vmlsl.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=6 & Q6=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=6 & thv_Q6=1 & thv_c0404=0 ) ) & udt & esize2021 & Dn & Qd & vmlDm +{ + Qd = VectorMultiplySubtract(Dn,vmlDm,esize2021,udt); +} + +:vmov.^simdExpImmDT Dd,simdExpImm_8 is (( $(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=0 & c0404=1 ) | + ( $(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0707=0 & thv_Q6=0 & thv_c0404=1 )) & Dd & simdExpImmDT & simdExpImm_8 +{ + Dd = simdExpImm_8; +} + +:vmov.^simdExpImmDT Qd,simdExpImm_16 is (( $(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=1 & c0404=1 ) | + ( $(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0707=0 & thv_Q6=1 & thv_c0404=1 )) & Qd & simdExpImmDT & simdExpImm_16 +{ + Qd = simdExpImm_16; +} + +@endif # SIMD + +@if defined(VFPv3) + +:vmov^COND^".f32" Sd,vfpExpImm_4 is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c2021=3 & c0411=0xa0 ) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c0411=0xa0 ) ) & Sd & vfpExpImm_4 +{ + Sd = vfpExpImm_4; +} + +:vmov^COND^".f64" Dd,vfpExpImm_8 is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c2021=3 & c0411=0xb0 ) | + ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c0411=0xb0 ) ) & Dd & vfpExpImm_8 +{ + Dd = vfpExpImm_8; +} + +@endif # VFPv3 + +@if defined(SIMD) + +:vmov Dd,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=0 & c0404=1 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c1619=thv_c0003 & thv_c0811=1 & thv_c0707=thv_c0505 & thv_c0606=0 & thv_c0404=1) ) & Dd & Dm +{ + Dd = Dm; +} + +:vmov Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=1 & c0404=1 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c1619=thv_c0003 & thv_c0811=1 & thv_c0707=thv_c0505 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qm +{ + Qd = Qm; +} + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) + +:vmov^COND^".f32" Sd,Sm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x29 & c0404=0 ) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x29 & thv_c0404=0) ) & Sd & Sm +{ + Sd = Sm; +} + +:vmov^COND^".f64" Dd,Dm is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x2d & c0404=0 ) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x2d & thv_c0404=0) ) & Dd & Dm +{ + Dd = Dm; +} + +@endif # VFPv2 || VFPv3 + +define pcodeop VectorSetElement; + +@if defined(SIMD) + +vmovSize: 8 is TMode=0 & c2222=1 { export 1:1; } +vmovSize: 16 is TMode=0 & c2222=0 & c0505=1 { export 2:1; } +vmovSize: 8 is TMode=1 & thv_c2222=1 { export 1:1; } +vmovSize: 16 is TMode=1 & thv_c2222=0 & thv_c0505=1 { export 2:1; } + +@if defined(VFPv2) || defined(VFPv3) || defined(SIMD) + +vmovSize: 32 is TMode=0 & c2222=0 & c0506=0 { export 4:1; } +vmovSize: 32 is TMode=1 & thv_c2222=0 & thv_c0506=0 { export 4:1; } + +@endif # VFPv2 || VFPv3 || SIMD + +vmovIndex: val is TMode=0 & c2222=1 & c2121 & c0506 [ val = (c2121 << 2) + c0506; ] { tmp:1 = val; export tmp; } +vmovIndex: val is TMode=0 & c2222=0 & c2121 & c0606 & c0505=1 [ val = (c2121 << 1) + c0606; ] { tmp:1 = val; export tmp; } + +vmovIndex: val is TMode=1 & thv_c2222=1 & thv_c2121 & thv_c0506 [ val = 
(thv_c2121 << 2) + thv_c0506; ] { tmp:1 = val; export tmp; } +vmovIndex: val is TMode=1 & thv_c2222=0 & thv_c2121 & thv_c0606 & thv_c0505=1 [ val = (thv_c2121 << 1) + thv_c0606; ] { tmp:1 = val; export tmp; } + +@if defined(VFPv2) || defined(VFPv3) || defined(SIMD) + +vmovIndex: c2121 is TMode=0 & c2222=0 & c2121 & c0606=0 & c0506=0 { tmp:1 = c2121; export tmp; } +vmovIndex: thv_c2121 is TMode=1 & thv_c2222=0 & thv_c2121 & thv_c0606=0 & thv_c0506=0 { tmp:1 = thv_c2121; export tmp; } + +@endif # VFPv2 || VFPv3 || SIMD + + +dNvmovIndex: Dn^"["^vmovIndex^"]" is Dn & vmovIndex { } + + +:vmov^COND^"."^vmovSize dNvmovIndex,VRd is ( ($(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2020=0 & c0811=11 & c0404=1 & c0003=0 ) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2020=0 & thv_c0811=11 & thv_c0404=1 & thv_c0003=0 ) ) & Dn & VRd & vmovSize & vmovIndex & dNvmovIndex +{ + VectorSetElement(VRd,Dn,vmovIndex,vmovSize); +} + +:vmov^COND^".u"^vmovSize VRd,dNvmovIndex is ( ($(AMODE) & COND & ARMcond=1 & c2327=0x1d & c2020=1 & c0811=11 & c0404=1 & c0003=0 ) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2020=1 & thv_c0811=11 & thv_c0404=1 & thv_c0003=0 ) ) & Dn & VRd & vmovSize & vmovIndex & dNvmovIndex +{ + VRd = VectorGetElement(Dn,vmovIndex,vmovSize,0:1); +} + +:vmov^COND^".s"^vmovSize VRd,dNvmovIndex is ( ( $(AMODE) & COND & ARMcond=1 & c2327=0x1c & c2020=1 & c0811=11 & c0404=1 & c0003=0 ) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c2020=1 & thv_c0811=11 & thv_c0404=1 & thv_c0003=0 ) ) & Dn & VRd & vmovSize & vmovIndex & dNvmovIndex +{ + VRd = VectorGetElement(Dn,vmovIndex,vmovSize,1:1); +} + +@endif # SIMD + + +@if defined(VFPv2) || defined(VFPv3) + +:vmov^COND Sn,VRd is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2122=0 & c2020=0 & c0811=10 & c0006=0x10) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2122=0 & thv_c2020=0 & thv_c0811=10 & thv_c0006=0x10) ) & Sn & VRd +{ + Sn = VRd; +} + +:vmov^COND VRd,Sn is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2122=0 & c2020=1 & c0811=10 & c0006=0x10) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2122=0 & thv_c2020=1 & thv_c0811=10 & thv_c0006=0x10) ) & Sn & VRd +{ + VRd = Sn; +} + +:vmov^COND Sm,SmNext,VRd,VRn is COND & ( ($(AMODE) & ARMcond=1 & c2027=0xc4 & c0611=0x28 & c0404=1) | + ($(TMODE_E) & thv_c2027=0xc4 & thv_c0611=0x28 & thv_c0404=1) ) & VRn & VRd & Sm & SmNext +{ + Sm = VRd; + SmNext = VRn; +} + +:vmov^COND VRd,VRn,Sm,SmNext is COND & ( ($(AMODE) & ARMcond=1 & c2027=0xc5 & c0611=0x28 & c0404=1) | + ($(TMODE_E) & thv_c2027=0xc5 & thv_c0611=0x28 & thv_c0404=1) ) & VRn & VRd & Sm & SmNext +{ + VRd = Sm; + VRn = SmNext; +} + +@endif # VFPv2 || VFPv3 + +@if defined(VFPv2) || defined(VFPv3) || defined(SIMD) + +:vmov^COND Dm,VRd,VRn is COND & ( ($(AMODE) & ARMcond=1 & c2027=0xc4 & c0611=0x2c & c0404=1) | ($(TMODE_E) & thv_c2027=0xc4 & thv_c0611=0x2c & thv_c0404=1) ) & Dm & VRn & VRd +{ + Dm = (zext(VRn) << 32) + zext(VRd); +} + +:vmov^COND VRd,VRn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2027=0xc5 & c0611=0x2c & c0404=1) | ($(TMODE_E) & thv_c2027=0xc5 & thv_c0611=0x2c & thv_c0404=1) ) & Dm & VRn & VRd +{ + VRn = Dm(4); + VRd = Dm:4; +} + +@endif # VFPv2 || VFPv3 || SIMD + +define pcodeop VectorCopyLong; +define pcodeop VectorCopyNarrow; + +@if defined(SIMD) + +:vmovl.^udt^esize2021 Qd,Dm is $(AMODE) & cond=15 & c2527=1 & udt & c2323=1 & (c1921=1 | c1921=2 | c1921=4) & esize2021 & c1618=0 & Qd & c0611=0x28 & c0404=1 & Dm +{ + Qd = VectorCopyLong(Dm,esize2021,udt); +} + +:vmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=8 & c0404=0) | 
+ ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0611=8 & thv_c0404=0) ) & esize1819x2 & Dd & Qm +{ + Dd = VectorCopyNarrow(Qm,esize1819x2); +} + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) || defined(SIMD) + +:vmrs^COND VRd,fpscr is COND & ( ($(AMODE) & ARMcond=1 & c1627=0xef1 & c0011=0xa10) | ($(TMODE_E) & thv_c1627=0xef1 & thv_c0011=0xa10)) & fpscr & VRd +{ + VRd = fpscr; +} + +apsr: "apsr" is epsilon {} + +:vmrs^COND apsr,fpscr is COND & apsr & ( ($(AMODE) & ARMcond=1 & c1627=0xef1 & c1215=15 & c0011=0xa10) | ($(TMODE_E) & thv_c1627=0xef1 & thv_c1215=15 & thv_c0011=0xa10) ) & fpscr +{ + NG = $(FPSCR_N); + ZR = $(FPSCR_Z); + CY = $(FPSCR_C); + OV = $(FPSCR_V); +} + +:vmsr^COND fpscr,VRd is COND & ( ($(AMODE) & ARMcond=1 & c1627=0xee1 & c0011=0xa10) | ($(TMODE_E) & thv_c1627=0xee1 & thv_c0011=0xa10) ) & VRd & fpscr +{ + fpscr = VRd; +} + +@endif # VFPv2 || VFPv3 || SIMD + +@if defined(SIMD) + +### +# VMUL (floating Point) +# + +define pcodeop FloatVectorMult; +define pcodeop VectorMultiply; +define pcodeop PolynomialMultiply; + +:vmul.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=13 & Q6=0 & c0404=1) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm +{ + Dd = FloatVectorMult(Dn,Dm,2:1,32:1); +} + +:vmul.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=13 & Q6=1 & c0404=1) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd +{ + Qd = FloatVectorMult(Qn,Qm,2:1,32:1); +} + +:vmul.f16 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=0 & c0404=1) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=0 & thv_c2020=1 & thv_c0811=13 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm +{ + Dd = FloatVectorMult(Dn,Dm,4:1,16:1); +} + +:vmul.f16 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=1 & c0404=1) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c2121=0 & thv_c2020=1 & thv_c0811=13 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd +{ + Qd = FloatVectorMult(Qn,Qm,4:1,16:1); +} + +:vmul^COND^".f64" Dd,Dn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=11 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd +{ + Dd = Dn f* Dm; +} + +:vmul^COND^".f32" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=10 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & Sm & Sn & Sd +{ + Sd = Sn f* Sm; +} + +:vmul^COND^".f16" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=9 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & Sm & Sn & Sd +{ + product:2 = Sn:2 f* Sm:2; + Sd = zext(product); +} + +:vmul.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=9 & Q6=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=9 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm +{ + Dd = VectorMultiply(Dn,Dm,esize2021); +} + +:vmul.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=9 & Q6=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=9 & thv_Q6=1 & thv_c0404=1)) & esize2021 & Qm & Qn & Qd +{ + Qd = VectorMultiply(Qn,Qm,esize2021); +} + +:vmul.p8 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & 
thv_c0811=9 & thv_Q6=0 & thv_c0404=1) ) & Dn & Dd & Dm +{ + Dd = PolynomialMultiply(Dn,Dm,1:1); +} + +:vmul.p8 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=9 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd +{ + Qd = PolynomialMultiply(Qn,Qm,1:1); +} + +:vmull.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0xc & Q6=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0xc & thv_Q6=0 & thv_c0404=0) ) & esize2021 & Dm & Dn & Qd & udt +{ + Qd = VectorMultiply(Dn,Dm,esize2021,udt); +} + +:vmull.p8 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=0 & c0811=0xe & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=0 & thv_c0811=0xe & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Qd +{ + Qd = PolynomialMultiply(Dn,Dm,1:1); +} + +:vmull.p64 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=0xe & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=0xe & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Qd +{ + Qd = PolynomialMultiply(Dn,Dm,8:1); +} + +etype: "I" is TMode=0 & c0909=0 & c0808=0 {} +etype: "F" is TMode=0 & c0909=0 & c0808=1 {} +etype: "S" is TMode=0 & c0909=1 & c2424=0 {} +etype: "U" is TMode=0 & c0909=1 & c2424=1 {} +etype: "I" is TMode=1 & thv_c0909=0 & thv_c0808=0 {} +etype: "F" is TMode=1 & thv_c0909=0 & thv_c0808=1 {} +etype: "S" is TMode=1 & thv_c0909=1 & thv_c2828=0 {} +etype: "U" is TMode=1 & thv_c0909=1 & thv_c2828=1 {} + +vmlDmA: Dm_3^"["^index^"]" is TMode=0 & c2021=1 & Dm_3 & M5 & c0303 [ index = (M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; } +vmlDmA: Dm_4^"["^M5^"]" is TMode=0 & c2021=2 & Dm_4 & M5 { el:4 = VectorGetElement(Dm_4, M5:1, 4:1, 0:1); export el; } +vmlDmA: Dm_3^"["^index^"]" is TMode=1 & thv_c2021=1 & Dm_3 & thv_M5 & c0303 [ index = (thv_M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; } +vmlDmA: Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & Dm_4 & thv_M5 { el:4 = VectorGetElement(Dm_4, thv_M5:1, 4:1, 0:1); export el; } + +:vmul.^etype^esize2021 Qd,Qn,vmlDmA is ( ($(AMODE) & cond=15 & c2424=1 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | + ($(TMODE_F) & thv_c2828=1 & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0911=4 & thv_c0606=1 & thv_c0404=0 ) ) & etype & esize2021 & Qn & Qd & vmlDmA +{ + Qd = VectorMultiply(Qn,vmlDmA,esize2021); +} + +:vmul.^etype^esize2021 Dd,Dn,vmlDmA is ( ($(AMODE) & cond=15 & c2424=0 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2828=0 & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0911=4 & thv_c0606=1 & thv_c0404=0 ) ) & etype & esize2021 & Dn & Dd & vmlDmA +{ + Dd = VectorMultiply(Dn,vmlDmA,esize2021); +} + +:vmull.^etype^esize2021 Qd,Dn,vmlDmA is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=10 & c0606=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=10 & thv_c0606=1 & thv_c0404=0 ) ) & Dd & Dm & esize1819 & etype & esize2021 & Dn & Qd & vmlDmA +{ + Qd = VectorMultiply(Dn,vmlDmA,esize2021); +} + +:vmvn Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0811=5 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0) ) & Dd & Dm +{ + Dd = ~Dm; +} + +:vmvn Qd,Qm is ( ($(AMODE) & cond=15 & 
c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0811=5 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & Qd & Qm +{ + tmp1:8 = Qm:8; + tmp2:8 = Qm(8); + tmp1 = ~ tmp1; + tmp2 = ~ tmp2; + Qd = (zext(tmp1) << 8) | zext(tmp2); +} + +@endif # SIMD + + + +define pcodeop FloatVectorNeg; + +@if defined(SIMD) + +:vneg.s^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 +{ + Dd = FloatVectorNeg(Dm,esize1819); +} + +:vneg.s^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=7 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 +{ + Qd = FloatVectorNeg(Qm,esize1819); +} + + +:vneg.f32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xf & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xf & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dd +{ + Dd = FloatVectorNeg(Dm,2:1,32:1); +} + +:vneg.f32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xf & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xf & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm +{ + Qd = FloatVectorNeg(Qm,2:1,32:1); +} + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) + + +:vnmla^COND^".f64" Dd,Dn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=11 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & Dm & Dn & Dd +{ + product:8 = Dn f* Dm; + Dd = (0 f- Dd) f+ (0 f- product); +} + +:vnmla^COND^".f32" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=10 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd +{ + product:4 = Sn f* Sm; + Sd = (0 f- Sd) f+ (0 f- product); +} + +:vnmla.f16 Sd,Sn,Sm is ( ($(AMODE) & cond=0xe & c2327=0x1c & c2021=1 & c0811=9 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd +{ + product:2 = Sn:2 f* Sm:2; + product = (0:2 f- Sd:2) f+ (0:2 f- product); + Sd = zext(product); +} + +:vnmls^COND^".f64" Dd,Dn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=11 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd +{ + product:8 = Dn f* Dm; + Dd = product f- Dd; +} + +:vnmls^COND^".f32" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=10 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & Sm & Sn & Sd +{ + product:4 = Sn f* Sm; + Sd = product f- Sd; +} + +:vnmls.f16 Sd,Sn,Sm is ( ($(AMODE) & cond=0xe & c2327=0x1c & c2021=1 & c0811=9 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & Sm & Sn & Sd +{ + product:2 = Sn:2 f* Sm:2; + product = product f- Sd:2; + Sd = zext(product); +} + +:vnmul^COND^".f64" Dd,Dn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=11 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & 
thv_c2021=2 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & Dm & Dn & Dd
+{
+  product:8 = Dn f* Dm;
+  Dd = 0 f- product;
+}
+
+:vnmul^COND^".f32" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=10 & c0606=1 & c0404=0) |
+                                        ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd
+{
+  product:4 = Sn f* Sm;
+  Sd = 0 f- product;
+}
+
+:vnmul.f16 Sd,Sn,Sm is ( ($(AMODE) & cond=0xe & c2327=0x1c & c2021=2 & c0811=9 & c0606=1 & c0404=0) |
+                         ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd
+{
+  product:2 = Sn:2 f* Sm:2;
+  product = 0 f- product;
+  Sd = zext(product);
+}
+
+:vneg^COND^".f32" Sd,Sm is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x31 & c0611=0x29 & c0404=0 ) |
+                                    ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x31 & thv_c0611=0x29 & thv_c0404=0 ) ) & Sm & Sd
+{
+  build COND;
+  build Sd;
+  build Sm;
+  Sd = 0 f- Sm;
+}
+
+:vneg^COND^".f64" Dd,Dm is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x31 & c0611=0x2d & c0404=0 ) |
+                                    ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x31 & thv_c0611=0x2d & thv_c0404=0 ) ) & Dd & Dm
+{
+  build COND;
+  build Dd;
+  build Dm;
+  Dd = 0 f- Dm;
+}
+
+@endif # VFPv2 || VFPv3
+
+@if defined(SIMD)
+
+#F6.1.141 VORR (register) 64-bit SIMD vector variant (A1 and T1)
+:vorr Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=0 & c0404=1) |
+                    ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm
+
+{
+  Dd = Dn | Dm;
+}
+
+#F6.1.141 VORR (register) 128-bit SIMD vector variant (A1 and T1)
+:vorr Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=1 & c0404=1) |
+                    ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm
+{
+  Qd = Qn | Qm;
+}
+
+#F6.1.140 VORR and F6.1.138 VORN (immediate) 64-bit SIMD vector variant
+:vorr Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=1 ) |
+                           ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=1) ) & Dd & simdExpImm_8
+{
+  Dd = Dd | simdExpImm_8;
+}
+
+#F6.1.140 VORR and F6.1.138 VORN (immediate) 128-bit SIMD vector variant
+:vorr Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=5 ) |
+                            ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=5) ) & Qd & simdExpImm_16
+{
+  Qd = Qd | simdExpImm_16;
+}
+
+#F6.1.139 VORN (register) 64-bit SIMD vector variant (A1 and T1)
+:vorn Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=0 & c0404=1) |
+                    ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm
+
+{
+  Dd = Dn | ~Dm;
+}
+
+#F6.1.139 VORN (register) 128-bit SIMD vector variant (A1 and T1)
+:vorn Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=1 & c0404=1) |
+                    ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm
+{
+  Qd = Qn | ~Qm;
+}
+
+@endif # SIMD
+
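VORR and VORN above are whole-register bitwise operations, which is why, unlike the arithmetic constructors, they reference no element-size subtable. A direct Python model of the two p-code bodies, with a hypothetical 64-bit D register held as a plain integer:

    # Illustrative only: lane-agnostic bitwise ops on a 64-bit D register.
    MASK64 = (1 << 64) - 1

    def vorr(dn: int, dm: int) -> int:
        return (dn | dm) & MASK64               # Dd = Dn | Dm

    def vorn(dn: int, dm: int) -> int:
        return (dn | (~dm & MASK64)) & MASK64   # Dd = Dn | ~Dm

    assert vorn(0x00FF00FF00FF00FF, 0x0) == MASK64  # OR with ~0 sets every bit
    print(hex(vorr(0x0F0F, 0xF0F0)))                # 0xffff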
+
+#######
+# VPUSH (A2)
+#
+
+@if defined(VFPv2) || defined(VFPv3) || defined(SIMD)
+
+buildVpushSdList: Sreg is counter=0 & Sreg [ regNum=regNum+1; ] { * mult_addr = Sreg; mult_addr = mult_addr + 4; }
+buildVpushSdList: Sreg,buildVpushSdList is Sreg & buildVpushSdList [ counter=counter-1; regNum=regNum+1; ] { * mult_addr = Sreg; mult_addr = mult_addr + 4; }
+
+vpushSdList: "{"^buildVpushSdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVpushSdList [ regNum=(c1215<<1)+D22-1; counter=c0007-1; ] { sp = sp - c0007 * 4; mult_addr = sp; build buildVpushSdList; }
+vpushSdList: "{"^buildVpushSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpushSdList [ regNum=(thv_c1215<<1)+thv_D22-1; counter=thv_c0007-1; ] { sp = sp - thv_c0007 * 4; mult_addr = sp; build buildVpushSdList; }
+
+buildVpushSd64List: Dreg is counter=0 & Dreg [ regNum=regNum+1; ] { * mult_addr = Dreg:8; mult_addr = mult_addr + 8; }
+buildVpushSd64List: Dreg,buildVpushSd64List is Dreg & buildVpushSd64List [ counter=counter-1; regNum=regNum+1; ] { * mult_addr = Dreg:8; mult_addr = mult_addr + 8; build buildVpushSd64List; }
+
+vpushSd64List: "{"^buildVpushSd64List^"}" is TMode=0 & D22 & c1215 & c0007 & buildVpushSd64List [ regNum=(D22<<4)+c1215-1; counter=c0007 / 2 - 1; ] { sp = sp - c0007 * 4; mult_addr = sp; build buildVpushSd64List; }
+vpushSd64List: "{"^buildVpushSd64List^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpushSd64List [ regNum=(thv_D22<<4)+thv_c1215-1; counter=thv_c0007 / 2 - 1; ] { sp = sp - thv_c0007 * 4; mult_addr = sp; build buildVpushSd64List; }
+
+
+:vpush^COND vpushSd64List is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c1619=13 & c2121=1 & c2020=0 & c0811=11) |
+                                      ($(TMODE_E) & thv_c2327=0x1a & thv_c1619=13 & thv_c2121=1 & thv_c2020=0 & thv_c0811=11) ) & vpushSd64List
+{
+  build vpushSd64List;
+}
+
+:vpush^COND vpushSdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c1619=13 & c2121=1 & c2020=0 & c0811=10) |
+                                    ($(TMODE_E) & thv_c2327=0x1a & thv_c1619=13 & thv_c2121=1 & thv_c2020=0 & thv_c0811=10) ) & vpushSdList
+{
+  build vpushSdList;
+}
+
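The vpushSdList machinery above is worth unpacking: the bracketed disassembly actions seed regNum from (c1215<<1)+D22 and a down-counter from the imm8 field (c0007), and the recursive buildVpushSdList subtable then emits one store per register as regNum climbs, after the p-code has pre-decremented sp by the whole list size. A rough Python simulation of the resulting effect for a VPUSH of S registers; the function name, parameters, and register-file model are this sketch's own:

    # Illustrative only: mirrors what vpushSdList computes at disassembly time.
    def vpush_s_list(vd: int, d: int, imm8: int, sp: int):
        first = (vd << 1) + d      # regNum starts at first-1; each recursion adds 1
        sp -= imm8 * 4             # p-code: sp = sp - c0007 * 4
        addr = sp                  # p-code: mult_addr = sp
        stores = []
        for _ in range(imm8):      # counter walks the list down to 0
            stores.append((hex(addr), f"s{first}"))
            first += 1
            addr += 4              # p-code: mult_addr = mult_addr + 4
        return sp, stores

    sp, stores = vpush_s_list(vd=4, d=0, imm8=3, sp=0x1000)
    print(hex(sp), stores)  # 0xff4 [('0xff4','s8'), ('0xff8','s9'), ('0xffc','s10')]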
+buildVpopSdList: Sreg is counter=0 & Sreg [ regNum=regNum+1; ]
+   { tmp:4 = *mult_addr; Sreg = zext(tmp); mult_addr = mult_addr + 4; }
+buildVpopSdList: Sreg,buildVpopSdList is Sreg & buildVpopSdList [ counter=counter-1; regNum=regNum+1; ]
+   { tmp:4 = *mult_addr; Sreg = zext(tmp); mult_addr = mult_addr + 4; }
+
+vpopSdList: "{"^buildVpopSdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVpopSdList [ regNum=(c1215<<1)+D22-1; counter=c0007-1; ]
+   { mult_addr = sp; sp = sp + c0007 * 4; build buildVpopSdList; }
+vpopSdList: "{"^buildVpopSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpopSdList [ regNum=(thv_c1215<<1)+thv_D22-1; counter=thv_c0007-1; ]
+   { mult_addr = sp; sp = sp + thv_c0007 * 4; build buildVpopSdList; }
+
+buildVpopSd64List: Dreg is counter=0 & Dreg [ regNum=regNum+1; ]
+   { Dreg = *mult_addr; mult_addr = mult_addr + 8; }
+buildVpopSd64List: Dreg,buildVpopSd64List is Dreg & buildVpopSd64List [ counter=counter-1; regNum=regNum+1; ]
+   { Dreg = *mult_addr; mult_addr = mult_addr + 8; build buildVpopSd64List; }
+
+vpopSd64List: "{"^buildVpopSd64List^"}" is TMode=0 & D22 & c1215 & c0007 & buildVpopSd64List [ regNum=(D22<<4)+c1215-1; counter=c0007 / 2 - 1; ]
+   { mult_addr = sp; sp = sp + c0007 * 4; build buildVpopSd64List; }
+vpopSd64List: "{"^buildVpopSd64List^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpopSd64List [ regNum=(thv_D22<<4)+thv_c1215-1; counter=thv_c0007 / 2 - 1; ]
+   { mult_addr = sp; sp = sp + thv_c0007 * 4; build buildVpopSd64List; }
+
+:vpop^COND vpopSd64List is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c1619=13 & c2121=1 & c2020=1 & c0811=11 & c0000=0) |
+                                    ($(TMODE_E) & thv_c2327=0x19 & thv_c1619=13 & thv_c2121=1 & thv_c2020=1 & thv_c0811=11 & thv_c0000=0) ) & vpopSd64List
+{
+  build vpopSd64List;
+}
+
+:vpop^COND vpopSdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c1619=13 & c2121=1 & c2020=1 & c0811=10) |
+                                  ($(TMODE_E) & thv_c2327=0x19 & thv_c1619=13 & thv_c2121=1 & thv_c2020=1 & thv_c0811=10) ) & vpopSdList
+{
+  build vpopSdList;
+}
+
+@endif # VFPv2 || VFPv3 || SIMD
+
+@if defined(SIMD)
+
+define pcodeop SatQ;
+define pcodeop SignedSatQ;
+
+:vqadd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=0 & c0404=1) |
+                                    ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=0 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm
+{
+  Dd = VectorAdd(Dn,Dm,esize2021,udt);
+  Dd = SatQ(Dd, esize2021, udt);
+}
+
+:vqadd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=1 & c0404=1) |
+                                    ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=0 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd
+{
+  Qd = VectorAdd(Qn,Qm,esize2021,udt);
+  Qd = SatQ(Qd, esize2021, udt);
+}
+
+:vqmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=5 & c0606 & c0404=0) |
+                                 ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=5 & thv_c0404=0) ) & esize1819x2 & Dd & Qm
+{
+  Dd = VectorCopyNarrow(Qm,esize1819x2,c0606:1);
+  Dd = SatQ(Dd, esize1819x2,0:1);
+}
+
+:vqmovun.i^esize1819x2 Dd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=9 & c0404=0) |
+                                  ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0611=9 & thv_c0404=0) ) & esize1819x2 & Dd & Qm
+{
+  Dd = VectorCopyNarrow(Qm,esize1819x2,0:1);
+  Dd = SatQ(Dd, esize1819x2,0:1);
+}
+
+:vqdmlal.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x9 & c0606=0 & c0404=0 ) |
+                                     ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x9 & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd
+
+{
+  Qd = VectorDoubleMultiplyAccumulateLong(Dn,Dm,esize2021,0:1);
+  Qd = SatQ(Qd, esize2021,0:1);
+}
+
+:vqdmlal.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2)& c0811=0x3 & c0606=1 & c0404=0) |
+                                         ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x3 & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd
+
+{
+  Qd = VectorDoubleMultiplyAccumulateLong(Dn,vmlDmA,esize2021,0:1);
+  Qd = SatQ(Qd, esize2021,0:1);
+}
+
+:vqdmlsl.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0xb & c0606=0 & c0404=0 ) |
+                                     ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd
+
+{
+  Qd = VectorDoubleMultiplySubtractLong(Dn,Dm,esize2021,0:1);
+  Qd = SatQ(Qd, esize2021,0:1);
+}
+
+:vqdmlsl.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2)& c0811=0x7 & c0606=1 & c0404=0) |
+                                         ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x7 & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd
+
+{
+  Qd = VectorDoubleMultiplySubtractLong(Dn,vmlDmA,esize2021,0:1);
+  Qd = SatQ(Qd, esize2021,0:1);
+}
+
+:vqdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) |
+                                     ( $(TMODE_E) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Dd
+
+{
+  Dd = VectorDoubleMultiplyHighHalf(Dn,Dm,esize2021,0:1);
+  Dd = SatQ(Dd, esize2021,0:1);
+}
+
+:vqdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) |
+                                     ( $(TMODE_E) & 
thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & Qm & Qn & Qd + +{ + Qd = VectorDoubleMultiplyHighHalf(Qn,Qm,esize2021,0:1); + Qd = SatQ(Qd, esize2021,0:1); +} + +:vqdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xc & c0606=1 & c0404=0) | + ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Dd + +{ + Dd = VectorDoubleMultiplyLong(Dn,vmlDmA,esize2021,0:1); + Dd = SatQ(Dd, esize2021,0:1); +} + +:vqdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xc & c0606=1 & c0404=0) | + ( $(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Qn & Qd + +{ + Qd = VectorDoubleMultiplyLong(Qn,vmlDmA,esize2021,0:1); + Qd = SatQ(Qd, esize2021,0:1); +} + +:vqdmull.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2327=5 & c2021<3 & c0811=0xD & Q6=0 & c0404=0 ) | + ( $(TMODE_E) & thv_c2327=0x1f & thv_c2324=1 & thv_c2021<3 & thv_c0811=0xD & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd + +{ + Qd = VectorDoubleMultiplyLong(Dn,Dm,esize2021,0:1); + Qd = SatQ(Qd, esize2021,0:1); +} + +:vqdmull.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2327=5 & c2021<3 & c0811=0xb & Q6=1 & c0404=1 ) | + ( $(TMODE_E) & thv_c2327=0x1e & thv_c2324=1 & thv_c2021<3 & thv_c0811=0xb & thv_Q6=1 & thv_c0404=1 ) ) & esize2021 & vmlDmA & Dn & Qd + +{ + Qd = VectorDoubleMultiplyLong(Dn,vmlDmA,esize2021,0:1); + Qd = SatQ(Qd, esize2021,0:1); +} + +:vqrdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | + ( $(TMODE_F) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Dd + +{ + Dd = VectorRoundDoubleMultiplyHighHalf(Dn,Dm,esize2021,0:1); + Dd = SatQ(Dd, esize2021,0:1); +} + +:vqrdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | + ( $(TMODE_F) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & Qm & Qn & Qd + +{ + Qd = VectorRoundDoubleMultiplyHighHalf(Qn,Qm,esize2021,0:1); + Qd = SatQ(Qd, esize2021,0:1); +} + +:vqrdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xd & Q6=1 & c0404=0) | + ( $(TMODE_E) & thv_c2327=0x1f & thv_c2323=1 & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Dd + +{ + Dd = VectorRoundDoubleMultiplyHighHalf(Dn,vmlDmA,esize2021,0:1); + Dd = SatQ(Dd, esize2021,0:1); +} + +:vqrdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xd & Q6=1 & c0404=0) | + ( $(TMODE_F) & thv_c2327=0x1f & thv_c2323=1 & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Qn & Qd + +{ + Qd = VectorRoundDoubleMultiplyHighHalf(Qn,vmlDmA,esize2021,0:1); + Qd = SatQ(Qd, esize2021,0:1); +} + + +:vqsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=2 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm +{ + Dd = VectorSub(Dn,Dm,esize2021,udt); + Dd = SatQ(Dd, 
esize2021, udt);
+}
+
+:vqsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=1 & c0404=1) |
+                                    ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=2 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd
+{
+  Qd = VectorSub(Qn,Qm,esize2021,udt);
+  Qd = SatQ(Qd, esize2021, udt);
+}
+
+#######
+# TODO: lots of missing stuff
+#
+
+#######
+# VRECPE
+define pcodeop VectorReciprocalEstimate;
+
+:vrecpe.^fdt^32 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=1 & c0404=0) |
+                           ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=0 & thv_Q6=1 & thv_c0404=0) ) & fdt & Qm & Qd
+{
+  Qd = VectorReciprocalEstimate(Qm,fdt);
+}
+
+:vrecpe.^fdt^32 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=0 & c0404=0) |
+                           ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=0 & thv_Q6=0 & thv_c0404=0) ) & fdt & Dm & Dd
+{
+  Dd = VectorReciprocalEstimate(Dm,fdt);
+}
+
+#######
+# VRECPS
+define pcodeop VectorReciprocalStep;
+
+:vrecps.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=1 & c0404=1) |
+                          ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=0xf & thv_Q6=1 & thv_c0404=1) ) & Qn & Qm & Qd
+{
+  Qd = VectorReciprocalStep(Qn,Qm);
+}
+
+:vrecps.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=0 & c0404=1) |
+                          ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=0xf & thv_Q6=0 & thv_c0404=1) ) & Dn & Dm & Dd
+{
+  Dd = VectorReciprocalStep(Dn,Dm);
+}
+
+#######
+# VREV
+#
+
+define pcodeop vrev;
+
+:vrev16.^esize1819x3 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=1 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3
+{
+  Qd = vrev(Qm,esize1819x3);
+}
+
+:vrev32.^esize1819x3 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=1 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3
+{
+  Qd = vrev(Qm,esize1819x3);
+}
+
+:vrev64.^esize1819x3 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=1 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3
+{
+  Qd = vrev(Qm,esize1819x3);
+}
+
+:vrev16.^esize1819x3 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=0 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3
+{
+  Dd = vrev(Dm,esize1819x3);
+}
+
+:vrev32.^esize1819x3 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=0 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3
+{
+  Dd = vrev(Dm,esize1819x3);
+}
+
+:vrev64.^esize1819x3 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=0 & c0404=0) |
+                                ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3
+{
+  Dd = vrev(Dm,esize1819x3);
+}
+
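The vrev pcodeop above is left opaque to the decompiler; one plausible reading, matching the VREV16/VREV32/VREV64 mnemonics, is "reverse the order of esize-bit elements within each 16-, 32- or 64-bit container". A hedged Python sketch of that interpretation -- the function and its signature are assumptions of this sketch, not the spec's definition:

    # Illustrative only: reverse esize-bit elements inside each region-bit container.
    def vrev(value: int, total_bits: int, region: int, esize: int) -> int:
        out = 0
        for base in range(0, total_bits, region):
            elems = [(value >> (base + i)) & ((1 << esize) - 1)
                     for i in range(0, region, esize)]
            for i, e in enumerate(reversed(elems)):
                out |= e << (base + i * esize)
        return out

    # VREV32.8 on one 64-bit D register: byte order flips within each 32-bit word
    print(hex(vrev(0x0102030405060708, 64, 32, 8)))  # 0x403020108070605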
+#######
+# VSH
+#
+
+define pcodeop VectorShiftLeft;
+define pcodeop VectorRoundShiftLeft;
+define pcodeop VectorShiftRight;
+define pcodeop VectorShiftLeftInsert;
+define pcodeop VectorShiftRightInsert;
+define pcodeop VectorShiftRightNarrow;
+define pcodeop VectorShiftRightAccumulate;
+define pcodeop VectorRoundShiftRight;
+define pcodeop VectorRoundShiftRightNarrow;
+define pcodeop VectorRoundShiftRightAccumulate;
+
+ShiftSize: "8" is TMode=0 & c1921=1 & L7=0 { export 8:8; }
+ShiftSize: "16" is TMode=0 & c2021=1 & L7=0 { export 16:8; }
+ShiftSize: "32" is TMode=0 & c2121=1 & L7=0 { export 32:8; }
+ShiftSize: "64" is TMode=0 & L7=1 { export 64:8; }
+ShiftSize: "8" is TMode=1 & thv_c1921=1 & thv_L7=0 { export 8:8; }
+ShiftSize: "16" is TMode=1 & thv_c2021=1 & thv_L7=0 { export 16:8; }
+ShiftSize: "32" is TMode=1 & thv_c2121=1 & thv_L7=0 { export 32:8; }
+ShiftSize: "64" is TMode=1 & thv_L7=1 { export 64:8; }
+
+
+ShiftImmRLI: "#"^shift_amt is TMode=0 & c1921=1 & L7=0 & c1621 [ shift_amt = 16 - c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=0 & c2021=1 & L7=0 & c1621 [ shift_amt = 32 - c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=0 & c2121=1 & L7=0 & c1621 [ shift_amt = 64 - c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=0 & L7=1 & c1621 [ shift_amt = 64 - c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_c1921=1 & thv_L7=0 & thv_c1621 [ shift_amt = 16 - thv_c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_c2021=1 & thv_L7=0 & thv_c1621 [ shift_amt = 32 - thv_c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_c2121=1 & thv_L7=0 & thv_c1621 [ shift_amt = 64 - thv_c1621; ] { export *[const]:8 shift_amt; }
+ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ shift_amt = 64 - thv_c1621; ] { export *[const]:8 shift_amt; }
+
+ShiftImmLLI: "#"^shift_amt is TMode=0 & c1921=1 & L7=0 & c1621 [ shift_amt = c1621 - 8; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=0 & c2021=1 & L7=0 & c1621 [ shift_amt = c1621 - 16; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=0 & c2121=1 & L7=0 & c1621 [ shift_amt = c1621 - 32; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=0 & L7=1 & c1621 [ shift_amt = c1621 - 0; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c1921=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 8; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c2021=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 16; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c2121=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 32; ] { export *[const]:8 shift_amt; }
+ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ shift_amt = thv_c1621 - 0; ] { export *[const]:8 shift_amt; }
+
+:vqshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x24 & c0404=1) |
+                                               ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x24 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm
+{
+  Dd = VectorShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt);
+  Dd = SatQ(Dd,esize2021,udt);
+}
+
+:vqshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x20 & c0404=1) |
+                                                ($(TMODE_F) & thv_c2327=0x1f & (thv_c1919=1 | 
thv_c2020=1 | thv_c2121=1) & thv_c0611=0x20 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm +{ + Dd = VectorShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); + Dd = SatQ(Dd,esize2021,udt); +} + +:vqrshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x25 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x25 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm +{ + Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); + Dd = SatQ(Dd,esize2021,udt); +} + +:vqrshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x21 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x21 & thv_c0404=1) ) & udt & esize2021 & ShiftImmRLI & Dd & Qm +{ + Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); + Dd = SatQ(Dd,esize2021,udt); +} + + +:vqshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1621 & thv_c0811=7 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm +{ + Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); +} + +:vqshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1621 & thv_c0811=7 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm +{ + Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); +} + +:vqshlu.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2828=1 & thv_c2327=0x1f & thv_c1621 & thv_c0811=6 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm +{ + Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); +} + +:vqshlu.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2828=1 & thv_c2327=0x1f & thv_c1621 & thv_c0811=6 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm +{ + Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); +} + + +:vqshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=1 & thv_c0404=1) ) & udt & esize2021 & Qd & Qm & Qn +{ + Qd = VectorShiftLeft(Qm,Qn,esize2021,udt); +} + +:vqshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=0 & thv_c0404=1) ) & udt & esize2021 & Dd & Dm & Dn +{ + Dd = VectorShiftLeft(Dm,Dn,esize2021,udt); +} + + +:vshl.I^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=5 & c0606=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Qd & Qm +{ + Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,0:1); +} + +:vshl.I^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=5 & c0606=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Dd & Dm +{ + Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,0:1); +} + + +:vshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & cond=15 
& c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=1 & thv_c0404=0) ) & udt & esize2021 & Qd & Qm & Qn +{ + Qd = VectorShiftLeft(Qm,Qn,esize2021,udt); +} + +:vshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=0 & thv_c0404=0) ) & udt & esize2021 & Dd & Dm & Dn +{ + Dd = VectorShiftLeft(Dm,Dn,esize2021,udt); +} + +define pcodeop VectorShiftLongLeft; + +:vshll.^udt^ShiftSize Qd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=10 & c0607=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=10 & thv_c0607=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Dm +{ + Qd = VectorShiftLongLeft(Dm,ShiftImmLLI); +} + +:vshll.^udt^esize1819 Qd, Dm, "#"^esize1819x3 is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=3 & c0607=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=3 & thv_c0607=0 & thv_c0404=0) ) & udt & esize1819 & esize1819x3 & Qd & Dm +{ + Qd = VectorShiftLongLeft(Dm,esize1819x3); +} + +:vrshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=1 & thv_c0404=0) ) & udt & esize2021 & Qd & Qm & Qn +{ + Qd = VectorRoundShiftLeft(Qm,esize2021,Qn); +} + +:vrshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=0 & thv_c0404=0) ) & udt & esize2021 & Dd & Dm & Dn +{ + Dd = VectorRoundShiftLeft(Dm,esize2021,Dn); +} + +:vrshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=2 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm +{ + Qd = VectorRoundShiftRight(Qm,ShiftImmRLI); +} + +:vrshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=2 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm +{ + Dd = VectorRoundShiftRight(Dm,ShiftImmRLI); +} + +:vrshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=8 & c0707=0 & c0606=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=8 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Dd & Qm +{ + Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI); +} + +:vrsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=3 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm +{ + Qd = VectorRoundShiftRightAccumulate(Qd, Qm,ShiftImmRLI); +} + +:vrsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=3 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm +{ + Dd = VectorRoundShiftRightAccumulate(Dd, Dm,ShiftImmRLI); +} + +:vsli.^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=7 & c0811=5 & c0606=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Dd & Dm +{ + Dd = VectorShiftLeftInsert(Dd, Dm,ShiftImmLLI); +} + +:vsli.^ShiftSize Qd, Qm, ShiftImmLLI is ( 
($(AMODE) & cond=15 & c2327=7 & c0811=5 & c0606=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Qd & Qm +{ + Qd = VectorShiftLeftInsert(Qd, Qm,ShiftImmLLI); +} + +:vsqrt^COND^".f32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=1 & c0811=10 & c0607=3 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=1 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sd +{ + build COND; + build Sd; + build Sm; + Sd = sqrt(Sm); +} + +:vsqrt^COND^".f64" Dd,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=1 & c0811=11 & c0606=1 & c0404=0 ) | + ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=1 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & Dm & Dd +{ + build COND; + build Dd; + build Dm; + Dd = sqrt(Dm); +} + +:vsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=1 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm +{ + Qd = VectorShiftRightAccumulate(Qd, Qm,ShiftImmRLI); +} + +:vsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=1 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm +{ + Dd = VectorShiftRightAccumulate(Dd, Dm,ShiftImmRLI); +} + +:vsri.^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=4 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Qd & Qm +{ + Qd = VectorShiftRightInsert(Qd, Qm,ShiftImmRLI); +} + +:vsri.^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=4 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Dd & Dm +{ + Dd = VectorShiftRightInsert(Dd, Dm,ShiftImmRLI); +} + +####### +# VSHR +# + +:vshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=1 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=0 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm +{ + Qd = VectorShiftRight(Qm,ShiftImmRLI); +} + +:vshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=0 & c0404=1) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=0 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm +{ + Dd = VectorShiftRight(Dm,ShiftImmRLI); +} + +define pcodeop VectorShiftNarrowRight; + +:vshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=8 & c0607=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=8 & thv_c0607=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Qm +{ + Dd = VectorShiftNarrowRight(Qm,ShiftImmRLI); +} + +####### +# VRSQRTE +define pcodeop VectorReciprocalSquareRootEstimate; + +:vrsqrte.^fdt^32 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & fdt & Qm & Qd +{ + Qd = VectorReciprocalSquareRootEstimate(Qm,fdt); +} + +:vrsqrte.^fdt^32 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & 
thv_c0911=2 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0) ) & fdt & Dm & Dd +{ + Dd = VectorReciprocalSquareRootEstimate(Dm,fdt); +} + +####### +# VRSQRTS +define pcodeop VectorReciprocalSquareRootStep; + +:vrsqrts.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=0xf & thv_Q6=1 & thv_c0404=1) ) & Qn & Qm & Qd +{ + Qd = VectorReciprocalSquareRootStep(Qn,Qm); +} + +:vrsqrts.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=0xf & thv_Q6=0 & thv_c0404=1) ) & Dn & Dm & Dd +{ + Dd = VectorReciprocalSquareRootStep(Dn,Dm); +} + + +####### +# VST1 (multiple single elements) +# + +buildVst1DdList: Dreg is Dreg & counter=1 [ counter=0; regNum=regNum+1; ] +{ + * mult_addr = Dreg; +} +buildVst1DdList: Dreg,buildVst1DdList is Dreg & buildVst1DdList [ counter=counter-1; regNum=regNum+1; ] +{ + * mult_addr = Dreg; + mult_addr = mult_addr + 8; + build buildVst1DdList; +} + +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=7 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=1; ] { export 1:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=10 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=2; ] { export 2:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=6 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=3; ] { export 3:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=2 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=4; ] { export 4:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=7 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=1; ] { export 1:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=10 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=2; ] { export 2:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=6 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=3; ] { export 3:4; } +vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=2 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=4; ] { export 4:4; } + +@define Vst1DdList "(c0811=2 | c0811=6 | c0811=7 | c0811=10)" +@define T_Vst1DdList "(thv_c0811=2 | thv_c0811=6 | thv_c0811=7 | thv_c0811=10)" + +:vst1.^esize0607 vst1DdList,RnAligned45 is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=15 & $(Vst1DdList)) | + ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & thv_c0003=15 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & vst1DdList +{ + mult_addr = RnAligned45; + build vst1DdList; +} + +:vst1.^esize0607 vst1DdList,RnAligned45^"!" 
is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=13 & $(Vst1DdList)) | + ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & thv_c0003=13 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & vst1DdList +{ + mult_addr = RnAligned45; + build vst1DdList; + RnAligned45 = RnAligned45 + (8 * vst1DdList); +} + +:vst1.^esize0607 vst1DdList,RnAligned45,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & $(Vst1DdList)) | + ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & VRm & vst1DdList +{ + mult_addr = RnAligned45; + build vst1DdList; + RnAligned45 = RnAligned45 + VRm; +} + +####### +# VST1 (single element to one lane) +# + +vst1Index: val is c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; } + +vst1DdElement2: Dd^"["^vst1Index^"]" is Dd & vst1Index & c1011=0 +{ + ptr:4 = &Dd + vst1Index; + *:1 mult_addr = *[register]:1 ptr; +} +vst1DdElement2: Dd^"["^vst1Index^"]" is Dd & vst1Index & c1011=1 +{ + ptr:4 = &Dd + (2 * vst1Index); + *:2 mult_addr = *[register]:2 ptr; +} +vst1DdElement2: Dd^"["^vst1Index^"]" is Dd & vst1Index & c1011=2 +{ + ptr:4 = &Dd + (4 * vst1Index); + *:4 mult_addr = *[register]:4 ptr; +} + +@define Vst1DdElement2 "((c1011=0 & c0404=0) | (c1011=1 & c0505=0) | (c1011=2 & (c0406=0 | c0406=3))) & vst1DdElement2" + +:vst1.^esize1011 vst1DdElement2,RnAligned2 is $(AMODE) & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & c0003=15 & $(Vst1DdElement2) +{ + mult_addr = RnAligned2; + build vst1DdElement2; +} + +:vst1.^esize1011 vst1DdElement2,RnAligned2^"!" is $(AMODE) & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & c0003=13 & $(Vst1DdElement2) +{ + mult_addr = RnAligned2; + build vst1DdElement2; + RnAligned2 = RnAligned2 + esize1011; +} + +:vst1.^esize1011 vst1DdElement2,RnAligned2,VRm is $(AMODE) & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & VRm & $(Vst1DdElement2) +{ + mult_addr = RnAligned2; + build vst1DdElement2; + RnAligned2 = RnAligned2 + VRm; +} + +thv_vst1Index: val is thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; } + +thv_vst1DdElement2: Dd^"["^thv_vst1Index^"]" is Dd & thv_vst1Index & thv_c1011=0 +{ + ptr:4 = &Dd + thv_vst1Index; + *:1 mult_addr = *[register]:1 ptr; +} +thv_vst1DdElement2: Dd^"["^thv_vst1Index^"]" is Dd & thv_vst1Index & thv_c1011=1 +{ + ptr:4 = &Dd + (2 * thv_vst1Index); + *:2 mult_addr = *[register]:2 ptr; +} +thv_vst1DdElement2: Dd^"["^thv_vst1Index^"]" is Dd & thv_vst1Index & thv_c1011=2 +{ + ptr:4 = &Dd + (4 * thv_vst1Index); + *:4 mult_addr = *[register]:4 ptr; +} + +@define T_Vst1DdElement2 "((thv_c1011=0 & thv_c0404=0) | (thv_c1011=1 & thv_c0505=0) | (thv_c1011=2 & (thv_c0406=0 | thv_c0406=3))) & thv_vst1DdElement2" + +:vst1.^esize1011 thv_vst1DdElement2,RnAligned2 is $(TMODE_F) &thv_c2327=19 & thv_c2021=0 & RnAligned2 & esize1011 & thv_c0809=0 & thv_c0003=15 & $(T_Vst1DdElement2) +{ + mult_addr = RnAligned2; + build thv_vst1DdElement2; +} + +:vst1.^esize1011 thv_vst1DdElement2,RnAligned2^"!" 
is $(TMODE_F) & thv_c2327=19 & thv_c2021=0 & RnAligned2 & esize1011 & thv_c0809=0 & thv_c0003=13 & $(T_Vst1DdElement2)
+{
+    mult_addr = RnAligned2;
+    build thv_vst1DdElement2;
+    RnAligned2 = RnAligned2 + esize1011;
+}
+
+:vst1.^esize1011 thv_vst1DdElement2,RnAligned2,VRm is $(TMODE_F) & thv_c2327=19 & thv_c2021=0 & RnAligned2 & esize1011 & thv_c0809=0 & VRm & $(T_Vst1DdElement2)
+{
+    mult_addr = RnAligned2;
+    build thv_vst1DdElement2;
+    RnAligned2 = RnAligned2 + VRm;
+}
+
+
+#######
+# VST2
+#
+
+#######
+# VST2 (multiple 2-element structures)
+#
+
+# Each vst2Dd variant below interleaves one element pair per pass,
+# looping between the <loop> and <done> labels until mult_dat8 reaches zero.
+vst2Dd: Dreg is Dreg & ((TMode=0 & c0607=0) | (TMode=1 & thv_c0607=0)) & regInc
+{
+    ptr1:4 = &Dreg;
+@if ENDIAN == "little"
+    ptr2:4 = &Dreg + (regInc * 8);
+@else # ENDIAN == "big"
+    ptr2:4 = &Dreg - (regInc * 8);
+@endif # ENDIAN == "big"
+    mult_dat8 = 8;
+    <loop>
+    *:1 mult_addr = *[register]:1 ptr1;
+    mult_addr = mult_addr + 1;
+    *:1 mult_addr = *[register]:1 ptr2;
+    mult_addr = mult_addr + 1;
+    mult_dat8 = mult_dat8 - 1;
+    if(mult_dat8 == 0) goto <done>;
+    ptr1 = ptr1 + 1;
+    ptr2 = ptr2 + 1;
+    goto <loop>;
+    <done>
+}
+vst2Dd: Dreg is Dreg & ((TMode=0 & c0607=1) | (TMode=1 & thv_c0607=1)) & regInc
+{
+    ptr1:4 = &Dreg;
+@if ENDIAN == "little"
+    ptr2:4 = &Dreg + (regInc * 8);
+@else # ENDIAN == "big"
+    ptr2:4 = &Dreg - (regInc * 8);
+@endif # ENDIAN == "big"
+    mult_dat8 = 4;
+    <loop>
+    *:2 mult_addr = *[register]:2 ptr1;
+    mult_addr = mult_addr + 2;
+    *:2 mult_addr = *[register]:2 ptr2;
+    mult_addr = mult_addr + 2;
+    mult_dat8 = mult_dat8 - 1;
+    if(mult_dat8 == 0) goto <done>;
+    ptr1 = ptr1 + 2;
+    ptr2 = ptr2 + 2;
+    goto <loop>;
+    <done>
+}
+vst2Dd: Dreg is Dreg & ((TMode=0 & c0607=2) | (TMode=1 & thv_c0607=2)) & regInc
+{
+    ptr1:4 = &Dreg;
+@if ENDIAN == "little"
+    ptr2:4 = &Dreg + (regInc * 8);
+@else # ENDIAN == "big"
+    ptr2:4 = &Dreg - (regInc * 8);
+@endif # ENDIAN == "big"
+    mult_dat8 = 2;
+    <loop>
+    *:4 mult_addr = *[register]:4 ptr1;
+    mult_addr = mult_addr + 4;
+    *:4 mult_addr = *[register]:4 ptr2;
+    mult_addr = mult_addr + 4;
+    mult_dat8 = mult_dat8 - 1;
+    if(mult_dat8 == 0) goto <done>;
+    ptr1 = ptr1 + 4;
+    ptr2 = ptr2 + 4;
+    goto <loop>;
+    <done>
+}
+
+buildVst2DdListA: is counter=0 { }
+buildVst2DdListA: vst2Dd,buildVst2DdListA is vst2Dd & buildVst2DdListA & esize0607 [ counter=counter-1; regNum=regNum+1; ]
+{
+    build vst2Dd;
+    build buildVst2DdListA;
+}
+
+buildVst2DdListB: is counter2=0 { }
+buildVst2DdListB: Dreg2 is Dreg2 & counter2=1 & esize0607 [ counter2=0; reg2Num=reg2Num+1; ] { }
+buildVst2DdListB: Dreg2,buildVst2DdListB is Dreg2 & buildVst2DdListB & esize0607 [ counter2=counter2-1; reg2Num=reg2Num+1; ] { }
+
+vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=0 & c0811=8 & D22 & c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(D22<<4)+c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; }
+vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=0 & c0811=9 & D22 & c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; }
+vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=0 & c0811=3 & D22 & c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVst2DdListA; build buildVst2DdListB; export 4:4; }
+vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=8 & thv_D22 & thv_c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1;
reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; } +vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=9 & thv_D22 & thv_c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; } +vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=3 & thv_D22 & thv_c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVst2DdListA; build buildVst2DdListB; export 4:4; } + + +@define Vst2DdList "(c0811=3 | c0811=8 | c0811=9)" +@define T_Vst2DdList "(thv_c0811=3 | thv_c0811=8 | thv_c0811=9)" + +:vst2.^esize0607 vst2DdList,RnAligned45 is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=15 & $(Vst2DdList) ) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & thv_c0003=15 & $(T_Vst2DdList) ) ) & RnAligned45 & esize0607 & vst2DdList +{ + mult_addr = RnAligned45; + build vst2DdList; +} + +:vst2.^esize0607 vst2DdList,RnAligned45^"!" is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=13 & $(Vst2DdList) ) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & thv_c0003=13 & $(T_Vst2DdList) ) ) & RnAligned45 & esize0607 & vst2DdList +{ + mult_addr = RnAligned45; + build vst2DdList; + RnAligned45 = RnAligned45 + (8 * vst2DdList); +} + +:vst2.^esize0607 vst2DdList,RnAligned45,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0607<3 & $(Vst2DdList) ) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & $(T_Vst2DdList) ) ) & RnAligned45 & VRm & esize0607 & vst2DdList +{ + mult_addr = RnAligned45; + build vst2DdList; + RnAligned45 = RnAligned45 + VRm; +} + +####### +# VST2 (single 2-element structure to one lane) +# + +vst2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index +{ +} + +vst2Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { } +vst2Align2: "@16" is TMode=0 & c1011=0 & c0404=1 { } +vst2Align2: "@32" is TMode=0 & c1011=1 & c0404=1 { } +vst2Align2: "@64" is TMode=0 & c1011=2 & c0405=1 { } +vst2Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { } +vst2Align2: "@16" is TMode=1 & thv_c1011=0 & thv_c0404=1 { } +vst2Align2: "@32" is TMode=1 & thv_c1011=1 & thv_c0404=1 { } +vst2Align2: "@64" is TMode=1 & thv_c1011=2 & thv_c0405=1 { } + +vst2RnAligned2: "["^VRn^vst2Align2^"]" is VRn & vst2Align2 { export VRn; } + +buildVst2DdList2: is counter=0 { } +buildVst2DdList2: vst2DdElement2 is counter=1 & vst2DdElement2 [ counter=0; regNum=regNum+regInc; ] { } +buildVst2DdList2: vst2DdElement2,buildVst2DdList2 is vst2DdElement2 & buildVst2DdList2 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=0 & D22 & c1215 & buildVst2DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=2; ] { } # Single +vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVst2DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=2; ] { } # Double +vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single +vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double + +:vst2.^esize1011 
vst2DdList2,vst2RnAligned2 is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=15 ) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 & thv_c0003=15 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 + unimpl + +:vst2.^esize1011 vst2DdList2,vst2RnAligned2^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=13 ) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 & thv_c0003=13 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 + unimpl + +:vst2.^esize1011 vst2DdList2,vst2RnAligned2,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 ) | + ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 & VRm + unimpl + + +####### +# VST3 +# + +####### +# VST3 (multiple 3-element structures) +# + + +vst3Align: is TMode=0 & c0404=0 { } +vst3Align: "@64" is TMode=0 & c0404=1 { } +vst3Align: is TMode=1 & thv_c0404=0 { } +vst3Align: "@64" is TMode=1 & thv_c0404=1 { } + + +vst3RnAligned: "["^VRn^vst3Align^"]" is VRn & vst3Align { export VRn; } + +buildvst3DdList: is counter=0 { } +buildvst3DdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { } +buildvst3DdList: Dreg,buildvst3DdList is buildvst3DdList & Dreg [ counter=counter-1; regNum=regNum+regInc; ] { } + +vst3DdList: "{"^buildvst3DdList^"}" is TMode=0 & c0811=4 & D22 & c1215 & buildvst3DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single +vst3DdList: "{"^buildvst3DdList^"}" is TMode=0 & c0811=5 & D22 & c1215 & buildvst3DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double +vst3DdList: "{"^buildvst3DdList^"}" is TMode=1 & thv_c0811=4 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single +vst3DdList: "{"^buildvst3DdList^"}" is TMode=1 & thv_c0811=5 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double + + +:vst3.^esize0607 vst3DdList,vst3RnAligned is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=15 ) | + ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0003=15 ) ) & vst3RnAligned & esize0607 & vst3DdList + unimpl + +:vst3.^esize0607 vst3DdList,vst3RnAligned^"!" 
is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=13 ) |
+                    ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0003=13 ) ) & vst3RnAligned & esize0607 & vst3DdList
+    unimpl
+
+:vst3.^esize0607 vst3DdList,vst3RnAligned,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0) |
+                    ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 ) ) & vst3RnAligned & esize0607 & vst3DdList & VRm
+    unimpl
+
+
+#######
+# VST3 (single 3-element structure to one lane)
+#
+
+vst3Rn: "["^VRn^"]" is VRn { export VRn; }
+
+vst3DdList2: "{"^buildvst3DdList^"}" is TMode=0 & D22 & c1215 & buildvst3DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single
+vst3DdList2: "{"^buildvst3DdList^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildvst3DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double
+vst3DdList2: "{"^buildvst3DdList^"}" is TMode=1 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single
+vst3DdList2: "{"^buildvst3DdList^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double
+
+:vst3.^esize1011 vst3DdList2,vst3Rn is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=15 ) |
+                    ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 & thv_c0003=15 ) ) & vst3Rn & esize1011 & vst3DdList2
+    unimpl
+
+:vst3.^esize1011 vst3DdList2,vst3Rn^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=13 ) |
+                    ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 & thv_c0003=13 ) ) & vst3Rn & esize1011 & vst3DdList2
+    unimpl
+
+:vst3.^esize1011 vst3DdList2,vst3Rn,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 ) |
+                    ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 ) ) & vst3Rn & esize1011 & vst3DdList2 & VRm
+    unimpl
+
+#######
+# VST4 (multiple 4-element structures)
+#
+
+vst4Align: is TMode=0 & c0405=0 { }
+vst4Align: "@64" is TMode=0 & c0405=1 { }
+vst4Align: "@128" is TMode=0 & c0405=2 { }
+vst4Align: "@256" is TMode=0 & c0405=3 { }
+vst4Align: is TMode=1 & thv_c0405=0 { }
+vst4Align: "@64" is TMode=1 & thv_c0405=1 { }
+vst4Align: "@128" is TMode=1 & thv_c0405=2 { }
+vst4Align: "@256" is TMode=1 & thv_c0405=3 { }
+
+vst4RnAligned: "["^VRn^vst4Align^"]" is VRn & vst4Align { export VRn; }
+
+buildVst4DdList: is counter=0 { }
+buildVst4DdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+regInc; ] { }
+buildVst4DdList: Dreg,buildVst4DdList is buildVst4DdList & Dreg [ counter=counter-1; regNum=regNum+regInc; ] { }
+
+vst4DdList: "{"^buildVst4DdList^"}" is TMode=0 & c0808=0 & D22 & c1215 & buildVst4DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single
+vst4DdList: "{"^buildVst4DdList^"}" is TMode=0 & c0808=1 & D22 & c1215 & buildVst4DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double
+vst4DdList: "{"^buildVst4DdList^"}" is TMode=1 & thv_c0808=0 & thv_D22 & thv_c1215 & buildVst4DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single
+vst4DdList: "{"^buildVst4DdList^"}" is TMode=1 & thv_c0808=1 & thv_D22 & thv_c1215 & buildVst4DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double
+
+:vst4.^esize0607 vst4DdList,vst4RnAligned is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=15) |
+                    ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3 & thv_c0003=15) )
& vst4RnAligned & esize0607 & vst4DdList unimpl + +:vst4.^esize0607 vst4DdList,vst4RnAligned^"!" is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=13) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3 & thv_c0003=13) ) & vst4RnAligned & esize0607 & vst4DdList unimpl + +:vst4.^esize0607 vst4DdList,vst4RnAligned,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3) | + ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3) ) & VRm & vst4RnAligned & esize0607 & vst4DdList unimpl + +####### +# VST4 (single 4-element structure from one lane) +# + +vst4Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; } +vst4Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; } + + +vst4DdElement2: Dreg^"["^vst4Index^"]" is Dreg & vst4Index +{ +} + +vst4Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { } +vst4Align2: "@32" is TMode=0 & c1011=0 & c0404=1 { } +vst4Align2: "@64" is TMode=0 & ((c1011=1 & c0404=1) | (c1011=2 & c0405=1)) { } +vst4Align2: "@128" is TMode=0 & c1011=2 & c0405=2 { } +vst4Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { } +vst4Align2: "@32" is TMode=1 & thv_c1011=0 & thv_c0404=1 { } +vst4Align2: "@64" is TMode=1 & ((thv_c1011=1 & thv_c0404=1) | (thv_c1011=2 & thv_c0405=1)) { } +vst4Align2: "@128" is TMode=1 & thv_c1011=2 & thv_c0405=2 { } + +vst4RnAligned2: "["^VRn^vst4Align2^"]" is VRn & vst4Align2 { export VRn; } + +buildVst4DdList2: is counter=0 { } +buildVst4DdList2: vst4DdElement2 is counter=1 & vst4DdElement2 [ counter=0; regNum=regNum+regInc; ] { } +buildVst4DdList2: vst4DdElement2,buildVst4DdList2 is vst4DdElement2 & buildVst4DdList2 [ counter=counter-1; regNum=regNum+regInc; ] { } + +vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=0 & D22 & c1215 & buildVst4DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single +vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVst4DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double +vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single +vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double + +:vst4.^esize1011 vst4DdList2,vst4RnAligned2 is ( ($(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=15) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3 & thv_c0003=15) ) & vst4RnAligned2 & esize1011 & vst4DdList2 unimpl + +:vst4.^esize1011 vst4DdList2,vst4RnAligned2^"!" 
is ( ($(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=13) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3 & thv_c0003=13) ) & vst4RnAligned2 & esize1011 & vst4DdList2 unimpl + +:vst4.^esize1011 vst4DdList2,vst4RnAligned2,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3) | + ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3) ) & VRm & vst4RnAligned2 & esize1011 & vst4DdList2 unimpl + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) || defined(SIMD) + +####### +# VSTM (A1) +# + +buildVstmDdList: is counter=0 { } +buildVstmDdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+1; ] +{ + *mult_addr = Dreg; + mult_addr = mult_addr + 8; +} +buildVstmDdList: Dreg,buildVstmDdList is Dreg & buildVstmDdList [ counter=counter-1; regNum=regNum+1; ] +{ + *mult_addr = Dreg; + mult_addr = mult_addr + 8; + build buildVstmDdList; +} + +vstmDdList: "{"^buildVstmDdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVstmDdList [ regNum=(D22<<4)+c1215-1; counter=c0007>>1; ] { } +vstmDdList: "{"^buildVstmDdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVstmDdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=thv_c0007>>1; ] { } + +:vstmia^COND vldmRn,vstmDdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c2121 & c2020=0 & c0811=11 & c0000=0) | + ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=0 & thv_c0811=11 & thv_c0000=0) ) & vldmRn & vstmDdList & vldmOffset & vldmUpdate +{ + mult_addr = vldmRn; + build vstmDdList; + build vldmUpdate; +} + +:vstmdb^COND vldmRn,vstmDdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c2121=1 & c2020=0 & c0811=11 & c0000=0) | + ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=0 & thv_c0811=11 & thv_c0000=0) ) & vldmRn & vstmDdList & vldmOffset +{ + local start_addr = vldmRn - vldmOffset; + mult_addr = start_addr; + build vstmDdList; + vldmRn = start_addr; +} + +@endif # VFPv2 | VFPv3 | SIMD + +@if defined(VFPv2) || defined(VFPv3) + +####### +# VSTM (A2) +# + +buildVstmSdList: is counter=0 { } +buildVstmSdList: Sreg is counter=1 & Sreg [ counter=0; regNum=regNum+1; ] +{ + *mult_addr = Sreg; + mult_addr = mult_addr + 4; +} +buildVstmSdList: Sreg,buildVstmSdList is Sreg & buildVstmSdList [ counter=counter-1; regNum=regNum+1; ] +{ + *mult_addr = Sreg; + mult_addr = mult_addr + 4; + build buildVstmSdList; +} + +vstmSdList: "{"^buildVstmSdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVstmSdList [ regNum=(c1215<<1) + D22 -1; counter=c0007; ] { } +vstmSdList: "{"^buildVstmSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVstmSdList [ regNum=(thv_c1215<<1) + thv_D22 -1; counter=thv_c0007; ] { } + +:vstmia^COND vldmRn,vstmSdList is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x19 & c2121 & c2020=0 & c0811=10 ) | + ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=0 & thv_c0811=10 ) ) & vldmRn & vstmSdList & vldmOffset & vldmUpdate +{ + mult_addr = vldmRn; + build vstmSdList; + build vldmUpdate; +} + +:vstmdb^COND vldmRn,vstmSdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c2121=1 & c2020=0 & c0811=10 ) | + ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=0 & thv_c0811=10) ) & vldmRn & vstmSdList & vldmOffset +{ + local start_addr = vldmRn - vldmOffset; + mult_addr = start_addr; + build vstmSdList; + vldmRn = start_addr; +} + + +####### +# VSTR +# + +:vstr^COND^".64" Dd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=0 & c0811=11) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=0 & thv_c0811=11)) & Dd & vldrRn +{ + *vldrRn = Dd; +} + 
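+# Note: vldrRn (the addressing subtable shared with VLDR, defined elsewhere
+# in the ARM module) exports the computed effective address, so each VSTR
+# form is a single store sized by its source register: 8 bytes for the D
+# register above, 4 bytes for the S register below.
+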
+:vstr^COND^".32" Sd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=0 & c0811=10) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=0 & thv_c0811=10)) & Sd & vldrRn +{ + *vldrRn = Sd; +} + +@endif # VFPv2 || VFPv3 || SIMD + + +####### +# VSUB +# + +@if defined(SIMD) + +define pcodeop FloatVectorSub; +define pcodeop VectorSubAndNarrow; + +:vsub.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c0811=8 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dn & Dd & Dm +{ + Dd = VectorSub(Dn,Dm,esize2021); +} + +:vsub.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c0811=8 & Q6=1 & c0404=0) | + ($(TMODE_F) &thv_c2327=0x1e & thv_c0811=8 & thv_Q6=1 & thv_c0404=0) ) & esize2021 & Qm & Qn & Qd +{ + Qd = VectorSub(Qn,Qm,esize2021); +} + +:vsub.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Dd +{ + Dd = FloatVectorSub(Dn,Dm,2:1,32:1); +} + +:vsub.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & Qn & Qd & Qm +{ + Qd = FloatVectorSub(Qn,Qm,2:1,32:1); +} + +:vsubhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=5 & c0811=6 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=6 & thv_Q6=0 & thv_c0404=0)) & esize2021x2 & Dd & Qn & Qm +{ + Dd = VectorSubAndNarrow(Qn,Qm,esize2021x2); +} + +:vsubl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=2 & c0606=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=2 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Dn & Qd & Dm +{ + Qd = VectorSub(Dn,Dm,esize2021,udt); +} + +:vsubw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=3 & c0606=0 & c0404=0) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=3 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Qn & Qd & Dm +{ + Qd = VectorSub(Qn,Dm,esize2021,udt); +} + +@endif # SIMD + +@if defined(VFPv2) || defined(VFPv3) + +:vsub^COND^".f32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=10 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd +{ + build COND; + build Sd; + build Sm; + build Sn; + Sd = Sn f- Sm; +} + +:vsub^COND^".f64" Dd,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=11 & c0606=1 & c0404=0 ) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & Dm & Dn & Dd +{ + build COND; + build Dd; + build Dm; + build Dn; + Dd = Dn f- Dm; +} + +@endif # VFPv2 || VFPv3 + +@if defined(SIMD) + + +####### +# VSWP +# + +:vswp Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=0 & c0404=0 ) | + ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm +{ + tmp:8 = Dm; + Dm = Dd; + Dd = tmp; +} + +:vswp Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=1 & c0404=0 ) | + ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm +{ + tmp:16 = Qm; + Qm = Qd; + Qd = tmp; +} + + +########### +# VTBL/VTBX +# + +define pcodeop VectorTableLookup; + +buildVtblDdList: is counter=0 { } 
+buildVtblDdList: Dreg is Dreg & counter=1 [ counter=0; regNum=regNum+1; ] { }
+buildVtblDdList: Dreg,buildVtblDdList is Dreg & buildVtblDdList [ counter=counter-1; regNum=regNum+1; ]
+{
+    build buildVtblDdList;
+}
+
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=0 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=1; ] { export 1:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=1 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=2; ] { export 2:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=2 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=3; ] { export 3:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=3 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=4; ] { export 4:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=0 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=1; ] { export 1:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=1 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=2; ] { export 2:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=2 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=3; ] { export 3:4; }
+vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=3 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=4; ] { export 4:4; }
+
+
+:vtbl.8 VRd,vtblDdList,VRm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=0 & c0404=0) |
+                    ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1011=2 & thv_c0606=0 & thv_c0404=0 ) ) & VRm & VRd & VRn & vtblDdList
+{
+    VRd = VectorTableLookup(VRm,VRn,vtblDdList);
+}
+
+:vtbx.8 VRd,vtblDdList,VRm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=1 & c0404=0) |
+                    ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1011=2 & thv_c0606=1 & thv_c0404=0 ) ) & VRm & VRd & VRn & vtblDdList
+{
+    VRd = VectorTableLookup(VRm,VRn,vtblDdList);
+}
+
+
+#######
+# VTST
+#
+
+define pcodeop VectorTest;
+
+:vtst.^esize2021 Qd, Qn, Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & c0606=1 & c0404=1) |
+                    ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd
+{
+    Qd = VectorTest(Qn, Qm);
+}
+
+:vtst.^esize2021 Dd, Dn, Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & c0606=0 & c0404=1) |
+                    ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & esize2021 & Dm & Dn & Dd
+{
+    Dd = VectorTest(Dn, Dm);
+}
+
+#######
+# VTRN
+#
+
+define pcodeop VectorTranspose;
+
+:vtrn^"."^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=0 & c0404=0) |
+                    ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=0 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0)) & esize1819 & Dd & Dm
+{
+    Dd = VectorTranspose(Dm,esize1819);
+}
+
+:vtrn^"."^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=1 & c0404=0) |
+                    ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=0 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd
+{
+    Qd = VectorTranspose(Qm,esize1819);
+}
+
+
+#######
+# VUZP
+#
+
+define pcodeop VectorUnzip;
+
+:vuzp^"."^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=0 & c0404=0 ) |
+                    ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819
+{
+    Dd = VectorUnzip(Dm,esize1819);
+}
+
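+# In these two-register miscellaneous encodings, bit 6 (Q6 in ARM mode,
+# thv_c0606 in Thumb) selects the vector width: 0 matches the 64-bit D
+# form above, 1 the 128-bit Q form below.
+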
+:vuzp^"."^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=1 & c0404=0 ) |
+                    ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819
+{
+    Qd = VectorUnzip(Qm,esize1819);
+}
+
+
+#######
+# VZIP
+#
+
+define pcodeop VectorZip;
+
+:vzip^"."^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=0 & c0404=0 ) |
+                    ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819
+{
+    Dd = VectorZip(Dm,esize1819);
+}
+
+:vzip^"."^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=1 & c0404=0 ) |
+                    ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819
+{
+    Qd = VectorZip(Qm,esize1819);
+}
+
+
+@endif # SIMD
+
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMt.pspec b/src/third-party/sleigh/processors/ARM/data/languages/ARMt.pspec
new file mode 100644
index 00000000..17d32acd
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMt.pspec
@@ -0,0 +1,63 @@
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMtTHUMB.pspec b/src/third-party/sleigh/processors/ARM/data/languages/ARMtTHUMB.pspec
new file mode 100644
index 00000000..cc30cec1
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMtTHUMB.pspec
@@ -0,0 +1,64 @@
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMt_v45.pspec b/src/third-party/sleigh/processors/ARM/data/languages/ARMt_v45.pspec
new file mode 100644
index 00000000..1d04e809
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMt_v45.pspec
@@ -0,0 +1,42 @@
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMt_v6.pspec b/src/third-party/sleigh/processors/ARM/data/languages/ARMt_v6.pspec
new file mode 100644
index 00000000..68d853b1
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMt_v6.pspec
@@ -0,0 +1,43 @@
diff --git a/src/third-party/sleigh/processors/ARM/data/languages/ARMv8.sinc b/src/third-party/sleigh/processors/ARM/data/languages/ARMv8.sinc
new file mode 100644
index 00000000..a4555820
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/data/languages/ARMv8.sinc
@@ -0,0 +1,1443 @@
+
+# This macro is always defined in this file, but the ifdef may be
+# useful if it is moved to ARMinstructions.sinc.
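+
+# crc32_type selects the mnemonic suffix from the encoding: in the A32
+# rows below, c2122 gives the operand size (0b00=b, 0b01=h, 0b10=w) and
+# c0909 selects the CRC32C polynomial, so e.g. c2122=0b01 & c0909=1
+# decodes as the "ch" suffix, i.e. crc32ch. The T32 rows use thv_c0405
+# for the size, with dedicated crc32/crc32c constructors instead.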
+
+crc32_type: "b" is TMode=0 & c2122=0b00 & c0909=0 { }
+crc32_type: "h" is TMode=0 & c2122=0b01 & c0909=0 { }
+crc32_type: "w" is TMode=0 & c2122=0b10 & c0909=0 { }
+crc32_type: "cb" is TMode=0 & c2122=0b00 & c0909=1 { }
+crc32_type: "ch" is TMode=0 & c2122=0b01 & c0909=1 { }
+crc32_type: "cw" is TMode=0 & c2122=0b10 & c0909=1 { }
+crc32_type: "b" is TMode=1 & thv_c0405=0b00 { }
+crc32_type: "h" is TMode=1 & thv_c0405=0b01 { }
+crc32_type: "w" is TMode=1 & thv_c0405=0b10 { }
+
+define pcodeop Crc32Calc;
+
+# F5.1.39,40 p2650,2653 CRC32,CRC32C A1
+:crc32^crc32_type Rd,Rn,Rm
+    is TMode=0 & c2831=0b1110 & c2327=0b00010 & c2020=0 & c0407=0b0100 & c1011=0b00 & c0808=0
+    & crc32_type & Rn & Rd & Rm
+    { Rd = Crc32Calc(Rn,Rm); }
+
+# F5.1.39 p2650 CRC32 T1
+:crc32^crc32_type thv_Rt2,thv_Rn,thv_Rm
+    is TMode=1 & thv_c2031=0b111110101100 & thv_c1215=0b1111 & thv_c0607=0b10
+    & crc32_type & thv_Rn & thv_Rt2 & thv_Rm
+    { thv_Rt2 = Crc32Calc(thv_Rn,thv_Rm); }
+
+# F5.1.40 p2653 CRC32C T1
+:crc32c^crc32_type thv_Rt2,thv_Rn,thv_Rm
+    is TMode=1 & thv_c2031=0b111110101101 & thv_c1215=0b1111 & thv_c0607=0b10
+    & crc32_type & thv_Rn & thv_Rt2 & thv_Rm
+    { thv_Rt2 = Crc32Calc(thv_Rn,thv_Rm); }
+
+define pcodeop DCPSInstruction;
+
+dcps_lev:1 is TMode=1 & thv_c0001=0b01 { export 1:1; }
+dcps_lev:2 is TMode=1 & thv_c0001=0b10 { export 2:1; }
+dcps_lev:3 is TMode=1 & thv_c0001=0b11 { export 3:1; }
+
+# F5.1.42 p2657 DCPS1,DCPS2,DCPS3 DCPS1 variant
+:dcps^dcps_lev
+    is TMode=1 & thv_c1631=0b1111011110001111 & thv_c0215=0b10000000000000 & (thv_c0101=1 | thv_c0000=1) & dcps_lev
+    { DCPSInstruction(dcps_lev:1); }
+
+# F5.1.53 p2683 LDA
+:lda^COND Rd,[Rn]
+    is TMode=0 & ARMcond=1 & COND & c2027=0x19 & Rn & Rd & c0011=0xc9f
+    {
+        build COND;
+        Rd = *Rn;
+    }
+
+# F5.1.53 p2683 LDA
+:lda thv_Rt,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1010
+    & ItCond & thv_Rn & thv_Rt
+    {
+        build ItCond;
+        thv_Rt = *thv_Rn;
+    }
+
+# F5.1.54 p2684 LDAB
+:ldab^COND Rd,[Rn]
+    is TMode=0 & ARMcond=1 & COND & c2027=0x1d & Rn & Rd & c0011=0xc9f
+    {
+        build COND;
+        val:1 = *Rn;
+        Rd = zext(val);
+    }
+
+# F5.1.54 p2684 LDAB
+:ldab thv_Rt,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1000
+    & ItCond & thv_Rt & thv_Rn
+    {
+        build ItCond;
+        val:1 = *thv_Rn;
+        thv_Rt = zext(val);
+    }
+
+# F5.1.55 p2685 LDAEX
+:ldaex^COND Rd,[Rn]
+    is TMode=0 & ARMcond=1 & COND & c2027=0x19 & Rn & Rd & c0011=0xe9f
+    {
+        build COND;
+        Rd = *Rn;
+    }
+
+# F5.1.55 p2685 LDAEX
+:ldaex thv_Rt,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1110
+    & ItCond & thv_Rt & thv_Rn
+    {
+        build ItCond;
+        thv_Rt = *thv_Rn;
+    }
+
+# F5.1.56 p2687 LDAEXB
+:ldaexb^COND Rd,[Rn]
+    is TMode=0 & ARMcond=1 & COND & c2027=0x1d & Rn & Rd & c0011=0xe9f
+    {
+        build COND;
+        val:1 = *Rn;
+        Rd = zext(val);
+    }
+
+# F5.1.56 p2687 LDAEXB
+:ldaexb thv_Rt,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1100
+    & ItCond & thv_Rt & thv_Rn
+    {
+        build ItCond;
+        val:1 = *thv_Rn;
+        thv_Rt = zext(val);
+    }
+
+# F5.1.57 p2689 LDAEXD
+:ldaexd^COND Rd,Rd2,[Rn]
+    is TMode=0 & ARMcond=1 & COND & c2027=0x1b & Rn & Rd & Rd2 & c0011=0xe9f
+    {
+        build COND;
+@if ENDIAN == "big"
+        Rd = *(Rn + 4);
+        Rd2 = *(Rn);
+@else # ENDIAN == "little"
+        Rd = *(Rn);
+        Rd2 = *(Rn + 4);
+@endif # ENDIAN == "little"
+    }
+
+# F5.1.57 p2689 LDAEXD
+:ldaexd thv_Rt,thv_Rt2,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1111
+    & ItCond & thv_Rt & thv_Rt2 & thv_Rn
+    {
+        build ItCond;
+@if ENDIAN == "big"
+        thv_Rt = *(thv_Rn + 4);
+        thv_Rt2 = *(thv_Rn);
+@else # ENDIAN == "little" + thv_Rt = *(thv_Rn); + thv_Rt2 = *(thv_Rn + 4); +@endif # ENDIAN == "little" + } + +# F5.1.58 p2691 LDAEXH +:ldaexh^COND Rd,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x1f & Rn & Rd & c0011=0xe9f + { + build COND; + val:2 = *Rn; + Rd = zext(val); + } + +# F5.1.58 p2691 LDAEXH +:ldaexh thv_Rt,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1101 + & ItCond & thv_Rt & thv_Rn + { + build ItCond; + val:2 = *thv_Rn; + thv_Rt = zext(val); + } + +# F5.1.59 p2693 LDAH +:ldah^COND Rd,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x1f & Rn & Rd & c0011=0xc9f + { + build COND; + val:2 = *Rn; + Rd = zext(val); + } + +# F5.1.59 p2693 LDAH +:ldah thv_Rt,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1001 + & ItCond & thv_Rt & thv_Rn + { + build ItCond; + val:2 = *thv_Rn; + thv_Rt = zext(val); + } + +# F5.1.178 p2969 SEVL A1 variant +:sevl^COND + is TMode=0 & ARMcond=1 & COND & c1627=0b001100100000 & c0007=0b00000101 + { + build COND; + SendEvent(); + } + +# F5.1.178 p2969 SEVL T2 variant +:sevl.w + is TMode=1 & thv_c2031=0b111100111010 & thv_c1415=0b10 & thv_c1212=0 & thv_c0010=0b00000000101 + & ItCond + { + build ItCond; + SendEvent(); + } + +# F5.1.209 p3035 STL +:stl^COND Rm,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x18 & Rn & c0415=0xfc9 & Rm + { + build COND; + *Rn = Rm; + } + +# F5.1.209 p3035 STL +:stl thv_Rt,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1010 + & ItCond & thv_Rt & thv_Rn + { + build ItCond; + *thv_Rn = thv_Rt; + } + +# F5.1.210 p3036 STLB +:stlb^COND Rm,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x1c & Rn & c0415=0xfc9 & Rm + { + build COND; + *:1 Rn = Rm[0,8]; + } + +# F5.1.210 p3036 STLB +:stlb thv_Rt,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1000 + & ItCond & thv_Rt & thv_Rn + { + build ItCond; + *:1 thv_Rn = thv_Rt[0,8]; + } + +# F5.1.211 p3037 STLEX +:stlex^COND Rd,Rm,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x18 & Rn & Rd & c0411=0xe9 & Rm + { + build COND; + *Rn = Rm; + Rd = 0; + } + +# F5.1.211 p3037 STLEX +:stlex thv_Rm,thv_Rt,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1110 + & ItCond & thv_Rm & thv_Rt & thv_Rn + { + build ItCond; + *thv_Rn = thv_Rt; + thv_Rm = 0; + } + +# F5.1.212 p3040 STLEXB +:stlexb^COND Rd,Rm,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x1c & Rn & Rd & c0411=0xe9 & Rm + { + build COND; + *:1 Rn = Rm[0,8]; + Rd = 0; + } + +# F5.1.212 p3040 STLEXB +:stlexb thv_Rm,thv_Rt,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1100 + & ItCond & thv_Rm & thv_Rt & thv_Rn + { + build ItCond; + *:1 thv_Rn = thv_Rt[0,8]; + thv_Rm = 0; + } + +# F5.1.213 p3042 STLEXD +:stlexd^COND Rd,Rm,Rm2,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x1a & Rn & Rd & c0411=0xe9 & Rm & Rm2 + { + build COND; +@if ENDIAN == "big" + *Rn = Rm; + *(Rn + 4) = Rm2; +@else # ENDIAN == "little" + *Rn = Rm2; + *(Rn + 4) = Rm; +@endif # ENDIAN == "little" + Rd = 0; + } + +# F5.1.213 p3042 STLEXD +:stlexd thv_Rm,thv_Rt,thv_Rt2,[thv_Rn] + is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1111 + & ItCond & thv_Rm & thv_Rt & thv_Rt2 & thv_Rn + { + build ItCond; +@if ENDIAN == "big" + *thv_Rn = thv_Rt; + *(thv_Rn + 4) = thv_Rt2; +@else # ENDIAN == "little" + *thv_Rn = thv_Rt2; + *(thv_Rn + 4) = thv_Rt; +@endif # ENDIAN == "little" + thv_Rm = 0; + } + +# F5.1.214 p3045 STLEXH +:stlexh^COND Rd,Rm,[Rn] + is TMode=0 & ARMcond=1 & COND & c2027=0x1e & Rn & Rd & c0411=0xe9 & Rm + { + build COND; + *:2 Rn = Rm[0,16]; + Rd = 0; + } + +# F5.1.214 p3045 STLEXH 
+:stlexh thv_Rm,thv_Rt,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1101
+    & ItCond & thv_Rm & thv_Rt & thv_Rn
+    {
+        build ItCond;
+        *:2 thv_Rn = thv_Rt[0,16];
+        thv_Rm = 0;
+    }
+
+# F5.1.215 p3048 STLH
+:stlh^COND Rm,[Rn]
+    is TMode=0 & ARMcond=1 & COND & c2027=0x1e & Rn & c0415=0xfc9 & Rm
+    {
+        build COND;
+        *:2 Rn = Rm[0,16];
+    }
+
+# F5.1.215 p3048 STLH
+:stlh thv_Rt,[thv_Rn]
+    is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1001
+    & ItCond & thv_Rt & thv_Rn
+    {
+        build ItCond;
+        *:2 thv_Rn = thv_Rt[0,16];
+    }
+
+@ifdef INCLUDE_NEON
+
+# Advanced SIMD support / NEON in ARMv8
+
+#######
+# macro declarations
+
+# The Inexact flag is bit 4 of FPEXC
+@define FPEXC_IXF "fpexc[3,1]"
+
+# Rounding modes, as used in pseudocode, defined as an enumeration
+# '01' N
+@define FPRounding_TIEEVEN "0:1"
+# '10' P
+@define FPRounding_POSINF "1:1"
+# '11' M
+@define FPRounding_NEGINF "2:1"
+@define FPRounding_ZERO "3:1"
+# '00' A
+@define FPRounding_TIEAWAY "4:1"
+@define FPRounding_ODD "5:1"
+
+#######
+# pcodeop declarations
+
+# CryptOp(val)
+# Various crypto algorithms, too numerous for explication at
+# this time
+
+define pcodeop CryptOp;
+
+# FixedToFP(fp, M, N, fbits, unsigned, rounding)
+# Convert M-bit fixed point with fbits fractional bits to N-bit
+# floating point, controlled by unsigned flag and rounding. Can
+# also be used with packed "SIMD" floats.
+
+define pcodeop FixedToFP;
+
+# FPConvert(fp, M, N [, rounding])
+# Convert floating point from M-bit to N-bit precision.
+# Can also be used with packed "SIMD" floats. Sometimes
+# equivalent to float2float. M, N are the input and output sizes
+# (16, 32, 64), implied by pseudocode, but given explicitly
+# here. Rounding is only required when converting to integral
+# type.
+
+define pcodeop FPConvert;
+
+# FPConvertInexact()
+# At the end of any rounding or conversion operation, the
+# pseudocode tests whether the converted value is identical to
+# the original value. If it is not identical, and if the "exact"
+# argument is true, then it sets the floating point exception
+# FPEXC.Inexact bit. This function is understood to return 0/1
+# depending on whether conversion was exact (0) or inexact (1).
+#
+
+define pcodeop FPConvertInexact;
+
+# FPToFixed(fp, M, N, fbits, unsigned, rounding)
+# Convert M-bit floating point to N-bit fixed point with fbits
+# fractional bits, controlled by unsigned flag and rounding.
+# Can also be used with packed "SIMD" floats.
+
+define pcodeop FPToFixed;
+
+# FPRoundInt(fp, N, rounding, exact)
+# Round fp to nearest integral floating point, controlled by
+# rounding. If exact is true, set FPSR.IXC flag. Can also be
+# used with packed "SIMD" floats.
+ +define pcodeop FPRoundInt; + +# PolynomialMult(op1, op2) + +define pcodeop PolynomialMult; + +####### +# AESD single round decryption + +# F6.1.1 p3235 A1/T1 +:aesd.8 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001101 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001101 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qd | Qm); } + +####### +# AESE single round encryption + +# F6.1.2 p3237 A1/T1 +:aese.8 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001100 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001100 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qd | Qm); } + +####### +# AESIMC inverse mix columns + +# F6.1.3 p3239 A1/T1 +:aesimc.8 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001111 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001111 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qm); } + +####### +# AESMC mix columns + +# F6.1.4 p3240 A1/T1 +:aesmc.8 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001110 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001110 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qm); } + +####### +# SHA1C SHA1 hash update (choose) + +# F6.1.7 p3248 A1/T1 +:sha1c.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# SHA1H SHA1 fixed rotate + +# F6.1.8 p3250 A1/T1 +:sha1h.32 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b01 & c0611=0b001011 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b01 & thv_c0611=0b001011 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qm); } + +####### +# SHA1M SHA1 hash update (majority) + +# F6.1.9 p3251 A1/T1 +:sha1m.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# SHA1P SHA1 hash update (parity) + +# F6.1.10 p3253 A1/T1 +:sha1p.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# SHA1SU0 SHA1 schedule update 0 + +# F6.1.11 p3255 A1/T1 +:sha1su0.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b11 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# SHA1SU1 SHA1 schedule update 1 + +# F6.1.12 p3257 A1/T1 +:sha1su1.32 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001110 & c0404=0) + 
| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c0611=0b001110 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qd,Qm); } + +####### +# SHA256H SHA256 hash update part 1 + +# F6.1.13 p3259 A1/T1 +:sha256h.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# SHA256H2 SHA256 hash update part 2 + +# F6.1.14 p3260 A1/T1 +:sha256h2.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# SHA256SU0 SHA256 schedule update 0 + +# F6.1.15 p3261 A1/T1 +:sha256su0.32 Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001111 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c0611=0b001111 & thv_c0404=0)) + & Qd & Qm + { Qd = CryptOp(Qd,Qm); } + +####### +# SHA256SU1 SHA256 schedule update 1 + +# F6.1.16 p3263 A1/T1 +:sha256su1.32 Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) + & Qn & Qd & Qm + { Qd = CryptOp(Qd,Qn,Qm); } + +####### +# The VCVT instructions are a large family for converting between +# floating point numbers and integers, of all sizes and combinations + +# F6.1.54 p3350 A1 cases size = 10 (c0809) +:vcvt^COND^".f64.f32" Dd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11011 & c1616=1 & c1011=0b10 & c0707=1 & c0606=1 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11011 & thv_c1616=1 & thv_c1011=0b10 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) + & COND & Dd & Sm + { build COND; Dd = float2float(Sm); } + +# F6.1.54 p3350 A1 cases size = 11 (c0809) +:vcvt^COND^".f32.f64" Sd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11011 & c1616=1 & c1011=0b10 & c0707=1 & c0606=1 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11011 & thv_c1616=1 & thv_c1011=0b10 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) + & COND & Sd & Dm + { build COND; Sd = float2float(Dm); } + +# F6.1.55 p3352 A1 op == 1 (c0808) +:vcvt.f32.f16 Qd,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b01 & thv_c1617=0b10 & thv_c0911=0b011 & thv_c0607=0b00 & thv_c0404=0 & thv_c0808=1)) + & Qd & Dm + { + Qd = float2float(Dm:2); + } + +# F6.1.55 p3352 A1 op == 0 (c0808) +:vcvt.f16.f32 Dd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b01 & thv_c1617=0b10 & thv_c0911=0b011 & thv_c0607=0b00 & thv_c0404=0 & thv_c0808=0)) + & Dd & Qm + { Dd = float2float(Qm); } + +vcvt_56_64_dt: ".f32.s32" + is ((TMode=0 & c0708=0b00) + | (TMode=1 & thv_c0708=0b00)) + & Dd & Dm + { Dd = 
FixedToFP(Dm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_56_64_dt: ".f32.u32" + is ((TMode=0 & c0708=0b01) + | (TMode=1 & thv_c0708=0b01)) + & Dd & Dm + { Dd = FixedToFP(Dm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_56_64_dt: ".s32.f32" + is ((TMode=0 & c0708=0b10) + | (TMode=1 & thv_c0708=0b10)) + & Dd & Dm + { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_ZERO)); } +vcvt_56_64_dt: ".u32.f32" + is ((TMode=0 & c0708=0b11) + | (TMode=1 & thv_c0708=0b11)) + & Dd & Dm + { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_ZERO)); } + +vcvt_56_128_dt: ".f32.s32" + is ((TMode=0 & c0708=0b00) + | (TMode=1 & thv_c0708=0b00)) + & Qd & Qm + { Qd = FixedToFP(Qm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_56_128_dt: ".f32.u32" + is ((TMode=0 & c0708=0b01) + | (TMode=1 & thv_c0708=0b01)) + & Qd & Qm + { Qd = FixedToFP(Qm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_56_128_dt: ".s32.f32" + is ((TMode=0 & c0708=0b10) + | (TMode=1 & thv_c0708=0b10)) + & Qd & Qm + { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_ZERO)); } +vcvt_56_128_dt: ".u32.f32" + is ((TMode=0 & c0708=0b11) + | (TMode=1 & thv_c0708=0b11)) + & Qd & Qm + { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_ZERO)); } + +# F6.1.56 p3354 A1 Q == 0 (c0606) +:vcvt^vcvt_56_64_dt Dd,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c0911=0b011 & thv_c0404=0 & thv_c0606=0)) + & vcvt_56_64_dt & Dd & Dm + { } + +# F6.1.56 p3354 A1 Q == 1 (c0606) +:vcvt^vcvt_56_128_dt Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c0911=0b011 & thv_c0404=0 & thv_c0606=1)) + & vcvt_56_128_dt & Qd & Qm + { } + +# F6.1.57 p3356 A1 opc2==100 && size==10 (c1618, c0809) +:vcvt^COND^".u32.f32" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b100 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b10)) + & COND & Sd & Sm + { build COND; Sd = zext(round(Sm)); } + +# F6.1.57 p3356 A1 opc2==101 && size==10 (c1618, c0809) +:vcvt^COND^".s32.f32" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b101 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b10)) + & COND & Sd & Sm + { build COND; Sd = sext(round(Sm)); } + +# F6.1.57 p3356 A1 opc2==100 && size==11 (c1618, c0809) +:vcvt^COND^".u32.f64" Sd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b100 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b11)) + & COND & Sd & Dm + { build COND; local tmp:8 = zext(round(Dm)); Sd = tmp:4; } + +# F6.1.57 p3356 A1 opc2==101 && size==11 (c1618, c0809) +:vcvt^COND^".s32.f64" Sd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b101 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & 
thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b11)) + & COND & Sd & Dm + { build COND; local tmp:8 = sext(round(Dm)); Sd = tmp:4; } + +# The rounding mode depends on c0707=0 => FPSCR else ZERO + +vcvt_58_3232_dt: ".f32.u32" + is ((TMode=0 & c0708=0b00) + | (TMode=1 & thv_c0708=0b00)) + & Sd & Sm + { local tmp:8 = zext(Sm); Sd = int2float(tmp); } +vcvt_58_3232_dt: ".f32.s32" + is ((TMode=0 & c0708=0b01) + | (TMode=1 & thv_c0708=0b01)) + & Sd & Sm + { local tmp:8 = sext(Sm); Sd = int2float(tmp); } + +vcvt_58_6432_dt: ".f64.u32" + is ((TMode=0 & c0708=0b10) + | (TMode=1 & thv_c0708=0b10)) + & Dd & Sm + { local tmp:8 = zext(Sm); Dd = int2float(tmp); } +vcvt_58_6432_dt: ".f64.s32" + is ((TMode=0 & c0708=0b11) + | (TMode=1 & thv_c0708=0b11)) + & Dd & Sm + { local tmp:8 = sext(Sm); Dd = int2float(tmp); } + +# F6.1.58 p3359 A1 size == 10 (c0809) +:vcvt^COND^vcvt_58_3232_dt Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11100 & c1616=0 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11100 & thv_c1616=0 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) + & COND & vcvt_58_3232_dt & Sd & Sm + { build COND; build vcvt_58_3232_dt; } + +# F6.1.58 p3359 A1 size == 11 (c0809) +:vcvt^COND^vcvt_58_6432_dt Dd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11100 & c1616=0 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11100 & thv_c1616=0 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) + & COND & vcvt_58_6432_dt & Dd & Sm + { build COND; build vcvt_58_6432_dt; } + +vcvt_59_fbits_built: fbits is TMode=0 & c1621 [ fbits = 64 - c1621; ] { export * [const]:1 fbits; } +vcvt_59_fbits_built: fbits is TMode=1 & thv_c1621 [ fbits = 64 - thv_c1621; ] { export * [const]:1 fbits; } +vcvt_59_fbits: "#"^fbits is TMode=0 & c1621 [ fbits = 64 - c1621; ] { } +vcvt_59_fbits: "#"^fbits is TMode=1 & thv_c1621 [ fbits = 64 - thv_c1621; ] { } + +vcvt_59_32_dt: ".f32.s32" + is ((TMode=0 & c0808=0 & c2424=0) + | (TMode=1 & thv_c0808=0 & thv_c2828=0)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_59_32_dt: ".f32.u32" + is ((TMode=0 & c0808=0 & c2424=1) + | (TMode=1 & thv_c0808=0 & thv_c2828=1)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_59_32_dt: ".s32.f32" + is ((TMode=0 & c0808=1 & c2424=0) + | (TMode=1 & thv_c0808=1 & thv_c2828=0)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } +vcvt_59_32_dt: ".u32.f32" + is ((TMode=0 & c0808=1 & c2424=1) + | (TMode=1 & thv_c0808=1 & thv_c2828=1)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); } + +vcvt_59_64_dt: ".f32.s32" + is ((TMode=0 & c0808=0 & c2424=0) + | (TMode=1 & thv_c0808=0 & thv_c2828=0)) + & Qd & Qm & vcvt_59_fbits_built + { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_59_64_dt: ".f32.u32" + is ((TMode=0 & c0808=0 & c2424=1) + | (TMode=1 & thv_c0808=0 & thv_c2828=1)) + & Qd & Qm & vcvt_59_fbits_built + { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_59_64_dt: ".s32.f32" + is ((TMode=0 & c0808=1 & c2424=0) + | (TMode=1 & thv_c0808=1 & thv_c2828=0)) + & Qd & Qm & 
vcvt_59_fbits_built
+    { Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); }
+vcvt_59_64_dt: ".u32.f32"
+    is ((TMode=0 & c0808=1 & c2424=1)
+      | (TMode=1 & thv_c0808=1 & thv_c2828=1))
+    & Qd & Qm & vcvt_59_fbits_built
+    { Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); }
+
+# Should add rounding here: if dt2 is s32 or u32 the rounding is
+# FPRounding_ZERO, otherwise FPRounding_TIEEVEN.
+
+# F6.1.59 p3361 A1 Q = 0 (c0606)
+:vcvt^vcvt_59_32_dt Dd,Dm,vcvt_59_fbits
+    is ((TMode=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c0911=0b111 & c0707=0 & c0404=1 & c0606=0)
+      | (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c0911=0b111 & thv_c0707=0 & thv_c0404=1 & thv_c0606=0))
+    & vcvt_59_32_dt & vcvt_59_fbits & Dd & Dm
+    { }
+
+# F6.1.59 p3361 A1 Q = 1 (c0606)
+:vcvt^vcvt_59_64_dt Qd,Qm,vcvt_59_fbits
+    is ((TMode=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c0911=0b111 & c0707=0 & c0404=1 & c0606=1)
+      | (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c0911=0b111 & thv_c0707=0 & thv_c0404=1 & thv_c0606=1))
+    & vcvt_59_64_dt & vcvt_59_fbits & Qd & Qm
+    { }
+
+vcvt_60_fbits_built: fbits is TMode=0 & c0707=0 & c0505 & c0003 [fbits = 16 - ( c0003 * 2 + c0505); ] { export * [const]:1 fbits; }
+vcvt_60_fbits_built: fbits is TMode=1 & thv_c0707=0 & thv_c0505 & thv_c0003 [fbits = 16 - (thv_c0003 * 2 + thv_c0505); ] { export * [const]:1 fbits; }
+vcvt_60_fbits_built: fbits is TMode=0 & c0707=1 & c0505 & c0003 [fbits = 32 - ( c0003 * 2 + c0505); ] { export * [const]:1 fbits; }
+vcvt_60_fbits_built: fbits is TMode=1 & thv_c0707=1 & thv_c0505 & thv_c0003 [fbits = 32 - (thv_c0003 * 2 + thv_c0505); ] { export * [const]:1 fbits; }
+vcvt_60_fbits: "#"^fbits is TMode=0 & c0707=0 & c0505 & c0003 [fbits = 16 - ( c0003 * 2 + c0505); ] { }
+vcvt_60_fbits: "#"^fbits is TMode=1 & thv_c0707=0 & thv_c0505 & thv_c0003 [fbits = 16 - (thv_c0003 * 2 + thv_c0505); ] { }
+vcvt_60_fbits: "#"^fbits is TMode=0 & c0707=1 & c0505 & c0003 [fbits = 32 - ( c0003 * 2 + c0505); ] { }
+vcvt_60_fbits: "#"^fbits is TMode=1 & thv_c0707=1 & thv_c0505 & thv_c0003 [fbits = 32 - (thv_c0003 * 2 + thv_c0505); ] { }
+
+vcvt_60_32_dt: ".f32.s16"
+    is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b10 & c0707=0)
+      | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=0))
+    & Sd & Sd2 & vcvt_60_fbits_built
+    { Sd = FixedToFP(Sd2, 16:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); }
+vcvt_60_32_dt: ".f32.s32"
+    is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b10 & c0707=1)
+      | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=1))
+    & Sd & Sd2 & vcvt_60_fbits_built
+    { Sd = FixedToFP(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); }
+vcvt_60_32_dt: ".f32.u16"
+    is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b10 & c0707=0)
+      | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=0))
+    & Sd & Sd2 & vcvt_60_fbits_built
+    { Sd = FixedToFP(Sd2, 16:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); }
+vcvt_60_32_dt: ".f32.u32"
+    is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b10 & c0707=1)
+      | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=1))
+    & Sd & Sd2 & vcvt_60_fbits_built
+    { Sd = FixedToFP(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); }
+vcvt_60_32_dt: ".s16.f32"
+    is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b10 & c0707=0)
+      | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=0))
+    & Sd & Sd2 & vcvt_60_fbits_built
+    { Sd = FPToFixed(Sd2,
32:1, 16:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } +vcvt_60_32_dt: ".s32.f32" + is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b10 & c0707=1) + | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=1)) + & Sd & Sd2 & vcvt_60_fbits_built + { Sd = FPToFixed(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } +vcvt_60_32_dt: ".u16.f32" + is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b10 & c0707=0) + | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=0)) + & Sd & Sd2 & vcvt_60_fbits_built + { Sd = FPToFixed(Sd2, 32:1, 16:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } +vcvt_60_32_dt: ".u32.f32" + is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b10 & c0707=1) + | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=1)) + & Sd & Sd2 & vcvt_60_fbits_built + { Sd = FPToFixed(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } + +vcvt_60_64_dt: ".f64.s16" + is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b11 & c0707=0) + | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=0)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FixedToFP(Dd2, 16:1, 64:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_60_64_dt: ".f64.s32" + is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b11 & c0707=1) + | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=1)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FixedToFP(Dd2, 32:1, 64:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_60_64_dt: ".f64.u16" + is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b11 & c0707=0) + | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=0)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FixedToFP(Dd2, 16:1, 64:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_60_64_dt: ".f64.u32" + is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b11 & c0707=1) + | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=1)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FixedToFP(Dd2, 32:1, 64:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_60_64_dt: ".s16.f64" + is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b11 & c0707=0) + | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=0)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FPToFixed(Dd2, 64:1, 16:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } +vcvt_60_64_dt: ".s32.f64" + is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b11 & c0707=1) + | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=1)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FPToFixed(Dd2, 64:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } +vcvt_60_64_dt: ".u16.f64" + is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b11 & c0707=0) + | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=0)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FPToFixed(Dd2, 64:1, 16:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } +vcvt_60_64_dt: ".u32.f64" + is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b11 & c0707=1) + | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=1)) + & Dd & Dd2 & vcvt_60_fbits_built + { Dd = FPToFixed(Dd2, 64:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } + +# F6.1.60 p3364 A1 op=0/1 sf=10 (c1818, c0809) +:vcvt^COND^vcvt_60_32_dt Sd,Sd2,vcvt_60_fbits + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1717=1 & c1011=0b10 & c0606=1 & c0404=0 & c1818 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1717=1 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c1818 & thv_c0809=0b10)) + & COND & 
vcvt_60_fbits & vcvt_60_32_dt & Sd & Sd2 + { build COND; build vcvt_60_32_dt; } + +# F6.1.60 p3364 A1 op=0/1 sf=11 (c1818, c0809) +:vcvt^COND^vcvt_60_64_dt Dd,Dd2,vcvt_60_fbits + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1717=1 & c1011=0b10 & c0606=1 & c0404=0 & c1818 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1717=1 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c1818 & thv_c0809=0b11)) + & COND & vcvt_60_fbits & vcvt_60_64_dt & Dd & Dd2 + { build COND; build vcvt_60_64_dt; } + +# vcvta, vcvtm, vcvtn, and vcvtp + +vcvt_amnp_simd_RM: "a" + is ((TMode=0 & c0809=0b00) + | (TMode=1 & thv_c0809=0b00)) + { export $(FPRounding_TIEAWAY); } +vcvt_amnp_simd_RM: "n" + is ((TMode=0 & c0809=0b01) + | (TMode=1 & thv_c0809=0b01)) + { export $(FPRounding_TIEEVEN); } +vcvt_amnp_simd_RM: "p" + is ((TMode=0 & c0809=0b10) + | (TMode=1 & thv_c0809=0b10)) + { export $(FPRounding_POSINF); } +vcvt_amnp_simd_RM: "m" + is ((TMode=0 & c0809=0b11) + | (TMode=1 & thv_c0809=0b11)) + { export $(FPRounding_NEGINF); } + +# These RM values need to be converted properly +vcvt_amnp_simd_64_dt: ".s32" is TMode=0 & c0707=0 & c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } +vcvt_amnp_simd_64_dt: ".s32" is TMode=1 & thv_c0707=0 & thv_c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } +vcvt_amnp_simd_64_dt: ".u32" is TMode=0 & c0707=1 & c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } +vcvt_amnp_simd_64_dt: ".u32" is TMode=1 & thv_c0707=1 & thv_c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } + +vcvt_amnp_simd_128_dt: ".s32" is TMode=0 & c0707=0 & c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } +vcvt_amnp_simd_128_dt: ".s32" is TMode=1 & thv_c0707=0 & thv_c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } +vcvt_amnp_simd_128_dt: ".u32" is TMode=0 & c0707=1 & c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } +vcvt_amnp_simd_128_dt: ".u32" is TMode=1 & thv_c0707=1 & thv_c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } + +# F6.1.61,64,66,68 p3367,3374,3378,3384 A1 64-bit SIMD vector variant Q = 0 (c0606) +:vcvt^vcvt_amnp_simd_RM^vcvt_amnp_simd_64_dt^".f32" Dd,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c1011=0b00 & thv_c0404=0 & thv_c0606=0)) + & vcvt_amnp_simd_RM & vcvt_amnp_simd_64_dt & Dd & Dm + { } + +# F6.1.61,64,66,68 p3367,3374,3378,3384 A1 128-bit SIMD vector variant Q = 1(c0606) +:vcvt^vcvt_amnp_simd_RM^vcvt_amnp_simd_128_dt^".f32" Qd,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c1011=0b00 & thv_c0404=0 & thv_c0606=1)) + & vcvt_amnp_simd_RM & vcvt_amnp_simd_128_dt & Qd & Qm + { } + +vcvt_amnp_fp_RM: "a" + is ((TMode=0 & c1617=0b00) + | (TMode=1 & thv_c1617=0b00)) + { export $(FPRounding_TIEAWAY); } +vcvt_amnp_fp_RM: "n" + is ((TMode=0 & c1617=0b01) + | (TMode=1 & thv_c1617=0b01)) + { 
export $(FPRounding_TIEEVEN); } +vcvt_amnp_fp_RM: "p" + is ((TMode=0 & c1617=0b10) + | (TMode=1 & thv_c1617=0b10)) + { export $(FPRounding_POSINF); } +vcvt_amnp_fp_RM: "m" + is ((TMode=0 & c1617=0b11) + | (TMode=1 & thv_c1617=0b11)) + { export $(FPRounding_NEGINF); } + +vcvt_amnp_fp_s_dt: ".u32" is TMode=0 & c0707=0 & c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } +vcvt_amnp_fp_s_dt: ".u32" is TMode=1 & thv_c0707=0 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } +vcvt_amnp_fp_s_dt: ".s32" is TMode=0 & c0707=1 & c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } +vcvt_amnp_fp_s_dt: ".s32" is TMode=1 & thv_c0707=1 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } + +vcvt_amnp_fp_d_dt: ".u32" is TMode=0 & c0707=0 & c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } +vcvt_amnp_fp_d_dt: ".u32" is TMode=1 & thv_c0707=0 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } +vcvt_amnp_fp_d_dt: ".s32" is TMode=0 & c0707=1 & c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } +vcvt_amnp_fp_d_dt: ".s32" is TMode=1 & thv_c0707=1 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } + +# F6.1.62,65,67,69 p3369,3376,3380,3384 Single-precision scalar variant size = 10 (c0809) +:vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_s_dt^".f32" Sd,Sm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) + & vcvt_amnp_fp_RM & vcvt_amnp_fp_s_dt & Sd & Sm + { } + +# F6.1.62,65,67,69 p3369,3376,3380,3384 Double-precision scalar variant size = 11 (c0809) +:vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_d_dt^".f64" Sd,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) + & vcvt_amnp_fp_RM & vcvt_amnp_fp_d_dt & Sd & Dm + { } + +# vcvtb and vcvtt + +vcvt_bt3216_op: "b" + is ((TMode=0 & c0707=0) + | (TMode=1 & thv_c0707=0)) + & Sd & Sm + { Sd = float2float(Sm:2); } +vcvt_bt3216_op: "t" + is ((TMode=0 & c0707=1) + | (TMode=1 & thv_c0707=1)) + & Sd & Sm + { w:2 = Sm(2); Sd = float2float(w); } + +vcvt_bt6416_op: "b" + is ((TMode=0 & c0707=0) + | (TMode=1 & thv_c0707=0)) + & Dd & Sm + { Dd = float2float(Sm:2); } +vcvt_bt6416_op: "t" + is ((TMode=0 & c0707=1) + | (TMode=1 & thv_c0707=1)) + & Dd & Sm + { w:2 = Sm(2); Dd = float2float(w); } + +vcvt_bt1632_op: "b" + is ((TMode=0 & c0707=0) + | (TMode=1 & thv_c0707=0)) + & Sd & Sm + { Sd[0,16] = float2float(Sm); } +vcvt_bt1632_op: "t" + is ((TMode=0 & c0707=1) + | (TMode=1 & thv_c0707=1)) + & Sd & Sm + { tmp:2 = float2float(Sm); Sd = (zext(tmp)<<16) | zext(Sd[0,16]); } + +vcvt_bt1664_op: "b" + is ((TMode=0 & c0707=0) + | (TMode=1 & thv_c0707=0)) + & Sd & Dm + { Sd[0,16] = float2float(Dm); } +vcvt_bt1664_op: "t" + is ((TMode=0 & c0707=1) + | (TMode=1 & thv_c0707=1)) + & Sd & Dm + { tmp:2 = float2float(Dm); Sd = (zext(tmp)<<16) | zext(Sd[0,16]); } + +# F6.1.63 p3371 A1 cases op:sz = 00 (c1616, 
c0808) +# F6.1.71 p3389 A1 cases op:sz = 00 (c1616, c0808) +:vcvt^vcvt_bt3216_op^COND^".f32.f16" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=0 & c0808=0) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=0 & thv_c0808=0)) + & COND & vcvt_bt3216_op & Sd & Sm + { build COND; build vcvt_bt3216_op; } + +# F6.1.63 p3371 A1 cases op:sz = 01 (c1616, c0808) +# F6.1.71 p3389 A1 cases op:sz = 01 (c1616, c0808) +:vcvt^vcvt_bt6416_op^COND^".f64.f16" Dd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=0 & c0808=1) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=0 & thv_c0808=1)) + & COND & vcvt_bt6416_op & Dd & Sm + { build COND; build vcvt_bt6416_op; } + +# F6.1.63 p3371 A1 cases op:sz = 10 (c1616, c0808) +# F6.1.71 p3389 A1 cases op:sz = 10 (c1616, c0808) +:vcvt^vcvt_bt1632_op^COND^".f16.f32" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=1 & c0808=0) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=1 & thv_c0808=0)) + & COND & vcvt_bt1632_op & Sd & Sm + { build COND; build vcvt_bt1632_op; } + +# F6.1.63 p3371 A1 cases op:sz = 11 (c1616, c0808) +# F6.1.71 p3389 A1 cases op:sz = 11 (c1616, c0808) +:vcvt^vcvt_bt1664_op^COND^".f16.f64" Sd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=1 & c0808=1) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=1 & thv_c0808=1)) + & COND & vcvt_bt1664_op & Sd & Dm + { build COND; build vcvt_bt1664_op; } + +# vcvtr + +# F6.1.70 p3386 A1 case opc2=100 size=10 (c1618, c0809) +:vcvtr^COND^".u32.f32" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b100 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b10)) + & COND & Sd & Sm + { build COND; Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 1:1, $(FPSCR_RMODE)); } + +# F6.1.70 p3386 A1 case opc2=101 size=10 +:vcvtr^COND^".s32.f32" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b101 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b10)) + & COND & Sd & Sm + { build COND; Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 0:1, $(FPSCR_RMODE)); } + +# F6.1.70 p3386 A1 case opc2=100 size=11 +:vcvtr^COND^".u32.f64" Sd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b100 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b11)) + & COND & Sd & Dm + { build COND; Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 1:1, $(FPSCR_RMODE)); } + +# F6.1.70 p3386 A1 case opc2=101 size=11 +:vcvtr^COND^".s32.f64" Sd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b101 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & 
thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b11)) + & COND & Sd & Dm + { build COND; Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, $(FPSCR_RMODE)); } + +####### +# VMAXNM/VMINNM + + +# FPMaxNum(Vn, Vm) +# Return the maximum of two floating point numbers. +# Includes FP and SIMD variants of all lane sizes. + +define pcodeop FPMaxNum; + +# FPMinNum(Vn, Vm) +# Return the minimum of two floating point numbers. +# Includes FP and SIMD variants of all lane sizes. + +define pcodeop FPMinNum; + +# F6.1.101 p3471 A1/T1 Q = 0 (c0606) +:vmaxnm^".f32" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) + & Dd & Dn & Dm + { Dd = FPMaxNum(Dn, Dm); } + +# F6.1.101 p3471 A1/T1 Q = 1 (c0606) +:vmaxnm^".f32" Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) + & Qd & Qn & Qm + { Qd = FPMaxNum(Qn, Qm); } + +# F6.1.101 p3471 A1/T1 Q = 0 (c0606) +:vmaxnm^".f16" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) + & Dd & Dn & Dm + { Dd = FPMaxNum(Dn, Dm); } + +# F6.1.101 p3471 A1/T1 Q = 1 (c0606) +:vmaxnm^".f16" Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) + & Qd & Qn & Qm + { Qd = FPMaxNum(Qn, Qm); } + +# F6.1.101 p3471 A2/T2 size = 01 (c0809) +:vmaxnm^".f16" Sd,Sn,Sm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b01) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b01)) + & Sd & Sn & Sm + { Sd = FPMaxNum(Sn, Sm); } + +# F6.1.101 p3471 A2/T2 size = 10 (c0809) +:vmaxnm^".f32" Sd,Sn,Sm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b10)) + & Sd & Sn & Sm + { Sd = FPMaxNum(Sn, Sm); } + +# F6.1.101 p3471 A2/T2 size = 11 (c0809) +:vmaxnm^".f64" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b11)) + & Dd & Dn & Dm + { Dd = FPMaxNum(Dn, Dm); } + +# F6.1.104 p3478 A1/T1 Q = 0 (c0606) +:vminnm^".f32" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) + & Dd & Dn & Dm + { Dd = FPMinNum(Dn, Dm); } + +# F6.1.104 p3478 A1/T1 Q = 1 (c0606) +:vminnm^".f32" Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) + & Qd & Qn & Qm + { Qd = FPMinNum(Qn, Qm); } + +# F6.1.104 p3478 A1/T1 Q = 0 (c0606) 
+:vminnm^".f16" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=0) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) + & Dd & Dn & Dm + { Dd = FPMinNum(Dn, Dm); } + +# F6.1.104 p3478 A1/T1 Q = 1 (c0606) +:vminnm^".f16" Qd,Qn,Qm + is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=1) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) + & Qd & Qn & Qm + { Qd = FPMinNum(Qn, Qm); } + +# F6.1.104 p3478 A2/T2 size = 01 (c0809) +:vminnm^".f16" Sd,Sn,Sm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b01) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b01)) + & Sd & Sn & Sm + { Sd = FPMinNum(Sn, Sm); } + +# F6.1.104 p3478 A2/T2 size = 10 (c0809) +:vminnm^".f32" Sd,Sn,Sm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) + & Sd & Sn & Sm + { Sd = FPMinNum(Sn, Sm); } + +# F6.1.104 p3478 A2/T2 size = 11 (c0809) +:vminnm^".f64" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) + & Dd & Dn & Dm + { Dd = FPMinNum(Dn, Dm); } + +####### +# VMULL instructions vector/polynomial multiplication + +vmull_dt: ".s8" + is ((TMode=0 & c0909=0 & c2424=0 & c2021=0b00) + | (TMode=1 & thv_c0909=0 & thv_c2828=0 & thv_c2021=0b00)) + { } + +vmull_dt: ".s16" + is ((TMode=0 & c0909=0 & c2424=0 & c2021=0b01) + | (TMode=1 & thv_c0909=0 & thv_c2828=0 & thv_c2021=0b01)) + { } + +vmull_dt: ".s32" + is ((TMode=0 & c0909=0 & c2424=0 & c2021=0b10) + | (TMode=1 & thv_c0909=0 & thv_c2828=0 & thv_c2021=0b10)) + { } + +vmull_dt: ".u8" + is ((TMode=0 & c0909=0 & c2424=1 & c2021=0b00) + | (TMode=1 & thv_c0909=0 & thv_c2828=1 & thv_c2021=0b00)) + { } + +vmull_dt: ".u16" + is ((TMode=0 & c0909=0 & c2424=1 & c2021=0b01) + | (TMode=1 & thv_c0909=0 & thv_c2828=1 & thv_c2021=0b01)) + { } + +vmull_dt: ".u32" + is ((TMode=0 & c0909=0 & c2424=1 & c2021=0b10) + | (TMode=1 & thv_c0909=0 & thv_c2828=1 & thv_c2021=0b10)) + { } + +vmull_dt: ".p8" + is ((TMode=0 & c0909=1 & c2424=0 & c2021=0b00) + | (TMode=1 & thv_c0909=1 & thv_c2828=0 & thv_c2021=0b00)) + { } + +vmull_dt: ".p64" + is ((TMode=0 & c0909=1 & c2424=0 & c2021=0b10) + | (TMode=1 & thv_c0909=1 & thv_c2828=0 & thv_c2021=0b10)) + { } + +# F6.1.130 p3537 VMULL (-integer and +polynomial) op=1 (c0909) (with condition U!=1 and size!=0b11 and size!=01) +:vmull^vmull_dt Qd,Dn,Dm + is ((TMode=0 & c2531=0b1111001 & c2424=0 & c2323=1 & ( c2121 & c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=1) + | (TMode=1 & thv_c2931=0b111 & thv_c2828=0 & thv_c2327=0b11111 & (thv_c2121 & thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=1)) + & vmull_dt & Qd & Dn & Dm + { Qd = PolynomialMult(Dn, Dm); } + +# F6.1.130 p3537 VMULL (+integer and -polynomial) op=0 (c0909) (with condition size!=0b11) +:vmull^vmull_dt Qd,Dn,Dm + is ((TMode=0 & c2531=0b1111001 & c2424 & c2323=1 & ( c2121=0 | c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & 
c0909=0) + | (TMode=1 & thv_c2931=0b111 & thv_c2828 & thv_c2327=0b11111 & (thv_c2121=0 | thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=0)) + & vmull_dt & Qd & Dn & Dm + { Qd = VectorMultiply(Dn, Dm); } + +####### +# The VRINT instructions round a floating-point value to an integral +# floating-point value of the same size; the result depends on the +# selected rounding mode (only the "z" variant truncates; e.g. with +# FPRounding_NEGINF, 1.5 rounds to 1.0 and -1.5 rounds to -2.0). +# The arguments to FPRoundInt are +# 1: floating point value (can be 2 packed in a Q register) +# 2: lane size in bits +# 3: rounding mode +# 4: boolean exact, if true then raise the Inexact exception if the +# result differs from the original + +vrint_simd_RM: "a" + is ((TMode=0 & c0709=0b010) + | (TMode=1 & thv_c0709=0b010)) + { export $(FPRounding_TIEAWAY); } + +vrint_simd_RM: "m" + is ((TMode=0 & c0709=0b101) + | (TMode=1 & thv_c0709=0b101)) + { export $(FPRounding_NEGINF); } + +vrint_simd_RM: "n" + is ((TMode=0 & c0709=0b000) + | (TMode=1 & thv_c0709=0b000)) + { export $(FPRounding_TIEEVEN); } + +vrint_simd_RM: "p" + is ((TMode=0 & c0709=0b111) + | (TMode=1 & thv_c0709=0b111)) + { export $(FPRounding_POSINF); } + +vrint_simd_RM: "x" + is ((TMode=0 & c0709=0b001) + | (TMode=1 & thv_c0709=0b001)) + { export $(FPRounding_TIEEVEN); } + +vrint_simd_RM: "z" + is ((TMode=0 & c0709=0b011) + | (TMode=1 & thv_c0709=0b011)) + { export $(FPRounding_ZERO); } + +# For vrintx, the exact flag is 1, and the IXF flag is set (inexact) + +vrint_simd_exact: "x" + is ((TMode=0 & c0709=0b001) + | (TMode=1 & thv_c0709=0b001)) + { export 1:1; } + +vrint_simd_exact: + is ((TMode=0 & ( c0707=1 | c0808=1 | c0909=0)) + | (TMode=1 & ( thv_c0707=1 | thv_c0808=1 | thv_c0909=0))) + { export 0:1; } + +vrint_simd_ixf: + is ((TMode=0 & c0709=0b001) + | (TMode=1 & thv_c0709=0b001)) + { $(FPEXC_IXF) = FPConvertInexact(); } + +vrint_simd_ixf: + is ((TMode=0 & ( c0707=1 | c0808=1 | c0909=0)) + | (TMode=1 & ( thv_c0707=1 | thv_c0808=1 | thv_c0909=0))) + { } + + +# F6.1.178,180,182,184,187,189 p3646,3650,3654,3658,3664,3668 Q = 0 (c0606) +:vrint^vrint_simd_RM^".f32" Dd,Dm + is ((TMode=0 & c2331=0b111100111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c1011=0b01 & (( c0707=0 & c0909=0) | ( c0707=1 & c0909=1) | ( c0707=1 & c0909=0)) & c0404=0 & c0606=0) + | (TMode=1 & thv_c2331=0b111111111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c1011=0b01 & ((thv_c0707=0 & thv_c0909=0) | (thv_c0707=1 & thv_c0909=1) | (thv_c0707=1 & thv_c0909=0)) & thv_c0404=0 & thv_c0606=0)) + & vrint_simd_RM & vrint_simd_exact & vrint_simd_ixf & Dd & Dm + { Dd = FPRoundInt(Dm, 32:1, vrint_simd_RM, 0:1); build vrint_simd_ixf; } + +# F6.1.178,180,182,184,187,189 p3646,3650,3654,3658,3664,3668 Q = 1 (c0606) +:vrint^vrint_simd_RM^".f32" Qd,Qm + is ((TMode=0 & c2331=0b111100111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c1011=0b01 & c0404=0 & c0606=1) + | (TMode=1 & thv_c2331=0b111111111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c1011=0b01 & thv_c0404=0 & thv_c0606=1)) + & vrint_simd_RM & vrint_simd_exact & vrint_simd_ixf & Qd & Qm + { Qd = FPRoundInt(Qm, 32:1, vrint_simd_RM, 0:1); build vrint_simd_ixf; } + +vrint_fp_RM: "a" + is ((TMode=0 & c1617=0b00) + | (TMode=1 & thv_c1617=0b00)) + { export $(FPRounding_TIEAWAY); } + +vrint_fp_RM: "m" + is ((TMode=0 & c1617=0b11) + | (TMode=1 & thv_c1617=0b11)) + { export $(FPRounding_NEGINF); } + +vrint_fp_RM: "n" + is ((TMode=0 & c1617=0b01) + | (TMode=1 & thv_c1617=0b01)) + { export $(FPRounding_TIEEVEN); } + +vrint_fp_RM: "p" + is ((TMode=0 & c1617=0b10) + | (TMode=1 & thv_c1617=0b10)) + { export $(FPRounding_POSINF); } + +# F6.1.179,181,183,185 p3648,3652,3656,3660 
size = 10 (c0809) +:vrint^vrint_fp_RM^".f32" Sd,Sm + is ((TMode=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2331=0b111111101 & thv_c1821=0b1110 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c0809=0b10)) + & vrint_fp_RM & Sd & Sm + { Sd = FPRoundInt(Sm, 32:1, vrint_fp_RM, 0:1); } + +# F6.1.179,181,183,185 p3648,3652,3656,3660 size = 11 (c0809) +:vrint^vrint_fp_RM^".f64" Dd,Dm + is ((TMode=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2331=0b111111101 & thv_c1821=0b1110 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c0809=0b11)) + & vrint_fp_RM & Dd & Dm + { Dd = FPRoundInt(Dm, 64:1, vrint_fp_RM, 0:1); } + +vrint_rxz_RM: "r" + is ((TMode=0 & c1616=0 & c0707=0) + | (TMode=1 & thv_c1616=0 & thv_c0707=0)) + { tmp:1 = $(FPSCR_RMODE); export tmp; } + +vrint_rxz_RM: "x" + is ((TMode=0 & c1616=1 & c0707=0) + | (TMode=1 & thv_c1616=1 & thv_c0707=0)) + { tmp:1 = $(FPSCR_RMODE); export tmp; } + +vrint_rxz_RM: "z" + is ((TMode=0 & c1616=0 & c0707=1) + | (TMode=1 & thv_c1616=0 & thv_c0707=1)) + { export $(FPRounding_ZERO); } + +# For vrintx, the exact flag is 1, and the IXF flag is set (inexact) + +vrint_rxz_exact: "x" + is ((TMode=0 & c1616=1 & c0707=0) + | (TMode=1 & thv_c1616=1 & thv_c0707=0)) + { export 1:1; } + +vrint_rxz_exact: + is ((TMode=0 & ( c1616=0 | c0707=1)) + | (TMode=1 & (thv_c1616=0 | thv_c0707=1))) + { export 0:1; } + +vrint_rxz_ixf: + is ((TMode=0 & c1616=1 & c0707=0) + | (TMode=1 & thv_c1616=1 & thv_c0707=0)) + { $(FPEXC_IXF) = FPConvertInexact(); } + +vrint_rxz_ixf: + is ((TMode=0 & ( c1616=0 | c0707=1)) + | (TMode=1 & (thv_c1616=0 | thv_c0707=1))) + { } + +# F6.1.186,188,190 p3662,3666,3670 A1 size = 10 (c0809) +:vrint^vrint_rxz_RM^COND^".f32" Sd,Sm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b110 & c1718=0b11 & c1011=0b10 & c0606=1 & c0404=0 & (( c1616=0) | ( c1616=1 & c0707=0)) & c0809=0b10) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b110 & thv_c1718=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & ((thv_c1616=0) | (thv_c1616=1 & thv_c0707=0)) & thv_c0809=0b10)) + & vrint_rxz_RM & vrint_rxz_exact & vrint_rxz_ixf & COND & Sd & Sm + { build COND; Sd = FPRoundInt(Sm, 32:1, vrint_rxz_RM, vrint_rxz_exact); build vrint_rxz_ixf; } + +# F6.1.186,188,190 p3662,3666,3670 A1 size = 11 (c0809) +:vrint^vrint_rxz_RM^COND^".f64" Dd,Dm + is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b110 & c1718=0b11 & c1011=0b10 & c0606=1 & c0404=0 & (( c1616=0) | ( c1616=1 & c0707=0)) & c0809=0b11) + | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b110 & thv_c1718=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & ((thv_c1616=0) | (thv_c1616=1 & thv_c0707=0)) & thv_c0809=0b11)) + & vrint_rxz_RM & vrint_rxz_exact & vrint_rxz_ixf & COND & Dd & Dm + { build COND; Dd = FPRoundInt(Dm, 64:1, vrint_rxz_RM, vrint_rxz_exact); build vrint_rxz_ixf; } + +####### +# VSEL + +vselcond: "eq" + is ((TMode=0 & c2021=0b00) + | (TMode=1 & thv_c2021=0b00)) + { tmp:1 = ZR; export tmp; } +vselcond: "ge" + is ((TMode=0 & c2021=0b10) + | (TMode=1 & thv_c2021=0b10)) + { tmp:1 = (NG==OV); export tmp; } +vselcond: "gt" + is ((TMode=0 & c2021=0b11) + | (TMode=1 & thv_c2021=0b11)) + { tmp:1 = (!ZR) && (NG==OV); export tmp; } +vselcond: "vs" + is ((TMode=0 & c2021=0b01) + | (TMode=1 & thv_c2021=0b01)) + { tmp:1 = OV; export tmp; } + +# F6.1.200 p3690 A1/T1 size = 11 doubleprec (c0809) +:vsel^vselcond^".f64" Dd,Dn,Dm + is ((TMode=0 & c2831=0b1111 & c2327=0b11100 & 
c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11100 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b11)) + & vselcond & Dn & Dd & Dm + { Dd = zext(vselcond != 0) * Dn + zext(vselcond == 0) * Dm; } + +# F6.1.200 p3690 A1/T1 size = 10 singleprec (c0809) +:vsel^vselcond^".f32" Sd,Sn,Sm + is ((TMode=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10) + | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11100 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b10)) + & vselcond & Sn & Sd & Sm + { Sd = zext(vselcond != 0) * Sn + zext(vselcond == 0) * Sm; } + +@endif # INCLUDE_NEON diff --git a/src/third-party/sleigh/processors/ARM/data/languages/old/ARMv5.lang b/src/third-party/sleigh/processors/ARM/data/languages/old/ARMv5.lang new file mode 100644 index 00000000..03ce0149 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/old/ARMv5.lang @@ -0,0 +1,62 @@ [legacy XML language definition; markup not preserved in this listing — declares "ARM:LE:32:DEPRECATED ARM V5" for the ARM processor] diff --git a/src/third-party/sleigh/processors/ARM/data/languages/old/ARMv5.trans b/src/third-party/sleigh/processors/ARM/data/languages/old/ARMv5.trans new file mode 100644 index 00000000..9c8858d0 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/old/ARMv5.trans @@ -0,0 +1,6 @@ [legacy XML translator spec; markup not preserved — maps "Sleigh-ARMv5" to ARM:LE:32:v5] diff --git a/src/third-party/sleigh/processors/ARM/data/languages/old/THUMBv2.lang b/src/third-party/sleigh/processors/ARM/data/languages/old/THUMBv2.lang new file mode 100644 index 00000000..773cc611 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/old/THUMBv2.lang @@ -0,0 +1,55 @@ [legacy XML language definition; markup not preserved — declares "ARM:LE:32:DEPRECATED THUMB V2" for the ARM processor] diff --git a/src/third-party/sleigh/processors/ARM/data/languages/old/THUMBv2.trans b/src/third-party/sleigh/processors/ARM/data/languages/old/THUMBv2.trans new file mode 100644 index 00000000..781a37a9 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/languages/old/THUMBv2.trans @@ -0,0 +1,11 @@ [legacy XML translator spec; markup not preserved — maps "Sleigh-THUMBv2" to ARM:LE:32:v5t] diff --git a/src/third-party/sleigh/processors/ARM/data/manuals/ARM.idx b/src/third-party/sleigh/processors/ARM/data/manuals/ARM.idx new file mode 100644 index 00000000..f898a8bd --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/manuals/ARM.idx @@ -0,0 +1,6461 @@ +@Armv7AR_errata.pdf[ARM Architecture Reference Manual - ARMv7A and ARMv7-R edition Errata markup, July, 2011 (ARM DDI 0406B_errata_2011_Q2)] + +adc, 326 +adceq, 326 +adcne, 326 +adcmi, 326 +adcpl, 326 +adcvs, 326 +adcvc, 326 +adchi, 326 +adcls, 326 +adcge, 326 +adclt, 326 +adcgt, 326 +adcle, 326 +adcal, 326 +adclo, 326 +adccc, 326 +adchs, 326 +adccs, 326 +adcnv, 326 +adcs, 326 +adceqs, 326 +adcnes, 326 +adcmis, 326 +adcpls, 326 +adcvss, 326 +adcvcs, 326 +adchis, 326 +adclss, 326 +adcges, 326 +adclts, 326 +adcgts, 326 +adcles, 326 +adcals, 326 +adclos, 326 +adcccs, 326 +adchss, 326 +adccss, 326 +adcnvs, 326 +adcseq, 326 +adcsne, 326 +adcsmi, 326 +adcspl, 326 +adcsvs, 326 +adcsvc, 326 +adcshi, 326 +adcsls, 326 +adcsge, 326 +adcslt, 326 +adcsgt, 326 +adcsle, 326 +adcsal, 326 +adcslo, 326 +adcscc, 326 +adcshs, 326 +adcscs, 326 +adcsnv, 326 +add, 332 +addeq, 332 +addne, 332 +addmi, 332 +addpl, 332 +addvs, 332 +addvc, 332 +addhi, 332 +addls, 332 +addge, 332 +addlt, 332 +addgt, 332 +addle, 332
+addal, 332 +addlo, 332 +addcc, 332 +addhs, 332 +addcs, 332 +addnv, 332 +adds, 332 +addeqs, 332 +addnes, 332 +addmis, 332 +addpls, 332 +addvss, 332 +addvcs, 332 +addhis, 332 +addlss, 332 +addges, 332 +addlts, 332 +addgts, 332 +addles, 332 +addals, 332 +addlos, 332 +addccs, 332 +addhss, 332 +addcss, 332 +addnvs, 332 +addseq, 332 +addsne, 332 +addsmi, 332 +addspl, 332 +addsvs, 332 +addsvc, 332 +addshi, 332 +addsls, 332 +addsge, 332 +addslt, 332 +addsgt, 332 +addsle, 332 +addsal, 332 +addslo, 332 +addscc, 332 +addshs, 332 +addscs, 332 +addsnv, 332 +addw, 332 +addeq, 332 +addne, 332 +addmi, 332 +addpl, 332 +addvs, 332 +addvc, 332 +addhi, 332 +addls, 332 +addge, 332 +addlt, 332 +addgt, 332 +addle, 332 +addal, 332 +addlo, 332 +addcc, 332 +addhs, 332 +addcs, 332 +addnv, 332 +adr, 344 +adreq, 344 +adrne, 344 +adrmi, 344 +adrpl, 344 +adrvs, 344 +adrvc, 344 +adrhi, 344 +adrls, 344 +adrge, 344 +adrlt, 344 +adrgt, 344 +adrle, 344 +adral, 344 +adrlo, 344 +adrcc, 344 +adrhs, 344 +adrcs, 344 +adrnv, 344 +and, 346 +andeq, 346 +andne, 346 +andmi, 346 +andpl, 346 +andvs, 346 +andvc, 346 +andhi, 346 +andls, 346 +andge, 346 +andlt, 346 +andgt, 346 +andle, 346 +andal, 346 +andlo, 346 +andcc, 346 +andhs, 346 +andcs, 346 +andnv, 346 +ands, 346 +andeqs, 346 +andnes, 346 +andmis, 346 +andpls, 346 +andvss, 346 +andvcs, 346 +andhis, 346 +andlss, 346 +andges, 346 +andlts, 346 +andgts, 346 +andles, 346 +andals, 346 +andlos, 346 +andccs, 346 +andhss, 346 +andcss, 346 +andnvs, 346 +andseq, 346 +andsne, 346 +andsmi, 346 +andspl, 346 +andsvs, 346 +andsvc, 346 +andshi, 346 +andsls, 346 +andsge, 346 +andslt, 346 +andsgt, 346 +andsle, 346 +andsal, 346 +andslo, 346 +andscc, 346 +andshs, 346 +andscs, 346 +andsnv, 346 +asr, 352 +asreq, 352 +asrne, 352 +asrmi, 352 +asrpl, 352 +asrvs, 352 +asrvc, 352 +asrhi, 352 +asrls, 352 +asrge, 352 +asrlt, 352 +asrgt, 352 +asrle, 352 +asral, 352 +asrlo, 352 +asrcc, 352 +asrhs, 352 +asrcs, 352 +asrnv, 352 +b, 356 +beq, 356 +bne, 356 +bmi, 356 +bpl, 356 +bvs, 356 +bvc, 356 +bhi, 356 +bls, 356 +bge, 356 +blt, 356 +bgt, 356 +ble, 356 +bal, 356 +blo, 356 +bcc, 356 +bhs, 356 +bcs, 356 +bnv, 356 +bfc, 358 +bfceq, 358 +bfcne, 358 +bfcmi, 358 +bfcpl, 358 +bfcvs, 358 +bfcvc, 358 +bfchi, 358 +bfcls, 358 +bfcge, 358 +bfclt, 358 +bfcgt, 358 +bfcle, 358 +bfcal, 358 +bfclo, 358 +bfccc, 358 +bfchs, 358 +bfccs, 358 +bfcnv, 358 +bfi, 360 +bfieq, 360 +bfine, 360 +bfimi, 360 +bfipl, 360 +bfivs, 360 +bfivc, 360 +bfihi, 360 +bfils, 360 +bfige, 360 +bfilt, 360 +bfigt, 360 +bfile, 360 +bfial, 360 +bfilo, 360 +bficc, 360 +bfihs, 360 +bfics, 360 +bfinv, 360 +bic, 362 +biceq, 362 +bicne, 362 +bicmi, 362 +bicpl, 362 +bicvs, 362 +bicvc, 362 +bichi, 362 +bicls, 362 +bicge, 362 +biclt, 362 +bicgt, 362 +bicle, 362 +bical, 362 +biclo, 362 +biccc, 362 +bichs, 362 +biccs, 362 +bicnv, 362 +bics, 362 +biceqs, 362 +bicnes, 362 +bicmis, 362 +bicpls, 362 +bicvss, 362 +bicvcs, 362 +bichis, 362 +biclss, 362 +bicges, 362 +biclts, 362 +bicgts, 362 +bicles, 362 +bicals, 362 +biclos, 362 +bicccs, 362 +bichss, 362 +biccss, 362 +bicnvs, 362 +bicseq, 362 +bicsne, 362 +bicsmi, 362 +bicspl, 362 +bicsvs, 362 +bicsvc, 362 +bicshi, 362 +bicsls, 362 +bicsge, 362 +bicslt, 362 +bicsgt, 362 +bicsle, 362 +bicsal, 362 +bicslo, 362 +bicscc, 362 +bicshs, 362 +bicscs, 362 +bicsnv, 362 +bkpt, 368 +bl, 370 +bleq, 370 +blne, 370 +blmi, 370 +blpl, 370 +blvs, 370 +blvc, 370 +blhi, 370 +blls, 370 +blge, 370 +bllt, 370 +blgt, 370 +blle, 370 +blal, 370 +bllo, 370 +blcc, 370 +blhs, 370 +blcs, 370 +blnv, 370 +blx, 370 +blxeq, 370 +blxne, 370 +blxmi, 370 +blxpl, 
370 +blxvs, 370 +blxvc, 370 +blxhi, 370 +blxls, 370 +blxge, 370 +blxlt, 370 +blxgt, 370 +blxle, 370 +blxal, 370 +blxlo, 370 +blxcc, 370 +blxhs, 370 +blxcs, 370 +blxnv, 370 +bx, 374 +bxeq, 374 +bxne, 374 +bxmi, 374 +bxpl, 374 +bxvs, 374 +bxvc, 374 +bxhi, 374 +bxls, 374 +bxge, 374 +bxlt, 374 +bxgt, 374 +bxle, 374 +bxal, 374 +bxlo, 374 +bxcc, 374 +bxhs, 374 +bxcs, 374 +bxnv, 374 +bxj, 376 +bxjeq, 376 +bxjne, 376 +bxjmi, 376 +bxjpl, 376 +bxjvs, 376 +bxjvc, 376 +bxjhi, 376 +bxjls, 376 +bxjge, 376 +bxjlt, 376 +bxjgt, 376 +bxjle, 376 +bxjal, 376 +bxjlo, 376 +bxjcc, 376 +bxjhs, 376 +bxjcs, 376 +bxjnv, 376 +cbnz, 378 +cbz, 378 +cdp, 380 +cdpeq, 380 +cdpne, 380 +cdpmi, 380 +cdppl, 380 +cdpvs, 380 +cdpvc, 380 +cdphi, 380 +cdpls, 380 +cdpge, 380 +cdplt, 380 +cdpgt, 380 +cdple, 380 +cdpal, 380 +cdplo, 380 +cdpcc, 380 +cdphs, 380 +cdpcs, 380 +cdpnv, 380 +cdp2, 380 +cdp2eq, 380 +cdp2ne, 380 +cdp2mi, 380 +cdp2pl, 380 +cdp2vs, 380 +cdp2vc, 380 +cdp2hi, 380 +cdp2ls, 380 +cdp2ge, 380 +cdp2lt, 380 +cdp2gt, 380 +cdp2le, 380 +cdp2al, 380 +cdp2lo, 380 +cdp2cc, 380 +cdp2hs, 380 +cdp2cs, 380 +cdp2nv, 380 +chka, 1141 +clrex, 382 +clrexeq, 382 +clrexne, 382 +clrexmi, 382 +clrexpl, 382 +clrexvs, 382 +clrexvc, 382 +clrexhi, 382 +clrexls, 382 +clrexge, 382 +clrexlt, 382 +clrexgt, 382 +clrexle, 382 +clrexal, 382 +clrexlo, 382 +clrexcc, 382 +clrexhs, 382 +clrexcs, 382 +clrexnv, 382 +clz, 384 +clzeq, 384 +clzne, 384 +clzmi, 384 +clzpl, 384 +clzvs, 384 +clzvc, 384 +clzhi, 384 +clzls, 384 +clzge, 384 +clzlt, 384 +clzgt, 384 +clzle, 384 +clzal, 384 +clzlo, 384 +clzcc, 384 +clzhs, 384 +clzcs, 384 +clznv, 384 +cmn, 386 +cmneq, 386 +cmnne, 386 +cmnmi, 386 +cmnpl, 386 +cmnvs, 386 +cmnvc, 386 +cmnhi, 386 +cmnls, 386 +cmnge, 386 +cmnlt, 386 +cmngt, 386 +cmnle, 386 +cmnal, 386 +cmnlo, 386 +cmncc, 386 +cmnhs, 386 +cmncs, 386 +cmnnv, 386 +cmp, 392 +cmpeq, 392 +cmpne, 392 +cmpmi, 392 +cmppl, 392 +cmpvs, 392 +cmpvc, 392 +cmphi, 392 +cmpls, 392 +cmpge, 392 +cmplt, 392 +cmpgt, 392 +cmple, 392 +cmpal, 392 +cmplo, 392 +cmpcc, 392 +cmphs, 392 +cmpcs, 392 +cmpnv, 392 +cps, 1561 +cpy, 398 +dbg, 400 +dbgeq, 400 +dbgne, 400 +dbgmi, 400 +dbgpl, 400 +dbgvs, 400 +dbgvc, 400 +dbghi, 400 +dbgls, 400 +dbgge, 400 +dbglt, 400 +dbggt, 400 +dbgle, 400 +dbgal, 400 +dbglo, 400 +dbgcc, 400 +dbghs, 400 +dbgcs, 400 +dbgnv, 400 +dmb, 402 +dmbeq, 402 +dmbne, 402 +dmbmi, 402 +dmbpl, 402 +dmbvs, 402 +dmbvc, 402 +dmbhi, 402 +dmbls, 402 +dmbge, 402 +dmblt, 402 +dmbgt, 402 +dmble, 402 +dmbal, 402 +dmblo, 402 +dmbcc, 402 +dmbhs, 402 +dmbcs, 402 +dmbnv, 402 +dsb, 404 +dsbeq, 404 +dsbne, 404 +dsbmi, 404 +dsbpl, 404 +dsbvs, 404 +dsbvc, 404 +dsbhi, 404 +dsbls, 404 +dsbge, 404 +dsblt, 404 +dsbgt, 404 +dsble, 404 +dsbal, 404 +dsblo, 404 +dsbcc, 404 +dsbhs, 404 +dsbcs, 404 +dsbnv, 404 +enterx, 1133 +eor, 406 +eoreq, 406 +eorne, 406 +eormi, 406 +eorpl, 406 +eorvs, 406 +eorvc, 406 +eorhi, 406 +eorls, 406 +eorge, 406 +eorlt, 406 +eorgt, 406 +eorle, 406 +eoral, 406 +eorlo, 406 +eorcc, 406 +eorhs, 406 +eorcs, 406 +eornv, 406 +eors, 406 +eoreqs, 406 +eornes, 406 +eormis, 406 +eorpls, 406 +eorvss, 406 +eorvcs, 406 +eorhis, 406 +eorlss, 406 +eorges, 406 +eorlts, 406 +eorgts, 406 +eorles, 406 +eorals, 406 +eorlos, 406 +eorccs, 406 +eorhss, 406 +eorcss, 406 +eornvs, 406 +eorseq, 406 +eorsne, 406 +eorsmi, 406 +eorspl, 406 +eorsvs, 406 +eorsvc, 406 +eorshi, 406 +eorsls, 406 +eorsge, 406 +eorslt, 406 +eorsgt, 406 +eorsle, 406 +eorsal, 406 +eorslo, 406 +eorscc, 406 +eorshs, 406 +eorscs, 406 +eorsnv, 406 +hb, 1142 +hbeq, 1142 +hbne, 1142 +hbmi, 1142 +hbpl, 1142 +hbvs, 1142 +hbvc, 1142 
+hbhi, 1142 +hbls, 1142 +hbge, 1142 +hblt, 1142 +hbgt, 1142 +hble, 1142 +hbal, 1142 +hblo, 1142 +hbcc, 1142 +hbhs, 1142 +hbcs, 1142 +hbnv, 1142 +hbl, 1142 +hbleq, 1142 +hblne, 1142 +hblmi, 1142 +hblpl, 1142 +hblvs, 1142 +hblvc, 1142 +hblhi, 1142 +hblls, 1142 +hblge, 1142 +hbllt, 1142 +hblgt, 1142 +hblle, 1142 +hblal, 1142 +hbllo, 1142 +hblcc, 1142 +hblhs, 1142 +hblcs, 1142 +hblnv, 1142 +hblp, 1143 +hblpeq, 1143 +hblpne, 1143 +hblpmi, 1143 +hblppl, 1143 +hblpvs, 1143 +hblpvc, 1143 +hblphi, 1143 +hblpls, 1143 +hblpge, 1143 +hblplt, 1143 +hblpgt, 1143 +hblple, 1143 +hblpal, 1143 +hblplo, 1143 +hblpcc, 1143 +hblphs, 1143 +hblpcs, 1143 +hblpnv, 1143 +hbp, 1144 +hbpeq, 1144 +hbpne, 1144 +hbpmi, 1144 +hbppl, 1144 +hbpvs, 1144 +hbpvc, 1144 +hbphi, 1144 +hbpls, 1144 +hbpge, 1144 +hbplt, 1144 +hbpgt, 1144 +hbple, 1144 +hbpal, 1144 +hbplo, 1144 +hbpcc, 1144 +hbphs, 1144 +hbpcs, 1144 +hbpnv, 1144 +isb, 414 +isbeq, 414 +isbne, 414 +isbmi, 414 +isbpl, 414 +isbvs, 414 +isbvc, 414 +isbhi, 414 +isbls, 414 +isbge, 414 +isblt, 414 +isbgt, 414 +isble, 414 +isbal, 414 +isblo, 414 +isbcc, 414 +isbhs, 414 +isbcs, 414 +isbnv, 414 +it, 416 +itT, 416 +itE, 416 +itTT, 416 +itET, 416 +itTE, 416 +itEE, 416 +itTTT, 416 +itETT, 416 +itTET, 416 +itEET, 416 +itTTE, 416 +itETE, 416 +itTEE, 416 +itEEE, 416 +ldc, 418 +ldceq, 418 +ldcne, 418 +ldcmi, 418 +ldcpl, 418 +ldcvs, 418 +ldcvc, 418 +ldchi, 418 +ldcls, 418 +ldcge, 418 +ldclt, 418 +ldcgt, 418 +ldcle, 418 +ldcal, 418 +ldclo, 418 +ldccc, 418 +ldchs, 418 +ldccs, 418 +ldcnv, 418 +ldcl, 418 +ldceql, 418 +ldcnel, 418 +ldcmil, 418 +ldcpll, 418 +ldcvsl, 418 +ldcvcl, 418 +ldchil, 418 +ldclsl, 418 +ldcgel, 418 +ldcltl, 418 +ldcgtl, 418 +ldclel, 418 +ldcall, 418 +ldclol, 418 +ldcccl, 418 +ldchsl, 418 +ldccsl, 418 +ldcnvl, 418 +ldcleq, 418 +ldclne, 418 +ldclmi, 418 +ldclpl, 418 +ldclvs, 418 +ldclvc, 418 +ldclhi, 418 +ldclls, 418 +ldclge, 418 +ldcllt, 418 +ldclgt, 418 +ldclle, 418 +ldclal, 418 +ldcllo, 418 +ldclcc, 418 +ldclhs, 418 +ldclcs, 418 +ldclnv, 418 +ldc2, 418 +ldc2eq, 418 +ldc2ne, 418 +ldc2mi, 418 +ldc2pl, 418 +ldc2vs, 418 +ldc2vc, 418 +ldc2hi, 418 +ldc2ls, 418 +ldc2ge, 418 +ldc2lt, 418 +ldc2gt, 418 +ldc2le, 418 +ldc2al, 418 +ldc2lo, 418 +ldc2cc, 418 +ldc2hs, 418 +ldc2cs, 418 +ldc2nv, 418 +ldc2l, 418 +ldc2leq, 418 +ldc2lne, 418 +ldc2lmi, 418 +ldc2lpl, 418 +ldc2lvs, 418 +ldc2lvc, 418 +ldc2lhi, 418 +ldc2lls, 418 +ldc2lge, 418 +ldc2llt, 418 +ldc2lgt, 418 +ldc2lle, 418 +ldc2lal, 418 +ldc2llo, 418 +ldc2lcc, 418 +ldc2lhs, 418 +ldc2lcs, 418 +ldc2lnv, 418 +ldm, 422 +ldmeq, 422 +ldmne, 422 +ldmmi, 422 +ldmpl, 422 +ldmvs, 422 +ldmvc, 422 +ldmhi, 422 +ldmls, 422 +ldmge, 422 +ldmlt, 422 +ldmgt, 422 +ldmle, 422 +ldmal, 422 +ldmlo, 422 +ldmcc, 422 +ldmhs, 422 +ldmcs, 422 +ldmnv, 422 +ldmia, 422 +ldmeqia, 422 +ldmneia, 422 +ldmmiia, 422 +ldmplia, 422 +ldmvsia, 422 +ldmvcia, 422 +ldmhiia, 422 +ldmlsia, 422 +ldmgeia, 422 +ldmltia, 422 +ldmgtia, 422 +ldmleia, 422 +ldmalia, 422 +ldmloia, 422 +ldmccia, 422 +ldmhsia, 422 +ldmcsia, 422 +ldmnvia, 422 +ldmfd, 422 +ldmeqfd, 422 +ldmnefd, 422 +ldmmifd, 422 +ldmplfd, 422 +ldmvsfd, 422 +ldmvcfd, 422 +ldmhifd, 422 +ldmlsfd, 422 +ldmgefd, 422 +ldmltfd, 422 +ldmgtfd, 422 +ldmlefd, 422 +ldmalfd, 422 +ldmlofd, 422 +ldmccfd, 422 +ldmhsfd, 422 +ldmcsfd, 422 +ldmnvfd, 422 +ldmda, 424 +ldmdaeq, 424 +ldmdane, 424 +ldmdami, 424 +ldmdapl, 424 +ldmdavs, 424 +ldmdavc, 424 +ldmdahi, 424 +ldmdals, 424 +ldmdage, 424 +ldmdalt, 424 +ldmdagt, 424 +ldmdale, 424 +ldmdaal, 424 +ldmdalo, 424 +ldmdacc, 424 +ldmdahs, 424 +ldmdacs, 424 +ldmdanv, 424 +ldmeqda,424 +ldmneda, 424 
+ldmmida, 424 +ldmplda, 424 +ldmvsda, 424 +ldmvcda, 424 +ldmhida, 424 +ldmlsda, 424 +ldmgeda, 424 +ldmltda, 424 +ldmgtda, 424 +ldmleda, 424 +ldmalda, 424 +ldmloda, 424 +ldmccda, 424 +ldmhsda, 424 +ldmcsda, 424 +ldmnvda, 424 +ldmeqfa,424 +ldmnefa, 424 +ldmmifa, 424 +ldmplfa, 424 +ldmvsfa, 424 +ldmvcfa, 424 +ldmhifa, 424 +ldmlsfa, 424 +ldmgefa, 424 +ldmltfa, 424 +ldmgtfa, 424 +ldmlefa, 424 +ldmalfa, 424 +ldmlofa, 424 +ldmccfa, 424 +ldmhsfa, 424 +ldmcsfa, 424 +ldmnvfa, 424 +ldmdb, 426 +ldmdbeq, 426 +ldmdbne, 426 +ldmdbmi, 426 +ldmdbpl, 426 +ldmdbvs, 426 +ldmdbvc, 426 +ldmdbhi, 426 +ldmdbls, 426 +ldmdbge, 426 +ldmdblt, 426 +ldmdbgt, 426 +ldmdble, 426 +ldmdbal, 426 +ldmdblo, 426 +ldmdbcc, 426 +ldmdbhs, 426 +ldmdbcs, 426 +ldmdbnv, 426 +ldmeqdb,426 +ldmnedb, 426 +ldmmidb, 426 +ldmpldb, 426 +ldmvsdb, 426 +ldmvcdb, 426 +ldmhidb, 426 +ldmlsdb, 426 +ldmgedb, 426 +ldmltdb, 426 +ldmgtdb, 426 +ldmledb, 426 +ldmaldb, 426 +ldmlodb, 426 +ldmccdb, 426 +ldmhsdb, 426 +ldmcsdb, 426 +ldmnvdb, 426 +ldmeqea,426 +ldmneea, 426 +ldmmiea, 426 +ldmplea, 426 +ldmvsea, 426 +ldmvcea, 426 +ldmhiea, 426 +ldmlsea, 426 +ldmgeea, 426 +ldmltea, 426 +ldmgtea, 426 +ldmleea, 426 +ldmalea, 426 +ldmloea, 426 +ldmccea, 426 +ldmhsea, 426 +ldmcsea, 426 +ldmnvea, 426 +ldmib, 428 +ldmibeq, 428 +ldmibne, 428 +ldmibmi, 428 +ldmibpl, 428 +ldmibvs, 428 +ldmibvc, 428 +ldmibhi, 428 +ldmibls, 428 +ldmibge, 428 +ldmiblt, 428 +ldmibgt, 428 +ldmible, 428 +ldmibal, 428 +ldmiblo, 428 +ldmibcc, 428 +ldmibhs, 428 +ldmibcs, 428 +ldmibnv, 428 +ldmeqib,428 +ldmneib, 428 +ldmmiib, 428 +ldmplib, 428 +ldmvsib, 428 +ldmvcib, 428 +ldmhiib, 428 +ldmlsib, 428 +ldmgeib, 428 +ldmltib, 428 +ldmgtib, 428 +ldmleib, 428 +ldmalib, 428 +ldmloib, 428 +ldmccib, 428 +ldmhsib, 428 +ldmcsib, 428 +ldmnvib, 428 +ldmeqed,428 +ldmneed, 428 +ldmmied, 428 +ldmpled, 428 +ldmvsed, 428 +ldmvced, 428 +ldmhied, 428 +ldmlsed, 428 +ldmgeed, 428 +ldmlted, 428 +ldmgted, 428 +ldmleed, 428 +ldmaled, 428 +ldmloed, 428 +ldmcced, 428 +ldmhsed, 428 +ldmcsed, 428 +ldmnved, 428 +ldr, 430 +ldreq, 430 +ldrne, 430 +ldrmi, 430 +ldrpl, 430 +ldrvs, 430 +ldrvc, 430 +ldrhi, 430 +ldrls, 430 +ldrge, 430 +ldrlt, 430 +ldrgt, 430 +ldrle, 430 +ldral, 430 +ldrlo, 430 +ldrcc, 430 +ldrhs, 430 +ldrcs, 430 +ldrnv, 430 +ldreqb, 430 +ldrneb, 430 +ldrmib, 430 +ldrplb, 430 +ldrvsb, 430 +ldrvcb, 430 +ldrhib, 430 +ldrlsb, 430 +ldrgeb, 430 +ldrltb, 430 +ldrgtb, 430 +ldrleb, 430 +ldralb, 430 +ldrlob, 430 +ldrccb, 430 +ldrhsb, 430 +ldrcsb, 430 +ldrnvb, 430 +ldrb, 438 +ldrbeq, 438 +ldrbne, 438 +ldrbmi, 438 +ldrbpl, 438 +ldrbvs, 438 +ldrbvc, 438 +ldrbhi, 438 +ldrbls, 438 +ldrbge, 438 +ldrblt, 438 +ldrbgt, 438 +ldrble, 438 +ldrbal, 438 +ldrblo, 438 +ldrbcc, 438 +ldrbhs, 438 +ldrbcs, 438 +ldrbnv, 438 +ldrbt, 446 +ldreqbt, 446 +ldrnebt, 446 +ldrmibt, 446 +ldrplbt, 446 +ldrvsbt, 446 +ldrvcbt, 446 +ldrhibt, 446 +ldrlsbt, 446 +ldrgebt, 446 +ldrltbt, 446 +ldrgtbt, 446 +ldrlebt, 446 +ldralbt, 446 +ldrlobt, 446 +ldrccbt, 446 +ldrhsbt, 446 +ldrcsbt, 446 +ldrnvbt, 446 +ldrbteq,446 +ldrbtne, 446 +ldrbtmi, 446 +ldrbtpl, 446 +ldrbtvs, 446 +ldrbtvc, 446 +ldrbthi, 446 +ldrbtls, 446 +ldrbtge, 446 +ldrbtlt, 446 +ldrbtgt, 446 +ldrbtle, 446 +ldrbtal, 446 +ldrbtlo, 446 +ldrbtcc, 446 +ldrbths, 446 +ldrbtcs, 446 +ldrbtnv, 446 +ldrd, 448 +ldreqd, 448 +ldrned, 448 +ldrmid, 448 +ldrpld, 448 +ldrvsd, 448 +ldrvcd, 448 +ldrhid, 448 +ldrlsd, 448 +ldrged, 448 +ldrltd, 448 +ldrgtd, 448 +ldrled, 448 +ldrald, 448 +ldrlod, 448 +ldrccd, 448 +ldrhsd, 448 +ldrcsd, 448 +ldrnvd, 448 +ldrdeq, 448 +ldrdne, 448 +ldrdmi, 448 +ldrdpl, 448 +ldrdvs, 448 +ldrdvc, 448 
+ldrdhi, 448 +ldrdls, 448 +ldrdge, 448 +ldrdlt, 448 +ldrdgt, 448 +ldrdle, 448 +ldrdal, 448 +ldrdlo, 448 +ldrdcc, 448 +ldrdhs, 448 +ldrdcs, 448 +ldrdnv, 448 +ldrex, 454 +ldrexeq, 454 +ldrexne, 454 +ldrexmi, 454 +ldrexpl, 454 +ldrexvs, 454 +ldrexvc, 454 +ldrexhi, 454 +ldrexls, 454 +ldrexge, 454 +ldrexlt, 454 +ldrexgt, 454 +ldrexle, 454 +ldrexal, 454 +ldrexlo, 454 +ldrexcc, 454 +ldrexhs, 454 +ldrexcs, 454 +ldrexnv, 454 +ldrexb, 456 +ldrexbeq, 456 +ldrexbne, 456 +ldrexbmi, 456 +ldrexbpl, 456 +ldrexbvs, 456 +ldrexbvc, 456 +ldrexbhi, 456 +ldrexbls, 456 +ldrexbge, 456 +ldrexblt, 456 +ldrexbgt, 456 +ldrexble, 456 +ldrexbal, 456 +ldrexblo, 456 +ldrexbcc, 456 +ldrexbhs, 456 +ldrexbcs, 456 +ldrexbnv, 456 +ldrexd, 458 +ldrexdeq, 458 +ldrexdne, 458 +ldrexdmi, 458 +ldrexdpl, 458 +ldrexdvs, 458 +ldrexdvc, 458 +ldrexdhi, 458 +ldrexdls, 458 +ldrexdge, 458 +ldrexdlt, 458 +ldrexdgt, 458 +ldrexdle, 458 +ldrexdal, 458 +ldrexdlo, 458 +ldrexdcc, 458 +ldrexdhs, 458 +ldrexdcs, 458 +ldrexdnv, 458 +ldrexh, 460 +ldrexheq, 460 +ldrexhne, 460 +ldrexhmi, 460 +ldrexhpl, 460 +ldrexhvs, 460 +ldrexhvc, 460 +ldrexhhi, 460 +ldrexhls, 460 +ldrexhge, 460 +ldrexhlt, 460 +ldrexhgt, 460 +ldrexhle, 460 +ldrexhal, 460 +ldrexhlo, 460 +ldrexhcc, 460 +ldrexhhs, 460 +ldrexhcs, 460 +ldrexhnv, 460 +ldrh, 462 +ldreqh, 462 +ldrneh, 462 +ldrmih, 462 +ldrplh, 462 +ldrvsh, 462 +ldrvch, 462 +ldrhih, 462 +ldrlsh, 462 +ldrgeh, 462 +ldrlth, 462 +ldrgth, 462 +ldrleh, 462 +ldralh, 462 +ldrloh, 462 +ldrcch, 462 +ldrhsh, 462 +ldrcsh, 462 +ldrnvh, 462 +ldrheq, 462 +ldrhne, 462 +ldrhmi, 462 +ldrhpl, 462 +ldrhvs, 462 +ldrhvc, 462 +ldrhhi, 462 +ldrhls, 462 +ldrhge, 462 +ldrhlt, 462 +ldrhgt, 462 +ldrhle, 462 +ldrhal, 462 +ldrhlo, 462 +ldrhcc, 462 +ldrhhs, 462 +ldrhcs, 462 +ldrhnv, 462 +ldrht, 470 +ldrhteq, 470 +ldrhtne, 470 +ldrhtmi, 470 +ldrhtpl, 470 +ldrhtvs, 470 +ldrhtvc, 470 +ldrhthi, 470 +ldrhtls, 470 +ldrhtge, 470 +ldrhtlt, 470 +ldrhtgt, 470 +ldrhtle, 470 +ldrhtal, 470 +ldrhtlo, 470 +ldrhtcc, 470 +ldrhths, 470 +ldrhtcs, 470 +ldrhtnv, 470 +ldrsb, 472 +ldreqsb, 472 +ldrnesb, 472 +ldrmisb, 472 +ldrplsb, 472 +ldrvssb, 472 +ldrvcsb, 472 +ldrhisb, 472 +ldrlssb, 472 +ldrgesb, 472 +ldrltsb, 472 +ldrgtsb, 472 +ldrlesb, 472 +ldralsb, 472 +ldrlosb, 472 +ldrccsb, 472 +ldrhssb, 472 +ldrcssb, 472 +ldrnvsb, 472 +ldrsbeq,472 +ldrsbne, 472 +ldrsbmi, 472 +ldrsbpl, 472 +ldrsbvs, 472 +ldrsbvc, 472 +ldrsbhi, 472 +ldrsbls, 472 +ldrsbge, 472 +ldrsblt, 472 +ldrsbgt, 472 +ldrsble, 472 +ldrsbal, 472 +ldrsblo, 472 +ldrsbcc, 472 +ldrsbhs, 472 +ldrsbcs, 472 +ldrsbnv, 472 +ldrsbt, 478 +ldrsbteq, 478 +ldrsbne, 478 +ldrsbmi, 478 +ldrsbpl, 478 +ldrsbvs, 478 +ldrsbvc, 478 +ldrsbhi, 478 +ldrsbls, 478 +ldrsbge, 478 +ldrsblt, 478 +ldrsbgt, 478 +ldrsble, 478 +ldrsbal, 478 +ldrsblo, 478 +ldrsbcc, 478 +ldrsbhs, 478 +ldrsbcs, 478 +ldrsbnv, 478 +ldrsh, 480 +ldreqsh, 480 +ldrnesh, 480 +ldrmish, 480 +ldrplsh, 480 +ldrvssh, 480 +ldrvcsh, 480 +ldrhish, 480 +ldrlssh, 480 +ldrgesh, 480 +ldrltsh, 480 +ldrgtsh, 480 +ldrlesh, 480 +ldralsh, 480 +ldrlosh, 480 +ldrccsh, 480 +ldrhssh, 480 +ldrcshs, 480 +ldrnvsh, 480 +ldrsheq,480 +ldrshne, 480 +ldrshmi, 480 +ldrshpl, 480 +ldrshvs, 480 +ldrshvc, 480 +ldrshhi, 480 +ldrshls, 480 +ldrshge, 480 +ldrshlt, 480 +ldrshgt, 480 +ldrshle, 480 +ldrshal, 480 +ldrshlo, 480 +ldrshcc, 480 +ldrshhs, 480 +ldrshcs, 480 +ldrshnv, 480 +ldrsht, 486 +ldrshteq, 486 +ldrshtne, 486 +ldrshtmi, 486 +ldrshtpl, 486 +ldrshtvs, 486 +ldrshtvc, 486 +ldrshthi, 486 +ldrshtls, 486 +ldrshtge, 486 +ldrshtlt, 486 +ldrshtgt, 486 +ldrshtle, 486 +ldrshtal, 486 +ldrshtlo, 486 +ldrshtcc, 486 
+ldrshths, 486 +ldrshtcs, 486 +ldrshtnv, 486 +ldrt, 488 +ldreqt, 488 +ldrnet, 488 +ldrmit, 488 +ldrplt, 488 +ldrvst, 488 +ldrvct, 488 +ldrhit, 488 +ldrlst, 488 +ldrget, 488 +ldrltt, 488 +ldrgtt, 488 +ldrlet, 488 +ldralt, 488 +ldrlot, 488 +ldrcct, 488 +ldrhst, 488 +ldrcst, 488 +ldrnvt, 488 +ldrteq, 488 +ldrtne, 488 +ldrtmi, 488 +ldrtpl, 488 +ldrtvs, 488 +ldrtvc, 488 +ldrthi, 488 +ldrtls, 488 +ldrtge, 488 +ldrtlt, 488 +ldrtgt, 488 +ldrtle, 488 +ldrtal, 488 +ldrtlo, 488 +ldrtcc, 488 +ldrths, 488 +ldrtcs, 488 +ldrtnv, 488 +leavex, 1133 +lsl, 490 +lsleq, 490 +lslne, 490 +lslmi, 490 +lslpl, 490 +lslvs, 490 +lslvc, 490 +lslhi, 490 +lslls, 490 +lslge, 490 +lsllt, 490 +lslgt, 490 +lslle, 490 +lslal, 490 +lsllo, 490 +lslcc, 490 +lslhs, 490 +lslcs, 490 +lslnv, 490 +lsls, 490 +lsleqs, 490 +lslnes, 490 +lslmis, 490 +lslpls, 490 +lslvss, 490 +lslvcs, 490 +lslhis, 490 +lsllss, 490 +lslges, 490 +lsllts, 490 +lslgts, 490 +lslles, 490 +lslals, 490 +lsllos, 490 +lslccs, 490 +lslhss, 490 +lslcss, 490 +lslnvs, 490 +lsr, 494 +lsreq, 494 +lsrne, 494 +lsrmi, 494 +lsrpl, 494 +lsrvs, 494 +lsrvc, 494 +lsrhi, 494 +lsrls, 494 +lsrge, 494 +lsrlt, 494 +lsrgt, 494 +lsrle, 494 +lsral, 494 +lsrlo, 494 +lsrcc, 494 +lsrhs, 494 +lsrcs, 494 +lsrnv, 494 +lsrs, 494 +lsreqs, 494 +lsrnes, 494 +lsrmis, 494 +lsrpls, 494 +lsrvss, 494 +lsrvcs, 494 +lsrhis, 494 +lsrlss, 494 +lsrges, 494 +lsrlts, 494 +lsrgts, 494 +lsrles, 494 +lsrals, 494 +lsrlos, 494 +lsrccs, 494 +lsrhss, 494 +lsrcss, 494 +lsrnvs, 494 +mcr, 498 +mcreq, 498 +mcrne, 498 +mcrmi, 498 +mcrpl, 498 +mcrvs, 498 +mcrvc, 498 +mcrhi, 498 +mcrls, 498 +mcrge, 498 +mcrlt, 498 +mcrgt, 498 +mcrle, 498 +mcral, 498 +mcrlo, 498 +mcrcc, 498 +mcrhs, 498 +mcrcs, 498 +mcrnv, 498 +mcr2, 498 +mcr2eq, 498 +mcr2ne, 498 +mcr2mi, 498 +mcr2pl, 498 +mcr2vs, 498 +mcr2vc, 498 +mcr2hi, 498 +mcr2ls, 498 +mcr2ge, 498 +mcr2lt, 498 +mcr2gt, 498 +mcr2le, 498 +mcr2al, 498 +mcr2lo, 498 +mcr2cc, 498 +mcr2hs, 498 +mcr2cs, 498 +mcr2nv, 498 +mcrr, 500 +mcrreq, 500 +mcrrne, 500 +mcrrmi, 500 +mcrrpl, 500 +mcrrvs, 500 +mcrrvc, 500 +mcrrhi, 500 +mcrrls, 500 +mcrrge, 500 +mcrrlt, 500 +mcrrgt, 500 +mcrrle, 500 +mcrral, 500 +mcrrlo, 500 +mcrrcc, 500 +mcrrhs, 500 +mcrrcs, 500 +mcrrnv, 500 +mcrr2, 500 +mcrr2eq, 500 +mcrr2ne, 500 +mcrr2mi, 500 +mcrr2pl, 500 +mcrr2vs, 500 +mcrr2vc, 500 +mcrr2hi, 500 +mcrr2ls, 500 +mcrr2ge, 500 +mcrr2lt, 500 +mcrr2gt, 500 +mcrr2le, 500 +mcrr2al, 500 +mcrr2lo, 500 +mcrr2cc, 500 +mcrr2hs, 500 +mcrr2cs, 500 +mcrr2nv, 500 +mla, 502 +mlaeq, 502 +mlane, 502 +mlami, 502 +mlapl, 502 +mlavs, 502 +mlavc, 502 +mlahi, 502 +mlals, 502 +mlage, 502 +mlalt, 502 +mlagt, 502 +mlale, 502 +mlaal, 502 +mlalo, 502 +mlacc, 502 +mlahs, 502 +mlacs, 502 +mlanv, 502 +mlas, 502 +mlaeqs, 502 +mlanes, 502 +mlamis, 502 +mlapls, 502 +mlavss, 502 +mlavcs, 502 +mlahis, 502 +mlalss, 502 +mlages, 502 +mlalts, 502 +mlagts, 502 +mlales, 502 +mlaals, 502 +mlalos, 502 +mlaccs, 502 +mlahss, 502 +mlacss, 502 +mlanvs, 502 +mlaseq, 502 +mlasne, 502 +mlasmi, 502 +mlaspl, 502 +mlasvs, 502 +mlasvc, 502 +mlashi, 502 +mlasls, 502 +mlasge, 502 +mlaslt, 502 +mlasgt, 502 +mlasle, 502 +mlasal, 502 +mlaslo, 502 +mlascc, 502 +mlashs, 502 +mlascs, 502 +mlasnv, 502 +mls, 504 +mlseq, 504 +mlsne, 504 +mlsmi, 504 +mlspl, 504 +mlsvs, 504 +mlsvc, 504 +mlshi, 504 +mlsls, 504 +mlsge, 504 +mlslt, 504 +mlsgt, 504 +mlsle, 504 +mlsal, 504 +mlslo, 504 +mlscc, 504 +mlshs, 504 +mlscs, 504 +mlsnv, 504 +mov, 506 +moveq, 506 +movne, 506 +movmi, 506 +movpl, 506 +movvs, 506 +movvc, 506 +movhi, 506 +movls, 506 +movge, 506 +movlt, 506 +movgt, 506 +movle, 506 +moval, 
506 +movlo, 506 +movcc, 506 +movhs, 506 +movcs, 506 +movnv, 506 +movs, 506 +moveqs, 506 +movnes, 506 +movmis, 506 +movpls, 506 +movvss, 506 +movvcs, 506 +movhis, 506 +movlss, 506 +movges, 506 +movlts, 506 +movgts, 506 +movles, 506 +movals, 506 +movlos, 506 +movccs, 506 +movhss, 506 +movcss, 506 +movnvs, 506 +movseq, 506 +movsne, 506 +movsmi, 506 +movspl, 506 +movsvs, 506 +movsvc, 506 +movshi, 506 +movsls, 506 +movsge, 506 +movslt, 506 +movsgt, 506 +movsle, 506 +movsal, 506 +movslo, 506 +movscc, 506 +movshs, 506 +movscs, 506 +movsnv, 506 +movw, 506 +movweq, 506 +movwne, 506 +movwmi, 506 +movwpl, 506 +movwvs, 506 +movwvc, 506 +movwhi, 506 +movwls, 506 +movwge, 506 +movwlt, 506 +movwgt, 506 +movwle, 506 +movwal, 506 +movwlo, 506 +movwcc, 506 +movwhs, 506 +movwcs, 506 +movwnv, 506 +movt, 512 +moveqt, 512 +movnet, 512 +movmit, 512 +movplt, 512 +movvst, 512 +movvct, 512 +movhit, 512 +movlst, 512 +movget, 512 +movltt, 512 +movgtt, 512 +movlet, 512 +movalt, 512 +movlot, 512 +movcct, 512 +movhst, 512 +movcst, 512 +movnvt, 512 +mrc, 514 +mrceq, 514 +mrcne, 514 +mrcmi, 514 +mrcpl, 514 +mrcvs, 514 +mrcvc, 514 +mrchi, 514 +mrcls, 514 +mrcge, 514 +mrclt, 514 +mrcgt, 514 +mrcle, 514 +mrcal, 514 +mrclo, 514 +mrccc, 514 +mrchs, 514 +mrccs, 514 +mrcnv, 514 +mrc2, 514 +mrc2eq, 514 +mrc2ne, 514 +mrc2mi, 514 +mrc2pl, 514 +mrc2vs, 514 +mrc2vc, 514 +mrc2hi, 514 +mrc2ls, 514 +mrc2ge, 514 +mrc2lt, 514 +mrc2gt, 514 +mrc2le, 514 +mrc2al, 514 +mrc2lo, 514 +mrc2cc, 514 +mrc2hs, 514 +mrc2cs, 514 +mrc2nv, 514 +mrrc, 516 +mrrceq, 516 +mrrcne, 516 +mrrcmi, 516 +mrrcpl, 516 +mrrcvs, 516 +mrrcvc, 516 +mrrchi, 516 +mrrcls, 516 +mrrcge, 516 +mrrclt, 516 +mrrcgt, 516 +mrrcle, 516 +mrrcal, 516 +mrrclo, 516 +mrrccc, 516 +mrrchs, 516 +mrrccs, 516 +mrrcnv, 516 +mrrc2, 516 +mrrc2eq, 516 +mrrc2ne, 516 +mrrc2mi, 516 +mrrc2pl, 516 +mrrc2vs, 516 +mrrc2vc, 516 +mrrc2hi, 516 +mrrc2ls, 516 +mrrc2ge, 516 +mrrc2lt, 516 +mrrc2gt, 516 +mrrc2le, 516 +mrrc2al, 516 +mrrc2lo, 516 +mrrc2cc, 516 +mrrc2hs, 516 +mrrc2cs, 516 +mrrc2nv, 516 +mrs, 518 +mrseq, 518 +mrsne, 518 +mrsmi, 518 +mrspl, 518 +mrsvs, 518 +mrsvc, 518 +mrshi, 518 +mrsls, 518 +mrsge, 518 +mrslt, 518 +mrsgt, 518 +mrsle, 518 +mrsal, 518 +mrslo, 518 +mrscc, 518 +mrshs, 518 +mrscs, 518 +mrsnv, 518 +msr, 520 +msreq, 520 +msrne, 520 +msrmi, 520 +msrpl, 520 +msrvs, 520 +msrvc, 520 +msrhi, 520 +msrls, 520 +msrge, 520 +msrlt, 520 +msrgt, 520 +msrle, 520 +msral, 520 +msrlo, 520 +msrcc, 520 +msrhs, 520 +msrcs, 520 +msrnv, 520 +mul, 524 +muleq, 524 +mulne, 524 +mulmi, 524 +mulpl, 524 +mulvs, 524 +mulvc, 524 +mulhi, 524 +mulls, 524 +mulge, 524 +mullt, 524 +mulgt, 524 +mulle, 524 +mulal, 524 +mullo, 524 +mulcc, 524 +mulhs, 524 +mulcs, 524 +mulnv, 524 +muls, 524 +muleqs, 524 +mulnes, 524 +mulmis, 524 +mulpls, 524 +mulvss, 524 +mulvcs, 524 +mulhis, 524 +mullss, 524 +mulges, 524 +mullts, 524 +mulgts, 524 +mulles, 524 +mulals, 524 +mullos, 524 +mulccs, 524 +mulhss, 524 +mulcss, 524 +mulnvs, 524 +mvn, 526 +mvneq, 526 +mvnne, 526 +mvnmi, 526 +mvnpl, 526 +mvnvs, 526 +mvnvc, 526 +mvnhi, 526 +mvnls, 526 +mvnge, 526 +mvnlt, 526 +mvngt, 526 +mvnle, 526 +mvnal, 526 +mvnlo, 526 +mvncc, 526 +mvnhs, 526 +mvncs, 526 +mvnnv, 526 +mvns, 526 +mvneqs, 526 +mvnnes, 526 +mvnmis, 526 +mvnpls, 526 +mvnvss, 526 +mvnvcs, 526 +mvnhis, 526 +mvnlss, 526 +mvnges, 526 +mvnlts, 526 +mvngts, 526 +mvnles, 526 +mvnals, 526 +mvnlos, 526 +mvnccs, 526 +mvnhss, 526 +mvncss, 526 +mvnnvs, 526 +mvnseq, 526 +mvnsne, 526 +mvnsmi, 526 +mvnspl, 526 +mvnsvs, 526 +mvnsvc, 526 +mvnshi, 526 +mvnsls, 526 +mvnsge, 526 +mvnslt, 526 +mvnsgt, 526 
+mvnsle, 526 +mvnsal, 526 +mvnslo, 526 +mvnscc, 526 +mvnshs, 526 +mvnscs, 526 +mvnsnv, 526 +neg, 532 +nop, 534 +orn, 536 +orneq, 536 +ornne, 536 +ornmi, 536 +ornpl, 536 +ornvs, 536 +ornvc, 536 +ornhi, 536 +ornls, 536 +ornge, 536 +ornlt, 536 +orngt, 536 +ornle, 536 +ornal, 536 +ornlo, 536 +orncc, 536 +ornhs, 536 +orncs, 536 +ornnv, 536 +orns, 536 +orneqs, 536 +ornnes, 536 +ornmis, 536 +ornpls, 536 +ornvss, 536 +ornvcs, 536 +ornhis, 536 +ornlss, 536 +ornges, 536 +ornlts, 536 +orngts, 536 +ornles, 536 +ornals, 536 +ornlos, 536 +ornccs, 536 +ornhss, 536 +orncss, 536 +ornnvs, 536 +orr, 540 +orreq, 540 +orrne, 540 +orrmi, 540 +orrpl, 540 +orrvs, 540 +orrvc, 540 +orrhi, 540 +orrls, 540 +orrge, 540 +orrlt, 540 +orrgt, 540 +orrle, 540 +orral, 540 +orrlo, 540 +orrcc, 540 +orrhs, 540 +orrcs, 540 +orrnv, 540 +orrs, 540 +orreqs, 540 +orrnes, 540 +orrmis, 540 +orrpls, 540 +orrvss, 540 +orrvcs, 540 +orrhis, 540 +orrlss, 540 +orrges, 540 +orrlts, 540 +orrgts, 540 +orrles, 540 +orrals, 540 +orrlos, 540 +orrccs, 540 +orrhss, 540 +orrcss, 540 +orrnvs, 540 +orrseq, 540 +orrsne, 540 +orrsmi, 540 +orrspl, 540 +orrsvs, 540 +orrsvc, 540 +orrshi, 540 +orrsls, 540 +orrsge, 540 +orrslt, 540 +orrsgt, 540 +orrsle, 540 +orrsal, 540 +orrslo, 540 +orrscc, 540 +orrshs, 540 +orrscs, 540 +orrsnv, 540 +pkhbt, 546 +pkhbteq, 546 +pkhbtne, 546 +pkhbtmi, 546 +pkhbtpl, 546 +pkhbtvs, 546 +pkhbtvc, 546 +pkhbthi, 546 +pkhbtls, 546 +pkhbtge, 546 +pkhbtlt, 546 +pkhbtgt, 546 +pkhbtle, 546 +pkhbtal, 546 +pkhbtlo, 546 +pkhbtcc, 546 +pkhbths, 546 +pkhbtcs, 546 +pkhbtnv, 546 +pkhtb, 546 +pkhtbeq, 546 +pkhtbne, 546 +pkhtbmi, 546 +pkhtbpl, 546 +pkhtbvs, 546 +pkhtbvc, 546 +pkhtbhi, 546 +pkhtbls, 546 +pkhtbge, 546 +pkhtblt, 546 +pkhtbgt, 546 +pkhtble, 546 +pkhtbal, 546 +pkhtblo, 546 +pkhtbcc, 546 +pkhtbhs, 546 +pkhtbcs, 546 +pkhtbnv, 546 +pld, 548 +pldeq, 548 +pldne, 548 +pldmi, 548 +pldpl, 548 +pldvs, 548 +pldvc, 548 +pldhi, 548 +pldls, 548 +pldge, 548 +pldlt, 548 +pldgt, 548 +pldle, 548 +pldal, 548 +pldlo, 548 +pldcc, 548 +pldhs, 548 +pldcs, 548 +pldnv, 548 +pldw, 548 +pldweq, 548 +pldwne, 548 +pldwmi, 548 +pldwpl, 548 +pldwvs, 548 +pldwvc, 548 +pldwhi, 548 +pldwls, 548 +pldwge, 548 +pldwlt, 548 +pldwgt, 548 +pldwle, 548 +pldwal, 548 +pldwlo, 548 +pldwcc, 548 +pldwhs, 548 +pldwcs, 548 +pldwnv, 548 +pli, 554 +plieq, 554 +pline, 554 +plimi, 554 +plipl, 554 +plivs, 554 +plivc, 554 +plihi, 554 +plils, 554 +plige, 554 +plilt, 554 +pligt, 554 +plile, 554 +plial, 554 +plilo, 554 +plicc, 554 +plihs, 554 +plics, 554 +plinv, 554 +pop, 558 +popeq, 558 +popne, 558 +popmi, 558 +poppl, 558 +popvs, 558 +popvc, 558 +pophi, 558 +popls, 558 +popge, 558 +poplt, 558 +popgt, 558 +pople, 558 +popal, 558 +poplo, 558 +popcc, 558 +pophs, 558 +popcs, 558 +popnv, 558 +push, 560 +pusheq, 560 +pushne, 560 +pushmi, 560 +pushpl, 560 +pushvs, 560 +pushvc, 560 +pushhi, 560 +pushls, 560 +pushge, 560 +pushlt, 560 +pushgt, 560 +pushle, 560 +pushal, 560 +pushlo, 560 +pushcc, 560 +pushhs, 560 +pushcs, 560 +pushnv, 560 +qadd, 562 +qaddeq, 562 +qaddne, 562 +qaddmi, 562 +qaddpl, 562 +qaddvs, 562 +qaddvc, 562 +qaddhi, 562 +qaddls, 562 +qaddge, 562 +qaddlt, 562 +qaddgt, 562 +qaddle, 562 +qaddal, 562 +qaddlo, 562 +qaddcc, 562 +qaddhs, 562 +qaddcs, 562 +qaddnv, 562 +qadd16, 564 +qadd16eq, 564 +qadd16ne, 564 +qadd16mi, 564 +qadd16pl, 564 +qadd16vs, 564 +qadd16vc, 564 +qadd16hi, 564 +qadd16ls, 564 +qadd16ge, 564 +qadd16lt, 564 +qadd16gt, 564 +qadd16le, 564 +qadd16al, 564 +qadd16lo, 564 +qadd16cc, 564 +qadd16hs, 564 +qadd16cs, 564 +qadd16nv, 564 +qadd8, 566 +qadd8eq, 566 +qadd8ne, 566 
+qadd8mi, 566 +qadd8pl, 566 +qadd8vs, 566 +qadd8vc, 566 +qadd8hi, 566 +qadd8ls, 566 +qadd8ge, 566 +qadd8lt, 566 +qadd8gt, 566 +qadd8le, 566 +qadd8al, 566 +qadd8lo, 566 +qadd8cc, 566 +qadd8hs, 566 +qadd8cs, 566 +qadd8nv, 566 +qaddsubx,568 +qaddsubxeq, 568 +qaddsubxne, 568 +qaddsubxmi, 568 +qaddsubxpl, 568 +qaddsubxvs, 568 +qaddsubxvc, 568 +qaddsubxhi, 568 +qaddsubxls, 568 +qaddsubxge, 568 +qaddsubxlt, 568 +qaddsubxgt, 568 +qaddsubxle, 568 +qaddsubxal, 568 +qaddsubxlo, 568 +qaddsubxcc, 568 +qaddsubxhs, 568 +qaddsubxcs, 568 +qaddsubxnv, 568 +qasx, 568 +qasxeq, 568 +qasxne, 568 +qasxmi, 568 +qasxpl, 568 +qasxvs, 568 +qasxvc, 568 +qasxhi, 568 +qasxls, 568 +qasxge, 568 +qasxlt, 568 +qasxgt, 568 +qasxle, 568 +qasxal, 568 +qasxlo, 568 +qasxcc, 568 +qasxhs, 568 +qasxcs, 568 +qasxnv, 568 +qdadd, 570 +qdaddeq, 570 +qdaddne, 570 +qdaddmi, 570 +qdaddpl, 570 +qdaddvs, 570 +qdaddvc, 570 +qdaddhi, 570 +qdaddls, 570 +qdaddge, 570 +qdaddlt, 570 +qdaddgt, 570 +qdaddle, 570 +qdaddal, 570 +qdaddlo, 570 +qdaddcc, 570 +qdaddhs, 570 +qdaddcs, 570 +qdaddnv, 570 +qdsub, 572 +qdsubeq, 572 +qdsubne, 572 +qdsubmi, 572 +qdsubpl, 572 +qdsubvs, 572 +qdsubvc, 572 +qdsubhi, 572 +qdsubls, 572 +qdsubge, 572 +qdsublt, 572 +qdsubgt, 572 +qdsuble, 572 +qdsubal, 572 +qdsublo, 572 +qdsubcc, 572 +qdsubhs, 572 +qdsubcs, 572 +qdsubnv, 572 +qsubaddx,574 +qsubaddxeq, 574 +qsubaddxne, 574 +qsubaddxmi, 574 +qsubaddxpl, 574 +qsubaddxvs, 574 +qsubaddxvc, 574 +qsubaddxhi, 574 +qsubaddxls, 574 +qsubaddxge, 574 +qsubaddxlt, 574 +qsubaddxgt, 574 +qsubaddxle, 574 +qsubaddxal, 574 +qsubaddxlo, 574 +qsubaddxcc, 574 +qsubaddxhs, 574 +qsubaddxcs, 574 +qsubaddxnv, 574 +qsax, 574 +qsaxeq, 574 +qsaxne, 574 +qsaxmi, 574 +qsaxpl, 574 +qsaxvs, 574 +qsaxvc, 574 +qsaxhi, 574 +qsaxls, 574 +qsaxge, 574 +qsaxlt, 574 +qsaxgt, 574 +qsaxle, 574 +qsaxal, 574 +qsaxlo, 574 +qsaxcc, 574 +qsaxhs, 574 +qsaxcs, 574 +qsaxnv, 574 +qsub, 576 +qsubeq, 576 +qsubne, 576 +qsubmi, 576 +qsubpl, 576 +qsubvs, 576 +qsubvc, 576 +qsubhi, 576 +qsubls, 576 +qsubge, 576 +qsublt, 576 +qsubgt, 576 +qsuble, 576 +qsubal, 576 +qsublo, 576 +qsubcc, 576 +qsubhs, 576 +qsubcs, 576 +qsubnv, 576 +qsub16, 578 +qsub16eq, 578 +qsub16ne, 578 +qsub16mi, 578 +qsub16pl, 578 +qsub16vs, 578 +qsub16vc, 578 +qsub16hi, 578 +qsub16ls, 578 +qsub16ge, 578 +qsub16lt, 578 +qsub16gt, 578 +qsub16le, 578 +qsub16al, 578 +qsub16lo, 578 +qsub16cc, 578 +qsub16hs, 578 +qsub16cs, 578 +qsub16nv, 578 +qsub8, 580 +qsub8eq, 580 +qsub8ne, 580 +qsub8mi, 580 +qsub8pl, 580 +qsub8vs, 580 +qsub8vc, 580 +qsub8hi, 580 +qsub8ls, 580 +qsub8ge, 580 +qsub8lt, 580 +qsub8gt, 580 +qsub8le, 580 +qsub8al, 580 +qsub8lo, 580 +qsub8cc, 580 +qsub8hs, 580 +qsub8cs, 580 +qsub8nv, 580 +rbit, 582 +rbiteq, 582 +rbitne, 582 +rbitmi, 582 +rbitpl, 582 +rbitvs, 582 +rbitvc, 582 +rbithi, 582 +rbitls, 582 +rbitge, 582 +rbitlt, 582 +rbitgt, 582 +rbitle, 582 +rbital, 582 +rbitlo, 582 +rbitcc, 582 +rbiths, 582 +rbitcs, 582 +rbitnv, 582 +rev, 584 +reveq, 584 +revne, 584 +revmi, 584 +revpl, 584 +revvs, 584 +revvc, 584 +revhi, 584 +revls, 584 +revge, 584 +revlt, 584 +revgt, 584 +revle, 584 +reval, 584 +revlo, 584 +revcc, 584 +revhs, 584 +revcs, 584 +revnv, 584 +rev16, 586 +rev16eq, 586 +rev16ne, 586 +rev16mi, 586 +rev16pl, 586 +rev16vs, 586 +rev16vc, 586 +rev16hi, 586 +rev16ls, 586 +rev16ge, 586 +rev16lt, 586 +rev16gt, 586 +rev16le, 586 +rev16al, 586 +rev16lo, 586 +rev16cc, 586 +rev16hs, 586 +rev16cs, 586 +rev16nv, 586 +revsh, 588 +revsheq, 588 +revshne, 588 +revshmi, 588 +revshpl, 588 +revshvs, 588 +revshvc, 588 +revshhi, 588 +revshls, 588 +revshge, 588 
+revshlt, 588 +revshgt, 588 +revshle, 588 +revshal, 588 +revshlo, 588 +revshcc, 588 +revshhs, 588 +revshcs, 588 +revshnv, 588 +rfeda, 1574 +rfedaeq, 1574 +rfedane, 1574 +rfedami, 1574 +rfedapl, 1574 +rfedavs, 1574 +rfedavc, 1574 +rfedahi, 1574 +rfedals, 1574 +rfedage, 1574 +rfedalt, 1574 +rfedagt, 1574 +rfedale, 1574 +rfedaal, 1574 +rfedalo, 1574 +rfedacc, 1574 +rfedahs, 1574 +rfedacs, 1574 +rfedanv, 1574 +rfedb, 1574 +rfedbeq, 1574 +rfedbne, 1574 +rfedbmi, 1574 +rfedbpl, 1574 +rfedbvs, 1574 +rfedbvc, 1574 +rfedbhi, 1574 +rfedbls, 1574 +rfedbge, 1574 +rfedblt, 1574 +rfedbgt, 1574 +rfedble, 1574 +rfedbal, 1574 +rfedblo, 1574 +rfedbcc, 1574 +rfedbhs, 1574 +rfedbcs, 1574 +rfedbnv, 1574 +rfeia, 1574 +rfeiaeq, 1574 +rfeiane, 1574 +rfeiami, 1574 +rfeiapl, 1574 +rfeiavs, 1574 +rfeiavc, 1574 +rfeiahi, 1574 +rfeials, 1574 +rfeiage, 1574 +rfeialt, 1574 +rfeiagt, 1574 +rfeiale, 1574 +rfeiaal, 1574 +rfeialo, 1574 +rfeiacc, 1574 +rfeiahs, 1574 +rfeiacs, 1574 +rfeianv, 1574 +rfeib, 1574 +rfeibeq, 1574 +rfeibne, 1574 +rfeibmi, 1574 +rfeibpl, 1574 +rfeibvs, 1574 +rfeibvc, 1574 +rfeibhi, 1574 +rfeibls, 1574 +rfeibge, 1574 +rfeiblt, 1574 +rfeibgt, 1574 +rfeible, 1574 +rfeibal, 1574 +rfeiblo, 1574 +rfeibcc, 1574 +rfeibhs, 1574 +rfeibcs, 1574 +rfeibnv, 1574 +ror, 590 +roreq, 590 +rorne, 590 +rormi, 590 +rorpl, 590 +rorvs, 590 +rorvc, 590 +rorhi, 590 +rorls, 590 +rorge, 590 +rorlt, 590 +rorgt, 590 +rorle, 590 +roral, 590 +rorlo, 590 +rorcc, 590 +rorhs, 590 +rorcs, 590 +rornv, 590 +rors, 590 +roreqs, 590 +rornes, 590 +rormis, 590 +rorpls, 590 +rorvss, 590 +rorvcs, 590 +rorhis, 590 +rorlss, 590 +rorges, 590 +rorlts, 590 +rorgts, 590 +rorles, 590 +rorals, 590 +rorlos, 590 +rorccs, 590 +rorhss, 590 +rorcss, 590 +rornvs, 590 +rrx, 594 +rrxeq, 594 +rrxne, 594 +rrxmi, 594 +rrxpl, 594 +rrxvs, 594 +rrxvc, 594 +rrxhi, 594 +rrxls, 594 +rrxge, 594 +rrxlt, 594 +rrxgt, 594 +rrxle, 594 +rrxal, 594 +rrxlo, 594 +rrxcc, 594 +rrxhs, 594 +rrxcs, 594 +rrxnv, 594 +rrxs, 594 +rrxeqs, 594 +rrxnes, 594 +rrxmis, 594 +rrxpls, 594 +rrxvss, 594 +rrxvcs, 594 +rrxhis, 594 +rrxlss, 594 +rrxges, 594 +rrxlts, 594 +rrxgts, 594 +rrxles, 594 +rrxals, 594 +rrxlos, 594 +rrxccs, 594 +rrxhss, 594 +rrxcss, 594 +rrxnvs, 594 +rsb, 596 +rsbeq, 596 +rsbne, 596 +rsbmi, 596 +rsbpl, 596 +rsbvs, 596 +rsbvc, 596 +rsbhi, 596 +rsbls, 596 +rsbge, 596 +rsblt, 596 +rsbgt, 596 +rsble, 596 +rsbal, 596 +rsblo, 596 +rsbcc, 596 +rsbhs, 596 +rsbcs, 596 +rsbnv, 596 +rsbs, 596 +rsbeqs, 596 +rsbnes, 596 +rsbmis, 596 +rsbpls, 596 +rsbvss, 596 +rsbvcs, 596 +rsbhis, 596 +rsblss, 596 +rsbges, 596 +rsblts, 596 +rsbgts, 596 +rsbles, 596 +rsbals, 596 +rsblos, 596 +rsbccs, 596 +rsbhss, 596 +rsbcss, 596 +rsbnvs, 596 +rsbseq, 596 +rsbsne, 596 +rsbsmi, 596 +rsbspl, 596 +rsbsvs, 596 +rsbsvc, 596 +rsbshi, 596 +rsbsls, 596 +rsbsge, 596 +rsbslt, 596 +rsbsgt, 596 +rsbsle, 596 +rsbsal, 596 +rsbslo, 596 +rsbscc, 596 +rsbshs, 596 +rsbscs, 596 +rsbsnv, 596 +rsc, 602 +rsceq, 602 +rscne, 602 +rscmi, 602 +rscpl, 602 +rscvs, 602 +rscvc, 602 +rschi, 602 +rscls, 602 +rscge, 602 +rsclt, 602 +rscgt, 602 +rscle, 602 +rscal, 602 +rsclo, 602 +rsccc, 602 +rschs, 602 +rsccs, 602 +rscnv, 602 +rscs, 602 +rsceqs, 602 +rscnes, 602 +rscmis, 602 +rscpls, 602 +rscvss, 602 +rscvcs, 602 +rschis, 602 +rsclss, 602 +rscges, 602 +rsclts, 602 +rscgts, 602 +rscles, 602 +rscals, 602 +rsclos, 602 +rscccs, 602 +rschss, 602 +rsccss, 602 +rscnvs, 602 +rscseq, 602 +rscsne, 602 +rscsmi, 602 +rscspl, 602 +rscsvs, 602 +rscsvc, 602 +rscshi, 602 +rscsls, 602 +rscsge, 602 +rscslt, 602 +rscsgt, 602 +rscsle, 602 +rscsal, 602 +rscslo,
602 +rscscc, 602 +rscshs, 602 +rscscs, 602 +rscsnv, 602 +sadd16, 608 +sadd16eq, 608 +sadd16ne, 608 +sadd16mi, 608 +sadd16pl, 608 +sadd16vs, 608 +sadd16vc, 608 +sadd16hi, 608 +sadd16ls, 608 +sadd16ge, 608 +sadd16lt, 608 +sadd16gt, 608 +sadd16le, 608 +sadd16al, 608 +sadd16lo, 608 +sadd16cc, 608 +sadd16hs, 608 +sadd16cs, 608 +sadd16nv, 608 +sadd8, 610 +sadd8eq, 610 +sadd8ne, 610 +sadd8mi, 610 +sadd8pl, 610 +sadd8vs, 610 +sadd8vc, 610 +sadd8hi, 610 +sadd8ls, 610 +sadd8ge, 610 +sadd8lt, 610 +sadd8gt, 610 +sadd8le, 610 +sadd8al, 610 +sadd8lo, 610 +sadd8cc, 610 +sadd8hs, 610 +sadd8cs, 610 +sadd8nv, 610 +saddsubx,612 +saddsubxeq, 612 +saddsubxne, 612 +saddsubxmi, 612 +saddsubxpl, 612 +saddsubxvs, 612 +saddsubxvc, 612 +saddsubxhi, 612 +saddsubxls, 612 +saddsubxge, 612 +saddsubxlt, 612 +saddsubxgt, 612 +saddsubxle, 612 +saddsubxal, 612 +saddsubxlo, 612 +saddsubxcc, 612 +saddsubxhs, 612 +saddsubxcs, 612 +saddsubxnv, 612 +sasx, 612 +sasxeq, 612 +sasxne, 612 +sasxmi, 612 +sasxpl, 612 +sasxvs, 612 +sasxvc, 612 +sasxhi, 612 +sasxls, 612 +sasxge, 612 +sasxlt, 612 +sasxgt, 612 +sasxle, 612 +sasxal, 612 +sasxlo, 612 +sasxcc, 612 +sasxhs, 612 +sasxcs, 612 +sasxnv, 612 +sbc, 614 +sbceq, 614 +sbcne, 614 +sbcmi, 614 +sbcpl, 614 +sbcvs, 614 +sbcvc, 614 +sbchi, 614 +sbcls, 614 +sbcge, 614 +sbclt, 614 +sbcgt, 614 +sbcle, 614 +sbcal, 614 +sbclo, 614 +sbccc, 614 +sbchs, 614 +sbccs, 614 +sbcnv, 614 +sbcs, 614 +sbceqs, 614 +sbcnes, 614 +sbcmis, 614 +sbcpls, 614 +sbcvss, 614 +sbcvcs, 614 +sbchis, 614 +sbclss, 614 +sbcges, 614 +sbclts, 614 +sbcgts, 614 +sbcles, 614 +sbcals, 614 +sbclos, 614 +sbcccs, 614 +sbchss, 614 +sbccss, 614 +sbcnvs, 614 +sbcseq, 614 +sbcsne, 614 +sbcsmi, 614 +sbcspl, 614 +sbcsvs, 614 +sbcsvc, 614 +sbcshi, 614 +sbcsls, 614 +sbcsge, 614 +sbcslt, 614 +sbcsgt, 614 +sbcsle, 614 +sbcsal, 614 +sbcslo, 614 +sbcscc, 614 +sbcshs, 614 +sbcscs, 614 +sbcsnv, 614 +sbfx, 620 +sbfxeq, 620 +sbfxne, 620 +sbfxmi, 620 +sbfxpl, 620 +sbfxvs, 620 +sbfxvc, 620 +sbfxhi, 620 +sbfxls, 620 +sbfxge, 620 +sbfxlt, 620 +sbfxgt, 620 +sbfxle, 620 +sbfxal, 620 +sbfxlo, 620 +sbfxcc, 620 +sbfxhs, 620 +sbfxcs, 620 +sbfxnv, 620 +sdiv, 622 +sdiveq, 622 +sdivne, 622 +sdivmi, 622 +sdivpl, 622 +sdivvs, 622 +sdivvc, 622 +sdivhi, 622 +sdivls, 622 +sdivge, 622 +sdivlt, 622 +sdivgt, 622 +sdivle, 622 +sdival, 622 +sdivlo, 622 +sdivcc, 622 +sdivhs, 622 +sdivcs, 622 +sdivnv, 622 +sel, 622 +seleq, 622 +selne, 622 +selmi, 622 +selpl, 622 +selvs, 622 +selvc, 622 +selhi, 622 +sells, 622 +selge, 622 +sellt, 622 +selgt, 622 +selle, 622 +selal, 622 +sello, 622 +selcc, 622 +selhs, 622 +selcs, 622 +selnv, 622 +setend, 626 +sev, 628 +seveq, 628 +sevne, 628 +sevmi, 628 +sevpl, 628 +sevvs, 628 +sevvc, 628 +sevhi, 628 +sevls, 628 +sevge, 628 +sevlt, 628 +sevgt, 628 +sevle, 628 +seval, 628 +sevlo, 628 +sevcc, 628 +sevhs, 628 +sevcs, 628 +sevnv, 628 +shadd16,630 +shadd16eq, 630 +shadd16ne, 630 +shadd16mi, 630 +shadd16pl, 630 +shadd16vs, 630 +shadd16vc, 630 +shadd16hi, 630 +shadd16ls, 630 +shadd16ge, 630 +shadd16lt, 630 +shadd16gt, 630 +shadd16le, 630 +shadd16al, 630 +shadd16lo, 630 +shadd16cc, 630 +shadd16hs, 630 +shadd16cs, 630 +shadd16nv, 630 +shadd8, 632 +shadd8eq, 632 +shadd8ne, 632 +shadd8mi, 632 +shadd8pl, 632 +shadd8vs, 632 +shadd8vc, 632 +shadd8hi, 632 +shadd8ls, 632 +shadd8ge, 632 +shadd8lt, 632 +shadd8gt, 632 +shadd8le, 632 +shadd8al, 632 +shadd8lo, 632 +shadd8cc, 632 +shadd8hs, 632 +shadd8cs, 632 +shadd8nv, 632 +shaddsubx,634 +shaddsubxeq, 634 +shaddsubxne, 634 +shaddsubxmi, 634 +shaddsubxpl, 634 +shaddsubxvs, 634 +shaddsubxvc, 634 +shaddsubxhi, 634 
+shaddsubxls, 634 +shaddsubxge, 634 +shaddsubxlt, 634 +shaddsubxgt, 634 +shaddsubxle, 634 +shaddsubxal, 634 +shaddsubxlo, 634 +shaddsubxcc, 634 +shaddsubxhs, 634 +shaddsubxcs, 634 +shaddsubxnv, 634 +shasx, 634 +shasxeq, 634 +shasxne, 634 +shasxmi, 634 +shasxpl, 634 +shasxvs, 634 +shasxvc, 634 +shasxhi, 634 +shasxls, 634 +shasxge, 634 +shasxlt, 634 +shasxgt, 634 +shasxle, 634 +shasxal, 634 +shasxlo, 634 +shasxcc, 634 +shasxhs, 634 +shasxcs, 634 +shasxnv, 634 +shsubaddx,636 +shsubaddxeq, 636 +shsubaddxne, 636 +shsubaddxmi, 636 +shsubaddxpl, 636 +shsubaddxvs, 636 +shsubaddxvc, 636 +shsubaddxhi, 636 +shsubaddxls, 636 +shsubaddxge, 636 +shsubaddxlt, 636 +shsubaddxgt, 636 +shsubaddxle, 636 +shsubaddxal, 636 +shsubaddxlo, 636 +shsubaddxcc, 636 +shsubaddxhs, 636 +shsubaddxcs, 636 +shsubaddxnv, 636 +shsax, 636 +shsaxeq, 636 +shsaxne, 636 +shsaxmi, 636 +shsaxpl, 636 +shsaxvs, 636 +shsaxvc, 636 +shsaxhi, 636 +shsaxls, 636 +shsaxge, 636 +shsaxlt, 636 +shsaxgt, 636 +shsaxle, 636 +shsaxal, 636 +shsaxlo, 636 +shsaxcc, 636 +shsaxhs, 636 +shsaxcs, 636 +shsaxnv, 636 +shsub16,638 +shsub16eq, 638 +shsub16ne, 638 +shsub16mi, 638 +shsub16pl, 638 +shsub16vs, 638 +shsub16vc, 638 +shsub16hi, 638 +shsub16ls, 638 +shsub16ge, 638 +shsub16lt, 638 +shsub16gt, 638 +shsub16le, 638 +shsub16al, 638 +shsub16lo, 638 +shsub16cc, 638 +shsub16hs, 638 +shsub16cs, 638 +shsub16nv, 638 +shsub8, 640 +shsub8eq, 640 +shsub8ne, 640 +shsub8mi, 640 +shsub8pl, 640 +shsub8vs, 640 +shsub8vc, 640 +shsub8hi, 640 +shsub8ls, 640 +shsub8ge, 640 +shsub8lt, 640 +shsub8gt, 640 +shsub8le, 640 +shsub8al, 640 +shsub8lo, 640 +shsub8cc, 640 +shsub8hs, 640 +shsub8cs, 640 +shsub8nv, 640 +smc, 1576 +smi, 1576 +smlabb, 642 +smlabbeq, 642 +smlabbne, 642 +smlabbmi, 642 +smlabbpl, 642 +smlabbvs, 642 +smlabbvc, 642 +smlabbhi, 642 +smlabbls, 642 +smlabbge, 642 +smlabblt, 642 +smlabbgt, 642 +smlabble, 642 +smlabbal, 642 +smlabblo, 642 +smlabbcc, 642 +smlabbhs, 642 +smlabbcs, 642 +smlabbnv, 642 +smlabt, 642 +smlabteq, 642 +smlabtne, 642 +smlabtmi, 642 +smlabtpl, 642 +smlabtvs, 642 +smlabtvc, 642 +smlabthi, 642 +smlabtls, 642 +smlabtge, 642 +smlabtlt, 642 +smlabtgt, 642 +smlabtle, 642 +smlabtal, 642 +smlabtlo, 642 +smlabtcc, 642 +smlabths, 642 +smlabtcs, 642 +smlabtnv, 642 +smlatb, 642 +smlatbeq, 642 +smlatbne, 642 +smlatbmi, 642 +smlatbpl, 642 +smlatbvs, 642 +smlatbvc, 642 +smlatbhi, 642 +smlatbls, 642 +smlatbge, 642 +smlatblt, 642 +smlatbgt, 642 +smlatble, 642 +smlatbal, 642 +smlatblo, 642 +smlatbcc, 642 +smlatbhs, 642 +smlatbcs, 642 +smlatbnv, 642 +smlatt, 642 +smlatteq, 642 +smlattne, 642 +smlattmi, 642 +smlattpl, 642 +smlattvs, 642 +smlattvc, 642 +smlatthi, 642 +smlattls, 642 +smlattge, 642 +smlattlt, 642 +smlattgt, 642 +smlattle, 642 +smlattal, 642 +smlattlo, 642 +smlattcc, 642 +smlatths, 642 +smlattcs, 642 +smlattnv, 642 +smlad, 644 +smladeq, 644 +smladne, 644 +smladmi, 644 +smladpl, 644 +smladvs, 644 +smladvc, 644 +smladhi, 644 +smladls, 644 +smladge, 644 +smladlt, 644 +smladgt, 644 +smladle, 644 +smladal, 644 +smladlo, 644 +smladcc, 644 +smladhs, 644 +smladcs, 644 +smladnv, 644 +smladx, 644 +smladxeq, 644 +smladxne, 644 +smladxmi, 644 +smladxpl, 644 +smladxvs, 644 +smladxvc, 644 +smladxhi, 644 +smladxls, 644 +smladxge, 644 +smladxlt, 644 +smladxgt, 644 +smladxle, 644 +smladxal, 644 +smladxlo, 644 +smladxcc, 644 +smladxhs, 644 +smladxcs, 644 +smladxnv, 644 +smlal, 646 +smlaleq, 646 +smlalne, 646 +smlalmi, 646 +smlalpl, 646 +smlalvs, 646 +smlalvc, 646 +smlalhi, 646 +smlalls, 646 +smlalge, 646 +smlallt, 646 +smlalgt, 646 +smlalle, 646 +smlalal, 646 +smlallo, 
646 +smlalcc, 646 +smlalhs, 646 +smlalcs, 646 +smlalnv, 646 +smlals, 646 +smlaleqs, 646 +smlalnes, 646 +smlalmis, 646 +smlalpls, 646 +smlalvss, 646 +smlalvcs, 646 +smlalhis, 646 +smlallss, 646 +smlalges, 646 +smlallts, 646 +smlalgts, 646 +smlalles, 646 +smlalals, 646 +smlallos, 646 +smlalccs, 646 +smlalhss, 646 +smlalcss, 646 +smlalnvs, 646 +smlalseq,646 +smlalsne, 646 +smlalsmi, 646 +smlalspl, 646 +smlalsvs, 646 +smlalsvc, 646 +smlalshi, 646 +smlalsls, 646 +smlalsge, 646 +smlalslt, 646 +smlalsgt, 646 +smlalsle, 646 +smlalsal, 646 +smlalslo, 646 +smlalscc, 646 +smlalshs, 646 +smlalscs, 646 +smlalsnv, 646 +smlalbb,648 +smlalbbeq, 648 +smlalbbne, 648 +smlalbbmi, 648 +smlalbbpl, 648 +smlalbbvs, 648 +smlalbbvc, 648 +smlalbbhi, 648 +smlalbbls, 648 +smlalbbge, 648 +smlalbblt, 648 +smlalbbgt, 648 +smlalbble, 648 +smlalbbal, 648 +smlalbblo, 648 +smlalbbcc, 648 +smlalbbhs, 648 +smlalbbcs, 648 +smlalbbnv, 648 +smlalbt,648 +smlalbteq, 648 +smlalbtne, 648 +smlalbtmi, 648 +smlalbtpl, 648 +smlalbtvs, 648 +smlalbtvc, 648 +smlalbthi, 648 +smlalbtls, 648 +smlalbtge, 648 +smlalbtlt, 648 +smlalbtgt, 648 +smlalbtle, 648 +smlalbtal, 648 +smlalbtlo, 648 +smlalbtcc, 648 +smlalbths, 648 +smlalbtcs, 648 +smlalbtnv, 648 +smlaltb,648 +smlaltbeq, 648 +smlaltbne, 648 +smlaltbmi, 648 +smlaltbpl, 648 +smlaltbvs, 648 +smlaltbvc, 648 +smlaltbhi, 648 +smlaltbls, 648 +smlaltbge, 648 +smlaltblt, 648 +smlaltbgt, 648 +smlaltble, 648 +smlaltbal, 648 +smlaltblo, 648 +smlaltbcc, 648 +smlaltbhs, 648 +smlaltbcs, 648 +smlaltbnv, 648 +smlaltt,648 +smlaltteq, 648 +smlalttne, 648 +smlalttmi, 648 +smlalttpl, 648 +smlalttvs, 648 +smlalttvc, 648 +smlaltthi, 648 +smlalttls, 648 +smlalttge, 648 +smlalttlt, 648 +smlalttgt, 648 +smlalttle, 648 +smlalttal, 648 +smlalttlo, 648 +smlalttcc, 648 +smlaltths, 648 +smlalttcs, 648 +smlalttnv, 648 +smlald, 650 +smlaldeq, 650 +smlaldne, 650 +smlaldmi, 650 +smlaldpl, 650 +smlaldvs, 650 +smlaldvc, 650 +smlaldhi, 650 +smlaldls, 650 +smlaldge, 650 +smlaldlt, 650 +smlaldgt, 650 +smlaldle, 650 +smlaldal, 650 +smlaldlo, 650 +smlaldcc, 650 +smlaldhs, 650 +smlaldcs, 650 +smlaldnv, 650 +smlawb, 652 +smlawbeq, 652 +smlawbne, 652 +smlawbmi, 652 +smlawbpl, 652 +smlawbvs, 652 +smlawbvc, 652 +smlawbhi, 652 +smlawbls, 652 +smlawbge, 652 +smlawblt, 652 +smlawbgt, 652 +smlawble, 652 +smlawbal, 652 +smlawblo, 652 +smlawbcc, 652 +smlawbhs, 652 +smlawbcs, 652 +smlawbnv, 652 +smlawt, 652 +smlawteq, 652 +smlawtne, 652 +smlawtmi, 652 +smlawtpl, 652 +smlawtvs, 652 +smlawtvc, 652 +smlawthi, 652 +smlawtls, 652 +smlawtge, 652 +smlawtlt, 652 +smlawtgt, 652 +smlawtle, 652 +smlawtal, 652 +smlawtlo, 652 +smlawtcc, 652 +smlawths, 652 +smlawtcs, 652 +smlawtnv, 652 +smlsd, 654 +smlsdeq, 654 +smlsdne, 654 +smlsdmi, 654 +smlsdpl, 654 +smlsdvs, 654 +smlsdvc, 654 +smlsdhi, 654 +smlsdls, 654 +smlsdge, 654 +smlsdlt, 654 +smlsdgt, 654 +smlsdle, 654 +smlsdal, 654 +smlsdlo, 654 +smlsdcc, 654 +smlsdhs, 654 +smlsdcs, 654 +smlsdnv, 654 +smlsdx, 654 +smlsdxeq, 654 +smlsdxne, 654 +smlsdxmi, 654 +smlsdxpl, 654 +smlsdxvs, 654 +smlsdxvc, 654 +smlsdxhi, 654 +smlsdxls, 654 +smlsdxge, 654 +smlsdxlt, 654 +smlsdxgt, 654 +smlsdxle, 654 +smlsdxal, 654 +smlsdxlo, 654 +smlsdxcc, 654 +smlsdxhs, 654 +smlsdxcs, 654 +smlsdxnv, 654 +smlsld, 656 +smlsldeq, 656 +smlsldne, 656 +smlsldmi, 656 +smlsldpl, 656 +smlsldvs, 656 +smlsldvc, 656 +smlsldhi, 656 +smlsldls, 656 +smlsldge, 656 +smlsldlt, 656 +smlsldgt, 656 +smlsldle, 656 +smlsldal, 656 +smlsldlo, 656 +smlsldcc, 656 +smlsldhs, 656 +smlsldcs, 656 +smlsldnv, 656 +smlsldx, 656 +smlsldxeq, 656 +smlsldxne, 656 +smlsldxmi,
656 +smlsldxpl, 656 +smlsldxvs, 656 +smlsldxvc, 656 +smlsldxhi, 656 +smlsldxls, 656 +smlsldxge, 656 +smlsldxlt, 656 +smlsldxgt, 656 +smlsldxle, 656 +smlsldxal, 656 +smlsldxlo, 656 +smlsldxcc, 656 +smlsldxhs, 656 +smlsldxcs, 656 +smlsldxnv, 656 +smmla, 658 +smmlaeq, 658 +smmlane, 658 +smmlami, 658 +smmlapl, 658 +smmlavs, 658 +smmlavc, 658 +smmlahi, 658 +smmlals, 658 +smmlage, 658 +smmlalt, 658 +smmlagt, 658 +smmlale, 658 +smmlaal, 658 +smmlalo, 658 +smmlacc, 658 +smmlahs, 658 +smmlacs, 658 +smmlanv, 658 +smmlar, 658 +smmlareq, 658 +smmlarne, 658 +smmlarmi, 658 +smmlarpl, 658 +smmlarvs, 658 +smmlarvc, 658 +smmlarhi, 658 +smmlarls, 658 +smmlarge, 658 +smmlarlt, 658 +smmlargt, 658 +smmlarle, 658 +smmlaral, 658 +smmlarlo, 658 +smmlarcc, 658 +smmlarhs, 658 +smmlarcs, 658 +smmlarnv, 658 +smmls, 660 +smmlseq, 660 +smmlsne, 660 +smmlsmi, 660 +smmlspl, 660 +smmlsvs, 660 +smmlsvc, 660 +smmlshi, 660 +smmlsls, 660 +smmlsge, 660 +smmlslt, 660 +smmlsgt, 660 +smmlsle, 660 +smmlsal, 660 +smmlslo, 660 +smmlscc, 660 +smmlshs, 660 +smmlscs, 660 +smmlsnv, 660 +smmlsr, 660 +smmlsreq, 660 +smmlsrne, 660 +smmlsrmi, 660 +smmlsrpl, 660 +smmlsrvs, 660 +smmlsrvc, 660 +smmlsrhi, 660 +smmlsrls, 660 +smmlsrge, 660 +smmlsrlt, 660 +smmlsrgt, 660 +smmlsrle, 660 +smmlsral, 660 +smmlsrlo, 660 +smmlsrcc, 660 +smmlsrhs, 660 +smmlsrcs, 660 +smmlsrnv, 660 +smmul, 662 +smmuleq, 662 +smmulne, 662 +smmulmi, 662 +smmulpl, 662 +smmulvs, 662 +smmulvc, 662 +smmulhi, 662 +smmulls, 662 +smmulge, 662 +smmullt, 662 +smmulgt, 662 +smmulle, 662 +smmulal, 662 +smmullo, 662 +smmulcc, 662 +smmulhs, 662 +smmulcs, 662 +smmulnv, 662 +smmulr, 662 +smmulreq, 662 +smmulrne, 662 +smmulrmi, 662 +smmulrpl, 662 +smmulrvs, 662 +smmulrvc, 662 +smmulrhi, 662 +smmulrls, 662 +smmulrge, 662 +smmulrlt, 662 +smmulrgt, 662 +smmulrle, 662 +smmulral, 662 +smmulrlo, 662 +smmulrcc, 662 +smmulrhs, 662 +smmulrcs, 662 +smmulrnv, 662 +smuad, 664 +smuadeq, 664 +smuadne, 664 +smuadmi, 664 +smuadpl, 664 +smuadvs, 664 +smuadvc, 664 +smuadhi, 664 +smuadls, 664 +smuadge, 664 +smuadlt, 664 +smuadgt, 664 +smuadle, 664 +smuadal, 664 +smuadlo, 664 +smuadcc, 664 +smuadhs, 664 +smuadcs, 664 +smuadnv, 664 +smuadx, 664 +smuadxeq, 664 +smuadxne, 664 +smuadxmi, 664 +smuadxpl, 664 +smuadxvs, 664 +smuadxvc, 664 +smuadxhi, 664 +smuadxls, 664 +smuadxge, 664 +smuadxlt, 664 +smuadxgt, 664 +smuadxle, 664 +smuadxal, 664 +smuadxlo, 664 +smuadxcc, 664 +smuadxhs, 664 +smuadxcs, 664 +smuadxnv, 664 +smulbb, 666 +smulbbeq, 666 +smulbbne, 666 +smulbbmi, 666 +smulbbpl, 666 +smulbbvs, 666 +smulbbvc, 666 +smulbbhi, 666 +smulbbls, 666 +smulbbge, 666 +smulbblt, 666 +smulbbgt, 666 +smulbble, 666 +smulbbal, 666 +smulbblo, 666 +smulbbcc, 666 +smulbbhs, 666 +smulbbcs, 666 +smulbbnv, 666 +smulbt, 666 +smulbteq, 666 +smulbtne, 666 +smulbtmi, 666 +smulbtpl, 666 +smulbtvs, 666 +smulbtvc, 666 +smulbthi, 666 +smulbtls, 666 +smulbtge, 666 +smulbtlt, 666 +smulbtgt, 666 +smulbtle, 666 +smulbtal, 666 +smulbtlo, 666 +smulbtcc, 666 +smulbths, 666 +smulbtcs, 666 +smulbtnv, 666 +smultb, 666 +smultbeq, 666 +smultbne, 666 +smultbmi, 666 +smultbpl, 666 +smultbvs, 666 +smultbvc, 666 +smultbhi, 666 +smultbls, 666 +smultbge, 666 +smultblt, 666 +smultbgt, 666 +smultble, 666 +smultbal, 666 +smultblo, 666 +smultbcc, 666 +smultbhs, 666 +smultbcs, 666 +smultbnv, 666 +smultt, 666 +smultteq, 666 +smulttne, 666 +smulttmi, 666 +smulttpl, 666 +smulttvs, 666 +smulttvc, 666 +smultthi, 666 +smulttls, 666 +smulttge, 666 +smulttlt, 666 +smulttgt, 666 +smulttle, 666 +smulttal, 666 +smulttlo, 666 +smulttcc, 666 +smultths, 666 +smulttcs, 666 
+smulttnv, 666 +smull, 668 +smulleq, 668 +smullne, 668 +smullmi, 668 +smullpl, 668 +smullvs, 668 +smullvc, 668 +smullhi, 668 +smullls, 668 +smullge, 668 +smulllt, 668 +smullgt, 668 +smullle, 668 +smullal, 668 +smulllo, 668 +smullcc, 668 +smullhs, 668 +smullcs, 668 +smullnv, 668 +smulls, 668 +smulleqs, 668 +smullnes, 668 +smullmis, 668 +smullpls, 668 +smullvss, 668 +smullvcs, 668 +smullhis, 668 +smulllss, 668 +smullges, 668 +smulllts, 668 +smullgts, 668 +smullles, 668 +smullals, 668 +smulllos, 668 +smullccs, 668 +smullhss, 668 +smullcss, 668 +smullnvs, 668 +smullseq,668 +smullsne, 668 +smullsmi, 668 +smullspl, 668 +smullsvs, 668 +smullsvc, 668 +smullshi, 668 +smullsls, 668 +smullsge, 668 +smullslt, 668 +smullsgt, 668 +smullsle, 668 +smullsal, 668 +smullslo, 668 +smullscc, 668 +smullshs, 668 +smullscs, 668 +smullsnv, 668 +smulwb, 670 +smulwbeq, 670 +smulwbne, 670 +smulwbmi, 670 +smulwbpl, 670 +smulwbvs, 670 +smulwbvc, 670 +smulwbhi, 670 +smulwbls, 670 +smulwbge, 670 +smulwblt, 670 +smulwbgt, 670 +smulwble, 670 +smulwbal, 670 +smulwblo, 670 +smulwbcc, 670 +smulwbhs, 670 +smulwbcs, 670 +smulwbnv, 670 +smulwt, 670 +smulwteq, 670 +smulwtne, 670 +smulwtmi, 670 +smulwtpl, 670 +smulwtvs, 670 +smulwtvc, 670 +smulwthi, 670 +smulwtls, 670 +smulwtge, 670 +smulwtlt, 670 +smulwtgt, 670 +smulwtle, 670 +smulwtal, 670 +smulwtlo, 670 +smulwtcc, 670 +smulwths, 670 +smulwtcs, 670 +smulwtnv, 670 +smusd, 672 +smusdeq, 672 +smusdne, 672 +smusdmi, 672 +smusdpl, 672 +smusdvs, 672 +smusdvc, 672 +smusdhi, 672 +smusdls, 672 +smusdge, 672 +smusdlt, 672 +smusdgt, 672 +smusdle, 672 +smusdal, 672 +smusdlo, 672 +smusdcc, 672 +smusdhs, 672 +smusdcs, 672 +smusdnv, 672 +smusdx, 672 +smusdxeq, 672 +smusdxne, 672 +smusdxmi, 672 +smusdxpl, 672 +smusdxvs, 672 +smusdxvc, 672 +smusdxhi, 672 +smusdxls, 672 +smusdxge, 672 +smusdxlt, 672 +smusdxgt, 672 +smusdxle, 672 +smusdxal, 672 +smusdxlo, 672 +smusdxcc, 672 +smusdxhs, 672 +smusdxcs, 672 +smusdxnv, 672 +srsda, 1578 +srsdaeq, 1578 +srsdane, 1578 +srsdami, 1578 +srsdapl, 1578 +srsdavs, 1578 +srsdavc, 1578 +srsdahi, 1578 +srsdals, 1578 +srsdage, 1578 +srsdalt, 1578 +srsdagt, 1578 +srsdale, 1578 +srsdaal, 1578 +srsdalo, 1578 +srsdacc, 1578 +srsdahs, 1578 +srsdacs, 1578 +srsdanv, 1578 +srsdb, 1578 +srsdbeq, 1578 +srsdbne, 1578 +srsdbmi, 1578 +srsdbpl, 1578 +srsdbvs, 1578 +srsdbvc, 1578 +srsdbhi, 1578 +srsdbls, 1578 +srsdbge, 1578 +srsdblt, 1578 +srsdbgt, 1578 +srsdble, 1578 +srsdbal, 1578 +srsdblo, 1578 +srsdbcc, 1578 +srsdbhs, 1578 +srsdbcs, 1578 +srsdbnv, 1578 +srsia, 1578 +srsiaeq, 1578 +srsiane, 1578 +srsiami, 1578 +srsiapl, 1578 +srsiavs, 1578 +srsiavc, 1578 +srsiahi, 1578 +srsials, 1578 +srsiage, 1578 +srsialt, 1578 +srsiagt, 1578 +srsiale, 1578 +srsiaal, 1578 +srsialo, 1578 +srsiacc, 1578 +srsiahs, 1578 +srsiacs, 1578 +srsianv, 1578 +srsib, 1578 +srsibeq, 1578 +srsibne, 1578 +srsibmi, 1578 +srsibpl, 1578 +srsibvs, 1578 +srsibvc, 1578 +srsibhi, 1578 +srsibls, 1578 +srsibge, 1578 +srsiblt, 1578 +srsibgt, 1578 +srsible, 1578 +srsibal, 1578 +srsiblo, 1578 +srsibcc, 1578 +srsibhs, 1578 +srsibcs, 1578 +srsibnv, 1578 +ssat, 674 +ssateq, 674 +ssatne, 674 +ssatmi, 674 +ssatpl, 674 +ssatvs, 674 +ssatvc, 674 +ssathi, 674 +ssatls, 674 +ssatge, 674 +ssatlt, 674 +ssatgt, 674 +ssatle, 674 +ssatal, 674 +ssatlo, 674 +ssatcc, 674 +ssaths, 674 +ssatcs, 674 +ssatnv, 674 +ssat16, 676 +ssateq16, 676 +ssatne16, 676 +ssatmi16, 676 +ssatpl16, 676 +ssatvs16, 676 +ssatvc16, 676 +ssathi16, 676 +ssatls16, 676 +ssatge16, 676 +ssatlt16, 676 +ssatgt16, 676 +ssatle16, 676 +ssatal16, 676 +ssatlo16, 676 +ssatcc16, 
676 +ssaths16, 676 +ssatcs16, 676 +ssatnv16, 676 +ssubaddx,678 +ssubaddxeq, 678 +ssubaddxne, 678 +ssubaddxmi, 678 +ssubaddxpl, 678 +ssubaddxvs, 678 +ssubaddxvc, 678 +ssubaddxhi, 678 +ssubaddxls, 678 +ssubaddxge, 678 +ssubaddxlt, 678 +ssubaddxgt, 678 +ssubaddxle, 678 +ssubaddxal, 678 +ssubaddxlo, 678 +ssubaddxcc, 678 +ssubaddxhs, 678 +ssubaddxcs, 678 +ssubaddxnv, 678 +ssax, 678 +ssaxeq, 678 +ssaxne, 678 +ssaxmi, 678 +ssaxpl, 678 +ssaxvs, 678 +ssaxvc, 678 +ssaxhi, 678 +ssaxls, 678 +ssaxge, 678 +ssaxlt, 678 +ssaxgt, 678 +ssaxle, 678 +ssaxal, 678 +ssaxlo, 678 +ssaxcc, 678 +ssaxhs, 678 +ssaxcs, 678 +ssaxnv, 678 +ssub16, 680 +ssub16eq, 680 +ssub16ne, 680 +ssub16mi, 680 +ssub16pl, 680 +ssub16vs, 680 +ssub16vc, 680 +ssub16hi, 680 +ssub16ls, 680 +ssub16ge, 680 +ssub16lt, 680 +ssub16gt, 680 +ssub16le, 680 +ssub16al, 680 +ssub16lo, 680 +ssub16cc, 680 +ssub16hs, 680 +ssub16cs, 680 +ssub16nv, 680 +ssub8, 682 +ssub8eq, 682 +ssub8ne, 682 +ssub8mi, 682 +ssub8pl, 682 +ssub8vs, 682 +ssub8vc, 682 +ssub8hi, 682 +ssub8ls, 682 +ssub8ge, 682 +ssub8lt, 682 +ssub8gt, 682 +ssub8le, 682 +ssub8al, 682 +ssub8lo, 682 +ssub8cc, 682 +ssub8hs, 682 +ssub8cs, 682 +ssub8nv, 682 +stc, 684 +stceq, 684 +stcne, 684 +stcmi, 684 +stcpl, 684 +stcvs, 684 +stcvc, 684 +stchi, 684 +stcls, 684 +stcge, 684 +stclt, 684 +stcgt, 684 +stcle, 684 +stcal, 684 +stclo, 684 +stccc, 684 +stchs, 684 +stccs, 684 +stcnv, 684 +stcl, 684 +stceql, 684 +stcnel, 684 +stcmil, 684 +stcpll, 684 +stcvsl, 684 +stcvcl, 684 +stchil, 684 +stclsl, 684 +stcgel, 684 +stcltl, 684 +stcgtl, 684 +stclel, 684 +stcall, 684 +stclol, 684 +stcccl, 684 +stchsl, 684 +stccsl, 684 +stcnvl, 684 +stcleq, 684 +stclne, 684 +stclmi, 684 +stclpl, 684 +stclvs, 684 +stclvc, 684 +stclhi, 684 +stclls, 684 +stclge, 684 +stcllt, 684 +stclgt, 684 +stclle, 684 +stclal, 684 +stcllo, 684 +stclcc, 684 +stclhs, 684 +stclcs, 684 +stclnv, 684 +stc2, 684 +stc2eq, 684 +stc2ne, 684 +stc2mi, 684 +stc2pl, 684 +stc2vs, 684 +stc2vc, 684 +stc2hi, 684 +stc2ls, 684 +stc2ge, 684 +stc2lt, 684 +stc2gt, 684 +stc2le, 684 +stc2al, 684 +stc2lo, 684 +stc2cc, 684 +stc2hs, 684 +stc2cs, 684 +stc2nv, 684 +stc2l, 684 +stc2leq, 684 +stc2lne, 684 +stc2lmi, 684 +stc2lpl, 684 +stc2lvs, 684 +stc2lvc, 684 +stc2lhi, 684 +stc2lls, 684 +stc2lge, 684 +stc2llt, 684 +stc2lgt, 684 +stc2lle, 684 +stc2lal, 684 +stc2llo, 684 +stc2lcc, 684 +stc2lhs, 684 +stc2lcs, 684 +stc2lnv, 684 +stm, 686 +stmeq, 686 +stmne, 686 +stmmi, 686 +stmpl, 686 +stmvs, 686 +stmvc, 686 +stmhi, 686 +stmls, 686 +stmge, 686 +stmlt, 686 +stmgt, 686 +stmle, 686 +stmal, 686 +stmlo, 686 +stmcc, 686 +stmhs, 686 +stmcs, 686 +stmnv, 686 +stmia, 686 +stmeqia, 686 +stmneia, 686 +stmmiia, 686 +stmplia, 686 +stmvsia, 686 +stmvcia, 686 +stmhiia, 686 +stmlsia, 686 +stmgeia, 686 +stmltia, 686 +stmgtia, 686 +stmleia, 686 +stmalia, 686 +stmloia, 686 +stmccia, 686 +stmhsia, 686 +stmcsia, 686 +stmnvia, 686 +stmea, 686 +stmeqea, 686 +stmneea, 686 +stmmiea, 686 +stmplea, 686 +stmvsea, 686 +stmvcea, 686 +stmhiea, 686 +stmlsea, 686 +stmgeea, 686 +stmltea, 686 +stmgtea, 686 +stmleea, 686 +stmalea, 686 +stmloea, 686 +stmccea, 686 +stmhsea, 686 +stmcsea, 686 +stmnvea, 686 +stmib, 692 +stmeqib, 692 +stmneib, 692 +stmmiib, 692 +stmplib, 692 +stmvsib, 692 +stmvcib, 692 +stmhiib, 692 +stmlsib, 692 +stmgeib, 692 +stmltib, 692 +stmgtib, 692 +stmleib, 692 +stmalib, 692 +stmloib, 692 +stmccib, 692 +stmhsib, 692 +stmcsib, 692 +stmnvib, 692 +stmibeq,692 +stmibne, 692 +stmibmi, 692 +stmibpl, 692 +stmibvs, 692 +stmibvc, 692 +stmibhi, 692 +stmibls, 692 +stmibge, 692 +stmiblt, 692 +stmibgt, 692 
+stmible, 692 +stmibal, 692 +stmiblo, 692 +stmibcc, 692 +stmibhs, 692 +stmibcs, 692 +stmibnv, 692 +stmeqfa,692 +stmnefa, 692 +stmmifa, 692 +stmplfa, 692 +stmvsfa, 692 +stmvcfa, 692 +stmhifa, 692 +stmlsfa, 692 +stmgefa, 692 +stmltfa, 692 +stmgtfa, 692 +stmlefa, 692 +stmalfa, 692 +stmlofa, 692 +stmccfa, 692 +stmhsfa, 692 +stmcsfa, 692 +stmnvfa, 692 +stmda, 688 +stmeqda, 688 +stmneda, 688 +stmmida, 688 +stmplda, 688 +stmvsda, 688 +stmvcda, 688 +stmhida, 688 +stmlsda, 688 +stmgeda, 688 +stmltda, 688 +stmgtda, 688 +stmleda, 688 +stmalda, 688 +stmloda, 688 +stmccda, 688 +stmhsda, 688 +stmcsda, 688 +stmnvda, 688 +stmdaeq,688 +stmdane, 688 +stmdami, 688 +stmdapl, 688 +stmdavs, 688 +stmdavc, 688 +stmdahi, 688 +stmdals, 688 +stmdage, 688 +stmdalt, 688 +stmdagt, 688 +stmdale, 688 +stmdaal, 688 +stmdalo, 688 +stmdacc, 688 +stmdahs, 688 +stmdacs, 688 +stmdanv, 688 +stmeqed,688 +stmneed, 688 +stmmied, 688 +stmpled, 688 +stmvsed, 688 +stmvced, 688 +stmhied, 688 +stmlsed, 688 +stmgeed, 688 +stmlted, 688 +stmgted, 688 +stmleed, 688 +stmaled, 688 +stmloed, 688 +stmcced, 688 +stmhsed, 688 +stmcsed, 688 +stmnved, 688 +stmdb, 690 +stmeqdb, 690 +stmnedb, 690 +stmmidb, 690 +stmpldb, 690 +stmvsdb, 690 +stmvcdb, 690 +stmhidb, 690 +stmlsdb, 690 +stmgedb, 690 +stmltdb, 690 +stmgtdb, 690 +stmledb, 690 +stmaldb, 690 +stmlodb, 690 +stmccdb, 690 +stmhsdb, 690 +stmcsdb, 690 +stmnvdb, 690 +stmeqfd,690 +stmnefd, 690 +stmmifd, 690 +stmplfd, 690 +stmvsfd, 690 +stmvcfd, 690 +stmhifd, 690 +stmlsfd, 690 +stmgefd, 690 +stmltfd, 690 +stmgtfd, 690 +stmlefd, 690 +stmalfd, 690 +stmlofd, 690 +stmccfd, 690 +stmhsfd, 690 +stmcsfd, 690 +stmnvfd, 690 +stmdbeq,690 +stmdbne, 690 +stmdbmi, 690 +stmdbpl, 690 +stmdbvs, 690 +stmdbvc, 690 +stmdbhi, 690 +stmdbls, 690 +stmdbge, 690 +stmdblt, 690 +stmdbgt, 690 +stmdble, 690 +stmdbal, 690 +stmdblo, 690 +stmdbcc, 690 +stmdbhs, 690 +stmdbcs, 690 +stmdbnv, 690 +str, 694 +streq, 694 +strne, 694 +strmi, 694 +strpl, 694 +strvs, 694 +strvc, 694 +strhi, 694 +strls, 694 +strge, 694 +strlt, 694 +strgt, 694 +strle, 694 +stral, 694 +strlo, 694 +strcc, 694 +strhs, 694 +strcs, 694 +strnv, 694 +strb, 700 +streqb, 700 +strneb, 700 +strmib, 700 +strplb, 700 +strvsb, 700 +strvcb, 700 +strhib, 700 +strlsb, 700 +strgeb, 700 +strltb, 700 +strgtb, 700 +strleb, 700 +stralb, 700 +strlob, 700 +strccb, 700 +strhsb, 700 +strcsb, 700 +strnvb, 700 +strbeq, 700 +strbne, 700 +strbmi, 700 +strbpl, 700 +strbvs, 700 +strbvc, 700 +strbhi, 700 +strbls, 700 +strbge, 700 +strblt, 700 +strbgt, 700 +strble, 700 +strbal, 700 +strblo, 700 +strbcc, 700 +strbhs, 700 +strbcs, 700 +strbnv, 700 +strbt, 706 +streqbt, 706 +strnebt, 706 +strmibt, 706 +strplbt, 706 +strvsbt, 706 +strvcbt, 706 +strhibt, 706 +strlsbt, 706 +strgebt, 706 +strltbt, 706 +strgtbt, 706 +strlebt, 706 +stralbt, 706 +strlobt, 706 +strccbt, 706 +strhsbt, 706 +strcsbt, 706 +strnvbt, 706 +strbteq, 706 +strbtne, 706 +strbtmi, 706 +strbtpl, 706 +strbtvs, 706 +strbtvc, 706 +strbthi, 706 +strbtls, 706 +strbtge, 706 +strbtlt, 706 +strbtgt, 706 +strbtle, 706 +strbtal, 706 +strbtlo, 706 +strbtcc, 706 +strbths, 706 +strbtcs, 706 +strbtnv, 706 +strd, 708 +streqd, 708 +strned, 708 +strmid, 708 +strpld, 708 +strvsd, 708 +strvcd, 708 +strhid, 708 +strlsd, 708 +strged, 708 +strltd, 708 +strgtd, 708 +strled, 708 +strald, 708 +strlod, 708 +strccd, 708 +strhsd, 708 +strcsd, 708 +strnvd, 708 +strdeq, 708 +strdne, 708 +strdmi, 708 +strdpl, 708 +strdvs, 708 +strdvc, 708 +strdhi, 708 +strdls, 708 +strdge, 708 +strdlt, 708 +strdgt, 708 +strdle, 708 +strdal, 708 +strdlo, 708 +strdcc, 708 +strdhs, 708
+strdcs, 708 +strdnv, 708 +strex, 712 +strexeq, 712 +strexne, 712 +strexmi, 712 +strexpl, 712 +strexvs, 712 +strexvc, 712 +strexhi, 712 +strexls, 712 +strexge, 712 +strexlt, 712 +strexgt, 712 +strexle, 712 +strexal, 712 +strexlo, 712 +strexcc, 712 +strexhs, 712 +strexcs, 712 +strexnv, 712 +strexb, 714 +strexbeq, 714 +strexbne, 714 +strexbmi, 714 +strexbpl, 714 +strexbvs, 714 +strexbvc, 714 +strexbhi, 714 +strexbls, 714 +strexbge, 714 +strexblt, 714 +strexbgt, 714 +strexble, 714 +strexbal, 714 +strexblo, 714 +strexbcc, 714 +strexbhs, 714 +strexbcs, 714 +strexbnv, 714 +strexd, 716 +strexdeq, 716 +strexdne, 716 +strexdmi, 716 +strexdpl, 716 +strexdvs, 716 +strexdvc, 716 +strexdhi, 716 +strexdls, 716 +strexdge, 716 +strexdlt, 716 +strexdgt, 716 +strexdle, 716 +strexdal, 716 +strexdlo, 716 +strexdcc, 716 +strexdhs, 716 +strexdcs, 716 +strexdnv, 716 +strexh, 718 +strexheq, 718 +strexhne, 718 +strexhmi, 718 +strexhpl, 718 +strexhvs, 718 +strexhvc, 718 +strexhhi, 718 +strexhls, 718 +strexhge, 718 +strexhlt, 718 +strexhgt, 718 +strexhle, 718 +strexhal, 718 +strexhlo, 718 +strexhcc, 718 +strexhhs, 718 +strexhcs, 718 +strexhnv, 718 +strh, 720 +streqh, 720 +strneh, 720 +strmih, 720 +strplh, 720 +strvsh, 720 +strvch, 720 +strhih, 720 +strlsh, 720 +strgeh, 720 +strlth, 720 +strgth, 720 +strleh, 720 +stralh, 720 +strloh, 720 +strcch, 720 +strhsh, 720 +strcsh, 720 +strnvh, 720 +strheq, 720 +strhne, 720 +strhmi, 720 +strhpl, 720 +strhvs, 720 +strhvc, 720 +strhhi, 720 +strhls, 720 +strhge, 720 +strhlt, 720 +strhgt, 720 +strhle, 720 +strhal, 720 +strhlo, 720 +strhcc, 720 +strhhs, 720 +strhcs, 720 +strhnv, 720 +strht, 726 +strhteq, 726 +strhtne, 726 +strhtmi, 726 +strhtpl, 726 +strhtvs, 726 +strhtvc, 726 +strhthi, 726 +strhtls, 726 +strhtge, 726 +strhtlt, 726 +strhtgt, 726 +strhtle, 726 +strhtal, 726 +strhtlo, 726 +strhtcc, 726 +strhths, 726 +strhtcs, 726 +strhtnv, 726 +strt, 728 +streqt, 728 +strnet, 728 +strmit, 728 +strplt, 728 +strvst, 728 +strvct, 728 +strhit, 728 +strlst, 728 +strget, 728 +strltt, 728 +strgtt, 728 +strlet, 728 +stralt, 728 +strlot, 728 +strcct, 728 +strhst, 728 +strcst, 728 +strnvt, 728 +strteq, 728 +strtne, 728 +strtmi, 728 +strtpl, 728 +strtvs, 728 +strtvc, 728 +strthi, 728 +strtls, 728 +strtge, 728 +strtlt, 728 +strtgt, 728 +strtle, 728 +strtal, 728 +strtlo, 728 +strtcc, 728 +strths, 728 +strtcs, 728 +strtnv, 728 +sub, 730 +subeq, 730 +subne, 730 +submi, 730 +subpl, 730 +subvs, 730 +subvc, 730 +subhi, 730 +subls, 730 +subge, 730 +sublt, 730 +subgt, 730 +suble, 730 +subal, 730 +sublo, 730 +subcc, 730 +subhs, 730 +subcs, 730 +subnv, 730 +subs, 730 +subeqs, 730 +subnes, 730 +submis, 730 +subpls, 730 +subvss, 730 +subvcs, 730 +subhis, 730 +sublss, 730 +subges, 730 +sublts, 730 +subgts, 730 +subles, 730 +subals, 730 +sublos, 730 +subccs, 730 +subhss, 730 +subcss, 730 +subnvs, 730 +subseq, 730 +subsne, 730 +subsmi, 730 +subspl, 730 +subsvs, 730 +subsvc, 730 +subshi, 730 +subsls, 730 +subsge, 730 +subslt, 730 +subsgt, 730 +subsle, 730 +subsal, 730 +subslo, 730 +subscc, 730 +subshs, 730 +subscs, 730 +subsnv, 730 +svc, 742 +svceq, 742 +svcne, 742 +svcmi, 742 +svcpl, 742 +svcvs, 742 +svcvc, 742 +svchi, 742 +svcls, 742 +svcge, 742 +svclt, 742 +svcgt, 742 +svcle, 742 +svcal, 742 +svclo, 742 +svccc, 742 +svchs, 742 +svccs, 742 +svcnv, 742 +swi, 742 +swieq, 742 +swine, 742 +swimi, 742 +swipl, 742 +swivs, 742 +swivc, 742 +swihi, 742 +swils, 742 +swige, 742 +swilt, 742 +swigt, 742 +swile, 742 +swial, 742 +swilo, 742 +swicc, 742 +swihs, 742 +swics, 742 +swinv, 742 +swp, 744 +swpeq, 744 +swpne, 744
+swpmi, 744 +swppl, 744 +swpvs, 744 +swpvc, 744 +swphi, 744 +swpls, 744 +swpge, 744 +swplt, 744 +swpgt, 744 +swple, 744 +swpal, 744 +swplo, 744 +swpcc, 744 +swphs, 744 +swpcs, 744 +swpnv, 744 +swpb, 744 +swpeqb, 744 +swpneb, 744 +swpmib, 744 +swpplb, 744 +swpvsb, 744 +swpvcb, 744 +swphib, 744 +swplsb, 744 +swpgeb, 744 +swpltb, 744 +swpgtb, 744 +swpleb, 744 +swpalb, 744 +swplob, 744 +swpccb, 744 +swphsb, 744 +swpcsb, 744 +swpnvb, 744 +sxtab, 746 +sxtabeq, 746 +sxtabne, 746 +sxtabmi, 746 +sxtabpl, 746 +sxtabvs, 746 +sxtabvc, 746 +sxtabhi, 746 +sxtabls, 746 +sxtabge, 746 +sxtablt, 746 +sxtabgt, 746 +sxtable, 746 +sxtabal, 746 +sxtablo, 746 +sxtabcc, 746 +sxtabhs, 746 +sxtabcs, 746 +sxtabnv, 746 +sxtab16, 748 +sxtab16eq, 748 +sxtab16ne, 748 +sxtab16mi, 748 +sxtab16pl, 748 +sxtab16vs, 748 +sxtab16vc, 748 +sxtab16hi, 748 +sxtab16ls, 748 +sxtab16ge, 748 +sxtab16lt, 748 +sxtab16gt, 748 +sxtab16le, 748 +sxtab16al, 748 +sxtab16lo, 748 +sxtab16cc, 748 +sxtab16hs, 748 +sxtab16cs, 748 +sxtab16nv, 748 +sxtah, 750 +sxtaheq, 750 +sxtahne, 750 +sxtahmi, 750 +sxtahpl, 750 +sxtahvs, 750 +sxtahvc, 750 +sxtahhi, 750 +sxtahls, 750 +sxtahge, 750 +sxtahlt, 750 +sxtahgt, 750 +sxtahle, 750 +sxtahal, 750 +sxtahlo, 750 +sxtahcc, 750 +sxtahhs, 750 +sxtahcs, 750 +sxtahnv, 750 +sxtb, 752 +sxtbeq, 752 +sxtbne, 752 +sxtbmi, 752 +sxtbpl, 752 +sxtbvs, 752 +sxtbvc, 752 +sxtbhi, 752 +sxtbls, 752 +sxtbge, 752 +sxtblt, 752 +sxtbgt, 752 +sxtble, 752 +sxtbal, 752 +sxtblo, 752 +sxtbcc, 752 +sxtbhs, 752 +sxtbcs, 752 +sxtbnv, 752 +sxtb16, 754 +sxtb16eq, 754 +sxtb16ne, 754 +sxtb16mi, 754 +sxtb16pl, 754 +sxtb16vs, 754 +sxtb16vc, 754 +sxtb16hi, 754 +sxtb16ls, 754 +sxtb16ge, 754 +sxtb16lt, 754 +sxtb16gt, 754 +sxtb16le, 754 +sxtb16al, 754 +sxtb16lo, 754 +sxtb16cc, 754 +sxtb16hs, 754 +sxtb16cs, 754 +sxtb16nv, 754 +sxth, 756 +sxtheq, 756 +sxthne, 756 +sxthmi, 756 +sxthpl, 756 +sxthvs, 756 +sxthvc, 756 +sxthhi, 756 +sxthls, 756 +sxthge, 756 +sxthlt, 756 +sxthgt, 756 +sxthle, 756 +sxthal, 756 +sxthlo, 756 +sxthcc, 756 +sxthhs, 756 +sxthcs, 756 +sxthnv, 756 +tbb, 758 +tbbeq, 758 +tbbne, 758 +tbbmi, 758 +tbbpl, 758 +tbbvs, 758 +tbbvc, 758 +tbbhi, 758 +tbbls, 758 +tbbge, 758 +tbblt, 758 +tbbgt, 758 +tbble, 758 +tbbal, 758 +tbblo, 758 +tbbcc, 758 +tbbhs, 758 +tbbcs, 758 +tbbnv, 758 +tbh, 758 +tbheq, 758 +tbhne, 758 +tbhmi, 758 +tbhpl, 758 +tbhvs, 758 +tbhvc, 758 +tbhhi, 758 +tbhls, 758 +tbhge, 758 +tbhlt, 758 +tbhgt, 758 +tbhle, 758 +tbhal, 758 +tbhlo, 758 +tbhcc, 758 +tbhhs, 758 +tbhcs, 758 +tbhnv, 758 +teq, 760 +teqeq, 760 +teqne, 760 +teqmi, 760 +teqpl, 760 +teqvs, 760 +teqvc, 760 +teqhi, 760 +teqls, 760 +teqge, 760 +teqlt, 760 +teqgt, 760 +teqle, 760 +teqal, 760 +teqlo, 760 +teqcc, 760 +teqhs, 760 +teqcs, 760 +teqnv, 760 +tst, 766 +tsteq, 766 +tstne, 766 +tstmi, 766 +tstpl, 766 +tstvs, 766 +tstvc, 766 +tsthi, 766 +tstls, 766 +tstge, 766 +tstlt, 766 +tstgt, 766 +tstle, 766 +tstal, 766 +tstlo, 766 +tstcc, 766 +tsths, 766 +tstcs, 766 +tstnv, 766 +uadd16, 772 +uadd16eq, 772 +uadd16ne, 772 +uadd16mi, 772 +uadd16pl, 772 +uadd16vs, 772 +uadd16vc, 772 +uadd16hi, 772 +uadd16ls, 772 +uadd16ge, 772 +uadd16lt, 772 +uadd16gt, 772 +uadd16le, 772 +uadd16al, 772 +uadd16lo, 772 +uadd16cc, 772 +uadd16hs, 772 +uadd16cs, 772 +uadd16nv, 772 +uadd8, 774 +uadd8eq, 774 +uadd8ne, 774 +uadd8mi, 774 +uadd8pl, 774 +uadd8vs, 774 +uadd8vc, 774 +uadd8hi, 774 +uadd8ls, 774 +uadd8ge, 774 +uadd8lt, 774 +uadd8gt, 774 +uadd8le, 774 +uadd8al, 774 +uadd8lo, 774 +uadd8cc, 774 +uadd8hs, 774 +uadd8cs, 774 +uadd8nv, 774 +uaddsubx, 776 +uaddsubxeq, 776 +uaddsubxne, 776 +uaddsubxmi, 776
+uaddsubxpl, 776 +uaddsubxvs, 776 +uaddsubxvc, 776 +uaddsubxhi, 776 +uaddsubxls, 776 +uaddsubxge, 776 +uaddsubxlt, 776 +uaddsubxgt, 776 +uaddsubxle, 776 +uaddsubxal, 776 +uaddsubxlo, 776 +uaddsubxcc, 776 +uaddsubxhs, 776 +uaddsubxcs, 776 +uaddsubxnv, 776 +uasx, 776 +uasxeq, 776 +uasxne, 776 +uasxmi, 776 +uasxpl, 776 +uasxvs, 776 +uasxvc, 776 +uasxhi, 776 +uasxls, 776 +uasxge, 776 +uasxlt, 776 +uasxgt, 776 +uasxle, 776 +uasxal, 776 +uasxlo, 776 +uasxcc, 776 +uasxhs, 776 +uasxcs, 776 +uasxnv, 776 +ubfx, 778 +ubfxeq, 778 +ubfxne, 778 +ubfxmi, 778 +ubfxpl, 778 +ubfxvs, 778 +ubfxvc, 778 +ubfxhi, 778 +ubfxls, 778 +ubfxge, 778 +ubfxlt, 778 +ubfxgt, 778 +ubfxle, 778 +ubfxal, 778 +ubfxlo, 778 +ubfxcc, 778 +ubfxhs, 778 +ubfxcs, 778 +ubfxnv, 778 +udiv, 780 +udiveq, 780 +udivne, 780 +udivmi, 780 +udivpl, 780 +udivvs, 780 +udivvc, 780 +udivhi, 780 +udivls, 780 +udivge, 780 +udivlt, 780 +udivgt, 780 +udivle, 780 +udival, 780 +udivlo, 780 +udivcc, 780 +udivhs, 780 +udivcs, 780 +udivnv, 780 +uhadd16, 782 +uhadd16eq, 782 +uhadd16ne, 782 +uhadd16mi, 782 +uhadd16pl, 782 +uhadd16vs, 782 +uhadd16vc, 782 +uhadd16hi, 782 +uhadd16ls, 782 +uhadd16ge, 782 +uhadd16lt, 782 +uhadd16gt, 782 +uhadd16le, 782 +uhadd16al, 782 +uhadd16lo, 782 +uhadd16cc, 782 +uhadd16hs, 782 +uhadd16cs, 782 +uhadd16nv, 782 +uhadd8, 784 +uhadd8eq, 784 +uhadd8ne, 784 +uhadd8mi, 784 +uhadd8pl, 784 +uhadd8vs, 784 +uhadd8vc, 784 +uhadd8hi, 784 +uhadd8ls, 784 +uhadd8ge, 784 +uhadd8lt, 784 +uhadd8gt, 784 +uhadd8le, 784 +uhadd8al, 784 +uhadd8lo, 784 +uhadd8cc, 784 +uhadd8hs, 784 +uhadd8cs, 784 +uhadd8nv, 784 +uhaddsubx, 786 +uhaddsubxeq, 786 +uhaddsubxne, 786 +uhaddsubxmi, 786 +uhaddsubxpl, 786 +uhaddsubxvs, 786 +uhaddsubxvc, 786 +uhaddsubxhi, 786 +uhaddsubxls, 786 +uhaddsubxge, 786 +uhaddsubxlt, 786 +uhaddsubxgt, 786 +uhaddsubxle, 786 +uhaddsubxal, 786 +uhaddsubxlo, 786 +uhaddsubxcc, 786 +uhaddsubxhs, 786 +uhaddsubxcs, 786 +uhaddsubxnv, 786 +uhasx, 786 +uhasxeq, 786 +uhasxne, 786 +uhasxmi, 786 +uhasxpl, 786 +uhasxvs, 786 +uhasxvc, 786 +uhasxhi, 786 +uhasxls, 786 +uhasxge, 786 +uhasxlt, 786 +uhasxgt, 786 +uhasxle, 786 +uhasxal, 786 +uhasxlo, 786 +uhasxcc, 786 +uhasxhs, 786 +uhasxcs, 786 +uhasxnv, 786 +uhsubaddx,788 +uhsubaddxeq, 788 +uhsubaddxne, 788 +uhsubaddxmi, 788 +uhsubaddxpl, 788 +uhsubaddxvs, 788 +uhsubaddxvc, 788 +uhsubaddxhi, 788 +uhsubaddxls, 788 +uhsubaddxge, 788 +uhsubaddxlt, 788 +uhsubaddxgt, 788 +uhsubaddxle, 788 +uhsubaddxal, 788 +uhsubaddxlo, 788 +uhsubaddxcc, 788 +uhsubaddxhs, 788 +uhsubaddxcs, 788 +uhsubaddxnv, 788 +uhsax, 788 +uhsaxeq, 788 +uhsaxne, 788 +uhsaxmi, 788 +uhsaxpl, 788 +uhsaxvs, 788 +uhsaxvc, 788 +uhsaxhi, 788 +uhsaxls, 788 +uhsaxge, 788 +uhsaxlt, 788 +uhsaxgt, 788 +uhsaxle, 788 +uhsaxal, 788 +uhsaxlo, 788 +uhsaxcc, 788 +uhsaxhs, 788 +uhsaxcs, 788 +uhsaxnv, 788 +uhsub16, 790 +uhsub16eq, 790 +uhsub16ne, 790 +uhsub16mi, 790 +uhsub16pl, 790 +uhsub16vs, 790 +uhsub16vc, 790 +uhsub16hi, 790 +uhsub16ls, 790 +uhsub16ge, 790 +uhsub16lt, 790 +uhsub16gt, 790 +uhsub16le, 790 +uhsub16al, 790 +uhsub16lo, 790 +uhsub16cc, 790 +uhsub16hs, 790 +uhsub16cs, 790 +uhsub16nv, 790 +uhsub8, 792 +uhsub8eq, 792 +uhsub8ne, 792 +uhsub8mi, 792 +uhsub8pl, 792 +uhsub8vs, 792 +uhsub8vc, 792 +uhsub8hi, 792 +uhsub8ls, 792 +uhsub8ge, 792 +uhsub8lt, 792 +uhsub8gt, 792 +uhsub8le, 792 +uhsub8al, 792 +uhsub8lo, 792 +uhsub8cc, 792 +uhsub8hs, 792 +uhsub8cs, 792 +uhsub8nv, 792 +umaal, 794 +umaaleq, 794 +umaalne, 794 +umaalmi, 794 +umaalpl, 794 +umaalvs, 794 +umaalvc, 794 +umaalhi, 794 +umaalls, 794 +umaalge, 794 +umaallt, 794 +umaalgt, 794 +umaalle, 794 
+umaalal, 794 +umaallo, 794 +umaalcc, 794 +umaalhs, 794 +umaalcs, 794 +umaalnv, 794 +umlal, 796 +umlaleq, 796 +umlalne, 796 +umlalmi, 796 +umlalpl, 796 +umlalvs, 796 +umlalvc, 796 +umlalhi, 796 +umlalls, 796 +umlalge, 796 +umlallt, 796 +umlalgt, 796 +umlalle, 796 +umlalal, 796 +umlallo, 796 +umlalcc, 796 +umlalhs, 796 +umlalcs, 796 +umlalnv, 796 +umlals, 796 +umlaleqs, 796 +umlalnes, 796 +umlalmis, 796 +umlalpls, 796 +umlalvss, 796 +umlalvcs, 796 +umlalhis, 796 +umlallss, 796 +umlalges, 796 +umlallts, 796 +umlalgts, 796 +umlalles, 796 +umlalals, 796 +umlallos, 796 +umlalccs, 796 +umlalhss, 796 +umlalcss, 796 +umlalnvs, 796 +umlals, 796 +umlalseq, 796 +umlalsne, 796 +umlalsmi, 796 +umlalspl, 796 +umlalsvs, 796 +umlalsvc, 796 +umlalshi, 796 +umlalsls, 796 +umlalsge, 796 +umlalslt, 796 +umlalsgt, 796 +umlalsle, 796 +umlalsal, 796 +umlalslo, 796 +umlalscc, 796 +umlalshs, 796 +umlalscs, 796 +umlalsnv, 796 +umull, 798 +umulleq, 798 +umullne, 798 +umullmi, 798 +umullpl, 798 +umullvs, 798 +umullvc, 798 +umullhi, 798 +umullls, 798 +umullge, 798 +umulllt, 798 +umullgt, 798 +umullle, 798 +umullal, 798 +umulllo, 798 +umullcc, 798 +umullhs, 798 +umullcs, 798 +umullnv, 798 +umulls, 798 +umulleqs, 798 +umullnes, 798 +umullmis, 798 +umullpls, 798 +umullvss, 798 +umullvcs, 798 +umullhis, 798 +umulllss, 798 +umullges, 798 +umulllts, 798 +umullgts, 798 +umullles, 798 +umullals, 798 +umulllos, 798 +umullccs, 798 +umullhss, 798 +umullcss, 798 +umullnvs, 798 +umulls, 798 +umullseq, 798 +umullsne, 798 +umullsmi, 798 +umullspl, 798 +umullsvs, 798 +umullsvc, 798 +umullshi, 798 +umullsls, 798 +umullsge, 798 +umullslt, 798 +umullsgt, 798 +umullsle, 798 +umullsal, 798 +umullslo, 798 +umullscc, 798 +umullshs, 798 +umullscs, 798 +umullsnv, 798 +uqadd16, 800 +uqadd16eq, 800 +uqadd16ne, 800 +uqadd16mi, 800 +uqadd16pl, 800 +uqadd16vs, 800 +uqadd16vc, 800 +uqadd16hi, 800 +uqadd16ls, 800 +uqadd16ge, 800 +uqadd16lt, 800 +uqadd16gt, 800 +uqadd16le, 800 +uqadd16al, 800 +uqadd16lo, 800 +uqadd16cc, 800 +uqadd16hs, 800 +uqadd16cs, 800 +uqadd16nv, 800 +uqadd8, 802 +uqadd8eq, 802 +uqadd8ne, 802 +uqadd8mi, 802 +uqadd8pl, 802 +uqadd8vs, 802 +uqadd8vc, 802 +uqadd8hi, 802 +uqadd8ls, 802 +uqadd8ge, 802 +uqadd8lt, 802 +uqadd8gt, 802 +uqadd8le, 802 +uqadd8al, 802 +uqadd8lo, 802 +uqadd8cc, 802 +uqadd8hs, 802 +uqadd8cs, 802 +uqadd8nv, 802 +uqaddsubx, 804 +uqaddsubxeq, 804 +uqaddsubxne, 804 +uqaddsubxmi, 804 +uqaddsubxpl, 804 +uqaddsubxvs, 804 +uqaddsubxvc, 804 +uqaddsubxhi, 804 +uqaddsubxls, 804 +uqaddsubxge, 804 +uqaddsubxlt, 804 +uqaddsubxgt, 804 +uqaddsubxle, 804 +uqaddsubxal, 804 +uqaddsubxlo, 804 +uqaddsubxcc, 804 +uqaddsubxhs, 804 +uqaddsubxcs, 804 +uqaddsubxnv, 804 +uqasx, 804 +uqasxeq, 804 +uqasxne, 804 +uqasxmi, 804 +uqasxpl, 804 +uqasxvs, 804 +uqasxvc, 804 +uqasxhi, 804 +uqasxls, 804 +uqasxge, 804 +uqasxlt, 804 +uqasxgt, 804 +uqasxle, 804 +uqasxal, 804 +uqasxlo, 804 +uqasxcc, 804 +uqasxhs, 804 +uqasxcs, 804 +uqasxnv, 804 +uqsubaddx, 806 +uqsubaddxeq, 806 +uqsubaddxne, 806 +uqsubaddxmi, 806 +uqsubaddxpl, 806 +uqsubaddxvs, 806 +uqsubaddxvc, 806 +uqsubaddxhi, 806 +uqsubaddxls, 806 +uqsubaddxge, 806 +uqsubaddxlt, 806 +uqsubaddxgt, 806 +uqsubaddxle, 806 +uqsubaddxal, 806 +uqsubaddxlo, 806 +uqsubaddxcc, 806 +uqsubaddxhs, 806 +uqsubaddxcs, 806 +uqsubaddxnv, 806 +uqsax, 806 +uqsaxeq, 806 +uqsaxne, 806 +uqsaxmi, 806 +uqsaxpl, 806 +uqsaxvs, 806 +uqsaxvc, 806 +uqsaxhi, 806 +uqsaxls, 806 +uqsaxge, 806 +uqsaxlt, 806 +uqsaxgt, 806 +uqsaxle, 806 +uqsaxal, 806 +uqsaxlo, 806 +uqsaxcc, 806 +uqsaxhs, 806 +uqsaxcs, 806 +uqsaxnv, 806 +uqsub16, 808 
+uqsub16eq, 808 +uqsub16ne, 808 +uqsub16mi, 808 +uqsub16pl, 808 +uqsub16vs, 808 +uqsub16vc, 808 +uqsub16hi, 808 +uqsub16ls, 808 +uqsub16ge, 808 +uqsub16lt, 808 +uqsub16gt, 808 +uqsub16le, 808 +uqsub16al, 808 +uqsub16lo, 808 +uqsub16cc, 808 +uqsub16hs, 808 +uqsub16cs, 808 +uqsub16nv, 808 +uqsub8, 810 +uqsub8eq, 810 +uqsub8ne, 810 +uqsub8mi, 810 +uqsub8pl, 810 +uqsub8vs, 810 +uqsub8vc, 810 +uqsub8hi, 810 +uqsub8ls, 810 +uqsub8ge, 810 +uqsub8lt, 810 +uqsub8gt, 810 +uqsub8le, 810 +uqsub8al, 810 +uqsub8lo, 810 +uqsub8cc, 810 +uqsub8hs, 810 +uqsub8cs, 810 +uqsub8nv, 810 +usad8, 812 +usad8eq, 812 +usad8ne, 812 +usad8mi, 812 +usad8pl, 812 +usad8vs, 812 +usad8vc, 812 +usad8hi, 812 +usad8ls, 812 +usad8ge, 812 +usad8lt, 812 +usad8gt, 812 +usad8le, 812 +usad8al, 812 +usad8lo, 812 +usad8cc, 812 +usad8hs, 812 +usad8cs, 812 +usad8nv, 812 +usada8, 814 +usada8eq, 814 +usada8ne, 814 +usada8mi, 814 +usada8pl, 814 +usada8vs, 814 +usada8vc, 814 +usada8hi, 814 +usada8ls, 814 +usada8ge, 814 +usada8lt, 814 +usada8gt, 814 +usada8le, 814 +usada8al, 814 +usada8lo, 814 +usada8cc, 814 +usada8hs, 814 +usada8cs, 814 +usada8nv, 814 +usat, 816 +usateq, 816 +usatne, 816 +usatmi, 816 +usatpl, 816 +usatvs, 816 +usatvc, 816 +usathi, 816 +usatls, 816 +usatge, 816 +usatlt, 816 +usatgt, 816 +usatle, 816 +usatal, 816 +usatlo, 816 +usatcc, 816 +usaths, 816 +usatcs, 816 +usatnv, 816 +usat16, 818 +usat16eq, 818 +usat16ne, 818 +usat16mi, 818 +usat16pl, 818 +usat16vs, 818 +usat16vc, 818 +usat16hi, 818 +usat16ls, 818 +usat16ge, 818 +usat16lt, 818 +usat16gt, 818 +usat16le, 818 +usat16al, 818 +usat16lo, 818 +usat16cc, 818 +usat16hs, 818 +usat16cs, 818 +usat16nv, 818 +usubaddx, 820 +usubaddxeq, 820 +usubaddxne, 820 +usubaddxmi, 820 +usubaddxpl, 820 +usubaddxvs, 820 +usubaddxvc, 820 +usubaddxhi, 820 +usubaddxls, 820 +usubaddxge, 820 +usubaddxlt, 820 +usubaddxgt, 820 +usubaddxle, 820 +usubaddxal, 820 +usubaddxlo, 820 +usubaddxcc, 820 +usubaddxhs, 820 +usubaddxcs, 820 +usubaddxnv, 820 +usax, 820 +usaxeq, 820 +usaxne, 820 +usaxmi, 820 +usaxpl, 820 +usaxvs, 820 +usaxvc, 820 +usaxhi, 820 +usaxls, 820 +usaxge, 820 +usaxlt, 820 +usaxgt, 820 +usaxle, 820 +usaxal, 820 +usaxlo, 820 +usaxcc, 820 +usaxhs, 820 +usaxcs, 820 +usaxnv, 820 +usub16, 822 +usub16eq, 822 +usub16ne, 822 +usub16mi, 822 +usub16pl, 822 +usub16vs, 822 +usub16vc, 822 +usub16hi, 822 +usub16ls, 822 +usub16ge, 822 +usub16lt, 822 +usub16gt, 822 +usub16le, 822 +usub16al, 822 +usub16lo, 822 +usub16cc, 822 +usub16hs, 822 +usub16cs, 822 +usub16nv, 822 +usub8, 824 +usub8eq, 824 +usub8ne, 824 +usub8mi, 824 +usub8pl, 824 +usub8vs, 824 +usub8vc, 824 +usub8hi, 824 +usub8ls, 824 +usub8ge, 824 +usub8lt, 824 +usub8gt, 824 +usub8le, 824 +usub8al, 824 +usub8lo, 824 +usub8cc, 824 +usub8hs, 824 +usub8cs, 824 +usub8nv, 824 +uxtab, 826 +uxtabeq, 826 +uxtabne, 826 +uxtabmi, 826 +uxtabpl, 826 +uxtabvs, 826 +uxtabvc, 826 +uxtabhi, 826 +uxtabls, 826 +uxtabge, 826 +uxtablt, 826 +uxtabgt, 826 +uxtable, 826 +uxtabal, 826 +uxtablo, 826 +uxtabcc, 826 +uxtabhs, 826 +uxtabcs, 826 +uxtabnv, 826 +uxtab16, 828 +uxtab16eq, 828 +uxtab16ne, 828 +uxtab16mi, 828 +uxtab16pl, 828 +uxtab16vs, 828 +uxtab16vc, 828 +uxtab16hi, 828 +uxtab16ls, 828 +uxtab16ge, 828 +uxtab16lt, 828 +uxtab16gt, 828 +uxtab16le, 828 +uxtab16al, 828 +uxtab16lo, 828 +uxtab16cc, 828 +uxtab16hs, 828 +uxtab16cs, 828 +uxtab16nv, 828 +uxtah, 830 +uxtaheq, 830 +uxtahne, 830 +uxtahmi, 830 +uxtahpl, 830 +uxtahvs, 830 +uxtahvc, 830 +uxtahhi, 830 +uxtahls, 830 +uxtahge, 830 +uxtahlt, 830 +uxtahgt, 830 +uxtahle, 830 +uxtahal, 830 +uxtahlo, 830 +uxtahcc, 830 
+uxtahhs, 830 +uxtahcs, 830 +uxtahnv, 830 +uxtb, 832 +uxtbeq, 832 +uxtbne, 832 +uxtbmi, 832 +uxtbpl, 832 +uxtbvs, 832 +uxtbvc, 832 +uxtbhi, 832 +uxtbls, 832 +uxtbge, 832 +uxtblt, 832 +uxtbgt, 832 +uxtble, 832 +uxtbal, 832 +uxtblo, 832 +uxtbcc, 832 +uxtbhs, 832 +uxtbcs, 832 +uxtbnv, 832 +uxtb16, 834 +uxtb16eq, 834 +uxtb16ne, 834 +uxtb16mi, 834 +uxtb16pl, 834 +uxtb16vs, 834 +uxtb16vc, 834 +uxtb16hi, 834 +uxtb16ls, 834 +uxtb16ge, 834 +uxtb16lt, 834 +uxtb16gt, 834 +uxtb16le, 834 +uxtb16al, 834 +uxtb16lo, 834 +uxtb16cc, 834 +uxtb16hs, 834 +uxtb16cs, 834 +uxtb16nv, 834 +uxth, 836 +uxtheq, 836 +uxthne, 836 +uxthmi, 836 +uxthpl, 836 +uxthvs, 836 +uxthvc, 836 +uxthhi, 836 +uxthls, 836 +uxthge, 836 +uxthlt, 836 +uxthgt, 836 +uxthle, 836 +uxthal, 836 +uxthlo, 836 +uxthcc, 836 +uxthhs, 836 +uxthcs, 836 +uxthnv, 836 +vaba, 838 +vabal, 838 +vabd, 840 +vabdl, 840 +vabs, 844 +vacge, 846 +vacgt, 846 +vacle, 846 +vaclt, 846 +vadd, 848 +vaddhn, 852 +vaddl, 854 +vaddw, 854 +vand, 856 +vbic, 858 +vbif, 862 +vbit, 862 +vbsl, 862 +vceq, 864 +vcge, 868 +vcgt, 872 +vcle, 876 +vcls, 878 +vclt, 880 +vclz, 882 +vcmp, 884 +vcmpeq, 884 +vcmpne, 884 +vcmpmi, 884 +vcmppl, 884 +vcmpvs, 884 +vcmpvc, 884 +vcmphi, 884 +vcmpls, 884 +vcmpge, 884 +vcmplt, 884 +vcmpgt, 884 +vcmple, 884 +vcmpal, 884 +vcmplo, 884 +vcmpcc, 884 +vcmphs, 884 +vcmpcs, 884 +vcmpnv, 884 +vcmpe, 884 +vcmpeeq, 884 +vcmpene, 884 +vcmpemi, 884 +vcmpepl, 884 +vcmpevs, 884 +vcmpevc, 884 +vcmpehi, 884 +vcmpels, 884 +vcmpege, 884 +vcmpelt, 884 +vcmpegt, 884 +vcmpele, 884 +vcmpeal, 884 +vcmpelo, 884 +vcmpecc, 884 +vcmpehs, 884 +vcmpecs, 884 +vcmpenv, 884 +vcnt, 886 +vcvt, 888 +vcvteq, 888 +vcvtne, 888 +vcvtmi, 888 +vcvtpl, 888 +vcvtvs, 888 +vcvtvc, 888 +vcvthi, 888 +vcvtls, 888 +vcvtge, 888 +vcvtlt, 888 +vcvtgt, 888 +vcvtle, 888 +vcvtal, 888 +vcvtlo, 888 +vcvtcc, 888 +vcvths, 888 +vcvtcs, 888 +vcvtnv, 888 +vcvtr, 890 +vcvtreq, 890 +vcvtrne, 890 +vcvtrmi, 890 +vcvtrpl, 890 +vcvtrvs, 890 +vcvtrvc, 890 +vcvtrhi, 890 +vcvtrls, 890 +vcvtrge, 890 +vcvtrlt, 890 +vcvtrgt, 890 +vcvtrle, 890 +vcvtral, 890 +vcvtrlo, 890 +vcvtrcc, 890 +vcvtrhs, 890 +vcvtrcs, 890 +vcvtrnv, 890 +vcvtb, 900 +vcvtbeq, 900 +vcvtbne, 900 +vcvtbmi, 900 +vcvtbpl, 900 +vcvtbvs, 900 +vcvtbvc, 900 +vcvtbhi, 900 +vcvtbls, 900 +vcvtbge, 900 +vcvtblt, 900 +vcvtbgt, 900 +vcvtble, 900 +vcvtbal, 900 +vcvtblo, 900 +vcvtbcc, 900 +vcvtbhs, 900 +vcvtbcs, 900 +vcvtbnv, 900 +vcvtt, 900 +vcvtteq, 900 +vcvttne, 900 +vcvttmi, 900 +vcvttpl, 900 +vcvttvs, 900 +vcvttvc, 900 +vcvtthi, 900 +vcvttls, 900 +vcvttge, 900 +vcvttlt, 900 +vcvttgt, 900 +vcvttle, 900 +vcvttal, 900 +vcvttlo, 900 +vcvttcc, 900 +vcvtths, 900 +vcvttcs, 900 +vcvttnv, 900 +vdiv, 902 +vdiveq, 902 +vdivne, 902 +vdivmi, 902 +vdivpl, 902 +vdivvs, 902 +vdivvc, 902 +vdivhi, 902 +vdivls, 902 +vdivge, 902 +vdivlt, 902 +vdivgt, 902 +vdivle, 902 +vdival, 902 +vdivlo, 902 +vdivcc, 902 +vdivhs, 902 +vdivcs, 902 +vdivnv, 902 +vdup, 904 +veor, 908 +vext, 910 +vhadd, 912 +vhsub, 912 +vld1, 914 +vld2, 920 +vld3, 926 +vld4, 932 +vldmia, 938 +vldmiaeq, 938 +vldmiane, 938 +vldmiami, 938 +vldmiapl, 938 +vldmiavs, 938 +vldmiavc, 938 +vldmiahi, 938 +vldmials, 938 +vldmiage, 938 +vldmialt, 938 +vldmiagt, 938 +vldmiale, 938 +vldmiaal, 938 +vldmialo, 938 +vldmiacc, 938 +vldmiahs, 938 +vldmiacs, 938 +vldmianv, 938 +vldmdb, 938 +vldmdbeq, 938 +vldmdbne, 938 +vldmdbmi, 938 +vldmdbpl, 938 +vldmdbvs, 938 +vldmdbvc, 938 +vldmdbhi, 938 +vldmdbls, 938 +vldmdbge, 938 +vldmdblt, 938 +vldmdbgt, 938 +vldmdble, 938 +vldmdbal, 938 +vldmdblo, 938 +vldmdbcc, 939 +vldmdbhs, 939 +vldmdbcs, 939 
+vldmdbnv, 939 +vldr, 940 +vldreq, 940 +vldrne, 940 +vldrmi, 940 +vldrpl, 940 +vldrvs, 940 +vldrvc, 940 +vldrhi, 940 +vldrls, 940 +vldrge, 940 +vldrlt, 940 +vldrgt, 940 +vldrle, 940 +vldral, 940 +vldrlo, 940 +vldrcc, 940 +vldrhs, 940 +vldrcs, 940 +vldrnv, 940 +vmax, 942 +vmin, 942 +vmla, 946 +vmlal, 946 +vmls, 946 +vmlsl, 946 +vmov, 952 +vmoveq, 956 +vmovne, 956 +vmovmi, 956 +vmovpl, 956 +vmovvs, 956 +vmovvc, 956 +vmovhi, 956 +vmovls, 956 +vmovge, 956 +vmovlt, 956 +vmovgt, 956 +vmovle, 956 +vmoval, 956 +vmovlo, 956 +vmovcc, 956 +vmovhs, 956 +vmovcs, 956 +vmovnv, 956 +vmovl, 966 +vmovn, 968 +vmrs, 970 +vmrseq, 970 +vmrsne, 970 +vmrsmi, 970 +vmrspl, 970 +vmrsvs, 970 +vmrsvc, 970 +vmrshi, 970 +vmrsls, 970 +vmrsge, 970 +vmrslt, 970 +vmrsgt, 970 +vmrsle, 970 +vmrsal, 970 +vmrslo, 970 +vmrscc, 970 +vmrshs, 970 +vmrscs, 970 +vmrsnv, 970 +vmsr, 972 +vmsreq, 972 +vmsrne, 972 +vmsrmi, 972 +vmsrpl, 972 +vmsrvs, 972 +vmsrvc, 972 +vmsrhi, 972 +vmsrls, 972 +vmsrge, 972 +vmsrlt, 972 +vmsrgt, 972 +vmsrle, 972 +vmsral, 972 +vmsrlo, 972 +vmsrcc, 972 +vmsrhs, 972 +vmsrcs, 972 +vmsrnv, 972 +vmul, 974 +vmull, 974 +vmvn, 980 +vneg, 984 +vnmla, 986 +vnmlaeq, 986 +vnmlane, 986 +vnmlami, 986 +vnmlapl, 986 +vnmlavs, 986 +vnmlavc, 986 +vnmlahi, 986 +vnmlals, 986 +vnmlage, 986 +vnmlalt, 986 +vnmlagt, 986 +vnmlale, 986 +vnmlaal, 986 +vnmlalo, 986 +vnmlacc, 986 +vnmlahs, 986 +vnmlacs, 986 +vnmlanv, 986 +vnmls, 986 +vnmlseq, 986 +vnmlsne, 986 +vnmlsmi, 986 +vnmlspl, 986 +vnmlsvs, 986 +vnmlsvc, 986 +vnmlshi, 986 +vnmlsls, 986 +vnmlsge, 986 +vnmlslt, 986 +vnmlsgt, 986 +vnmlsle, 986 +vnmlsal, 986 +vnmlslo, 986 +vnmlscc, 986 +vnmlshs, 986 +vnmlscs, 986 +vnmlsnv, 986 +vnmul, 986 +vnmuleq, 986 +vnmulne, 986 +vnmulmi, 986 +vnmulpl, 986 +vnmulvs, 986 +vnmulvc, 986 +vnmulhi, 986 +vnmulls, 986 +vnmulge, 986 +vnmullt, 986 +vnmulgt, 986 +vnmulle, 986 +vnmulal, 986 +vnmullo, 986 +vnmulcc, 986 +vnmulhs, 986 +vnmulcs, 986 +vnmulnv, 986 +vorn, 988 +vorr, 990 +vpadal, 994 +vpadd, 996 +vpaddl, 1000 +vpmax, 1002 +vpmin, 1002 +vpop, 1006 +vpopeq, 1006 +vpopne, 1006 +vpopmi, 1006 +vpoppl, 1006 +vpopvs, 1006 +vpopvc, 1006 +vpophi, 1006 +vpopls, 1006 +vpopge, 1006 +vpoplt, 1006 +vpopgt, 1006 +vpople, 1006 +vpopal, 1006 +vpoplo, 1006 +vpopcc, 1006 +vpophs, 1006 +vpopcs, 1006 +vpopnv, 1006 +vpush, 1008 +vpusheq, 1008 +vpushne, 1008 +vpushmi, 1008 +vpushpl, 1008 +vpushvs, 1008 +vpushvc, 1008 +vpushhi, 1008 +vpushls, 1008 +vpushge, 1008 +vpushlt, 1008 +vpushgt, 1008 +vpushle, 1008 +vpushal, 1008 +vpushlo, 1008 +vpushcc, 1008 +vpushhs, 1008 +vpushcs, 1008 +vpushnv, 1008 +vqabs, 1010 +vqadd, 1012 +vqdmlal, 1014 +vqdmlsl, 1014 +vqdmulh, 1016 +vqdmull, 1018 +vqmovn, 1020 +vqmovun, 1020 +vqneg, 1022 +vqrdmulh, 1024 +vqrshl, 1026 +vqrshrn, 1028 +vqrshrun, 1028 +vqshl, 1030 +vqshlu, 1032 +vqshrn, 1034 +vqshrun, 1034 +vqsub, 1036 +vraddhn, 1038 +vrecpe, 1040 +vrecps, 1042 +vrev16, 1044 +vrev32, 1044 +vrev64, 1044 +vrhadd, 1046 +vrshl, 1048 +vrshr, 1050 +vrshrn, 1052 +vrsqrte, 1054 +vrsqrts, 1056 +vrsra, 1058 +vrsubhn, 1060 +vshl, 1062 +vshll, 1066 +vshr, 1068 +vshrn, 1070 +vsli, 1072 +vsqrt, 1074 +vsqrteq, 1074 +vsqrtne, 1074 +vsqrtmi, 1074 +vsqrtpl, 1074 +vsqrtvs, 1074 +vsqrtvc, 1074 +vsqrthi, 1074 +vsqrtls, 1074 +vsqrtge, 1074 +vsqrtlt, 1074 +vsqrtgt, 1074 +vsqrtle, 1074 +vsqrtal, 1074 +vsqrtlo, 1074 +vsqrtcc, 1074 +vsqrths, 1074 +vsqrtcs, 1074 +vsqrtnv, 1074 +vsra, 1076 +vsri, 1078 +vst1, 1080 +vst2, 1084 +vst3, 1088 +vst4, 1092 +vstmia, 1096 +vstmiaeq, 1096 +vstmiane, 1096 +vstmiami, 1096 +vstmiapl, 1096 +vstmiavs, 1096 +vstmiavc, 1096 +vstmiahi, 
1096 +vstmials, 1096 +vstmiage, 1096 +vstmialt, 1096 +vstmiagt, 1096 +vstmiale, 1096 +vstmiaal, 1096 +vstmialo, 1096 +vstmiacc, 1096 +vstmiahs, 1096 +vstmiacs, 1096 +vstmianv, 1096 +vstmdb, 1096 +vstmdbeq, 1096 +vstmdbne, 1096 +vstmdbmi, 1096 +vstmdbpl, 1096 +vstmdbvs, 1096 +vstmdbvc, 1096 +vstmdbhi, 1096 +vstmdbls, 1096 +vstmdbge, 1096 +vstmdblt, 1096 +vstmdbgt, 1096 +vstmdble, 1096 +vstmdbal, 1096 +vstmdblo, 1096 +vstmdbcc, 1096 +vstmdbhs, 1096 +vstmdbcs, 1096 +vstmdbnv, 1096 +vstr, 1098 +vstreq, 1098 +vstrne, 1098 +vstrmi, 1098 +vstrpl, 1098 +vstrvs, 1098 +vstrvc, 1098 +vstrhi, 1098 +vstrls, 1098 +vstrge, 1098 +vstrlt, 1098 +vstrgt, 1098 +vstrle, 1098 +vstral, 1098 +vstrlo, 1098 +vstrcc, 1098 +vstrhs, 1098 +vstrcs, 1098 +vstrnv, 1098 +vsub, 1100 +vsubeq, 1100 +vsubne, 1100 +vsubmi, 1100 +vsubpl, 1100 +vsubvs, 1100 +vsubvc, 1100 +vsubhi, 1100 +vsubls, 1100 +vsubge, 1100 +vsublt, 1100 +vsubgt, 1100 +vsuble, 1100 +vsubal, 1100 +vsublo, 1100 +vsubcc, 1100 +vsubhs, 1100 +vsubcs, 1100 +vsubnv, 1100 +vsubhn, 1104 +vsubl, 1106 +vsubw, 1106 +vswp, 1108 +vtbl, 1110 +vtbx, 1110 +vtrn, 1112 +vtst, 1114 +vuzp, 1116 +vzip, 1118 +wfe, 1120 +wfeeq, 1120 +wfene, 1120 +wfemi, 1120 +wfepl, 1120 +wfevs, 1120 +wfevc, 1120 +wfehi, 1120 +wfels, 1120 +wfege, 1120 +wfelt, 1120 +wfegt, 1120 +wfele, 1120 +wfeal, 1120 +wfelo, 1120 +wfecc, 1120 +wfehs, 1120 +wfecs, 1120 +wfenv, 1120 +wfi, 1122 +wfieq, 1122 +wfine, 1122 +wfimi, 1122 +wfipl, 1122 +wfivs, 1122 +wfivc, 1122 +wfihi, 1122 +wfils, 1122 +wfige, 1122 +wfilt, 1122 +wfigt, 1122 +wfile, 1122 +wfial, 1122 +wfilo, 1122 +wficc, 1122 +wfihs, 1122 +wfics, 1122 +wfinv, 1122 +yield, 1124 +yieldeq, 1124 +yieldne, 1124 +yieldmi, 1124 +yieldpl, 1124 +yieldvs, 1124 +yieldvc, 1124 +yieldhi, 1124 +yieldls, 1124 +yieldge, 1124 +yieldlt, 1124 +yieldgt, 1124 +yieldle, 1124 +yieldal, 1124 +yieldlo, 1124 +yieldcc, 1124 +yieldhs, 1124 +yieldcs, 1124 +yieldnv, 1124 diff --git a/src/third-party/sleigh/processors/ARM/data/patterns/ARM_BE_patterns.xml b/src/third-party/sleigh/processors/ARM/data/patterns/ARM_BE_patterns.xml new file mode 100644 index 00000000..c74608db --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/patterns/ARM_BE_patterns.xml @@ -0,0 +1,201 @@ + + + + 0xbd .......0 + 0xbd .......0 0x0000 + 0xbd .......0 0xbf00 + 0xbd .......0 0x46c0 + 0xffff + 0x46c0 + 0x4770 + 0x4770 0x0000 + 0x4770 0x46c0 + 0xb0 000..... 0xbd ....0000 + 0x00bf + 0x8000f3af + 0xe8bd 1....... ........ + 0xf746 + 0xf8 0x5d 0xfb 0....... + + + + 0xb5 ........ 0xb0 100..... + 0xb5 ........ 0x1c 00...... + 0xb5 ........ 0x46 0x.. + 0xb5 ........ 01.01... 0x.. + 0xb5 ........ 0x68 0x.. + 0xb5 ........ 01.01... 0x.. 0xb0 10...... + 0xb5 1....... 0xaf.. + 0xb0 100..... 0xb5 ....0000 + 0x1c 00...... 0xb5 ....0000 + 0x46 0x.. 0xb5 ....0000 + 01.01...0x.. 0xb5 ....0000 + 0x68 0x.. 0xb5 ....0000 + 0xe92d 010..... ........ + + + + + + + + + 0xe12fff1. + 0xe12fff1e 0x46c0 + 0xe12fff1e 0xe1a00000 + 0xea...... + 0xe8 10.11101 10.0.... 0x.. + 0xe4 0x9d 0xf0 0x08 + 0xe1 0xa0 0xf0 0x0e + 0xe320f000 0xe1a00000 + 0xe1a00000 + + + + 0xe24dd... 11101001 00101101 .1...... ....0000 + 11101001 00101101 .1...... ....0000 0xe24dd... + 11101001 00101101 .1...... ....0000 0x........ 0xe24dd... + 11101001 00101101 .1...... ....0000 0xe1a0 010.0000 0000000. + 11101001 00101101 .1...... ....0000 + 0xe24dd... 11100101 00101101 1110.... ........ + 11101001 00101101 .0...... ........ 11100101 00101101 11100000 ......00 + 11100101 00101101 1110.... ........ 0xe24dd... + 11100101 00101101 1110.... ........ 0x........ 
0xe24dd... + 0xe5 0x2d 0xe0 0x08 + 0xe1a0c00d 0xe92d.... + + + + + + + + 0xe24dd... 11101001 00101101 .1...... ....0000 + + + + + + + + 11101001 00101101 .1...... ....0000 + + + + + + + 11101001 00101101 .1...... ....0000 + + + + + + + 0xe24dd... 11100101 00101101 1110.... ........ + + + + + + + 11100101 00101101 1110.... ........ 0xe24dd... + + + + + + + 11101001 00101101 .1...... ....0000 0x........ 0xe24dd... + + + + + + + 11100101 00101101 1110.... ........ 0x........ 0xe24dd... + + + + + + + 0xe1a0c00d 0xe92d.... + + + + + + + 0xb5 ....0000 0xb0 100..... + + + + + + + 0xe92d 010..... ........ + + + + + + + 0xb5 ....0000 0x1c 00...... + + + + + + + 0xb5 ....0000 0x46 0x.. + + + + + + + 0xb5 ....0000 01.01... 0x.. + + + + + + + 0xb5 ....0000 0x68 0x.. + + + + + + + 0xb5 ....0000 01.01... 0x.. 0xb0 10...... + + + + + + + 0xb5 1...0000 0xaf.. + + + + + + + + + 0xbd .......0 + 0xbd .......0 0xbf00 + 0x4770 + 0x4770 0xbf00 + + + 0xb5 .......0 + + + + + + + diff --git a/src/third-party/sleigh/processors/ARM/data/patterns/ARM_LE_patterns.xml b/src/third-party/sleigh/processors/ARM/data/patterns/ARM_LE_patterns.xml new file mode 100644 index 00000000..7e18bea0 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/patterns/ARM_LE_patterns.xml @@ -0,0 +1,322 @@ + + + + .......0 0xbd + .......0 0xbd 0x0000 + .......0 0xbd 0x00bf + .......0 0xbd 0xc0 0x46 + 0xffff + 0xc046 + 0x7047 + 0x7047 0x0000 + 0x7047 0xc046 + 000..... 0xb0 ....0000 0xbd + 0x00bf + 0xaff30080 + 0xbde8 ........ 1....... + 0x46f7 + 0x5d 0xf8 0....... 0xfb + + + + ........ 0xb5 1....... 0xb0 + ........ 0xb5 00...... 0x1c + ........ 0xb5 0x.. 0x46 + ........ 0xb5 0x.. 01.01... + ........ 0xb5 0x.. 0x68 + ........ 0xb5 0x.. 01.01... 10...... 0xb0 + 1....... 0xb5 0x..af + 100..... 0xb0 ....0000 0xb5 + 00...... 0x1c ....0000 0xb5 + + 0x.. 01.01... ....0000 0xb5 + 0x.. 0x68 ....0000 0xb5 + 0x2de9 ........ 010..... + + + + + + + + + 0x1.ff2fe1 + 0x1eff2fe1 0xc046 + 0x1eff2fe1 0x0000a0e1 + 0x......ea + 0x.. 10.0.... 10.11101 0xe8 + 0x08 0xf0 0x9d 0xe4 + 0x0e 0xf0 0xa0 0xe1 + 0x00f020e3 0x0000a0e1 + 0x0000a0e1 + + + + 0x..d.4de2 ....0000 .1...... 00101101 11101001 + ....0000 .1...... 00101101 11101001 0x..d.4de2 + ....0000 .1...... 00101101 11101001 0x........ 0x..d.4de2 + ....0000 .1...... 00101101 11101001 0000000. 010.0000 0xa0e1 + ....0000 .1...... 00101101 11101001 + 0x..d.4de2 ........ 1110.... 00101101 11100101 + ........ .0...... 00101101 11101001 ......00 11100000 00101101 11100101 + ........ 1110.... 00101101 11100101 0x..d.4de2 + ........ 1110.... 00101101 11100101 0x........ 0x..d.4de2 + 0x08 0xe0 0x2d 0xe5 + 0x0dc0a0e1 0x....2de9 + ........ .1...... 00101101 11101001 + + + + + + + + 0x..d.4de2 ....0000 .1...... 00101101 11101001 + + + + + + + + + ....0000 .1...... 00101101 11101001 + + + + + + + ........ .1...... 00101101 11101001 + + + + + + + 0x..d.4de2 ........ 1110.... 00101101 11100101 + + + + + + + + ........ 1110.... 00101101 11100101 0x..d.4de2 + + + + + + + + ....0000 .1...... 00101101 11101001 0x........ 0x..d.4de2 + + + + + + + + ........ 1110.... 00101101 11100101 0x........ 0x..d.4de2 + + + + + + + 0x0dc0a0e1 0x....2de9 + + + + + + + ....0000 0xb5 1....... 0xb0 + + + + + + + 0x2de9 ........ 010..... + + + + + + + ....0000 0xb5 00...... 0x1c + + + + + + + ....0000 0xb5 0x.. 0x46 + + + + + + + ....0000 0xb5 0x.. 01.01... + + + + + + + ....0000 0xb5 0x.. 0x68 + + + + + + + ....0000 0xb5 0x.. 01.01... 10...... 
0xb0 + + + + + + + 1...0000 0xb5 0x..af + + + + + + + + + .......0 0xbd + .......0 0xbd 0x00bf + 0x7047 + 0x7047 0x00bf + + + + .......0 0xb5 + + + + + + + + + + + 0x03b4 0x7146 0x0231 0x8908 0x8000 0x8900 0x0858 0x4018 0x8646 0x03bc 0xf746 + + + + + + + 0x02b4 0x7146 0x4908 0x4900 0x095c 0x4900 0x8e44 0x02bc 0x7047 + + + + + + + 0x02b4 0x7146 0x4908 0x4900 0x0956 0x4900 0x8e44 0x02bc 0x7047 + + + + + + + 0x03b4 0x7146 0x4908 0x4000 0x4900 0x095e 0x4900 0x8e44 0x03bc 0x7047 + + + + + + + 0x03b4 0x7146 0x4908 0x4000 0x4900 0x095a 0x4900 0x8e44 0x03bc 0x7047 + + + + + + + 0x01c05ee5 0x0c0053e1 0x0330de37 0x0c30de27 0x83 11.00000 0x8ee0 000111.0 0xff2fe1 + + + + + + + + + 0x01c05ee5 0x0c0053e1 0x0c30de27 0x0330de37 0x83 11.00000 0x8ee0 000111.0 0xff2fe1 + + + + + + + + diff --git a/src/third-party/sleigh/processors/ARM/data/patterns/patternconstraints.xml b/src/third-party/sleigh/processors/ARM/data/patterns/patternconstraints.xml new file mode 100644 index 00000000..29ca71a3 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/data/patterns/patternconstraints.xml @@ -0,0 +1,13 @@ + + + ARM_LE_patterns.xml + + + + ARM_BE_patterns.xml + + + + ARM_LE_patterns.xml + + diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/plugin/core/analysis/ARMPreAnalyzer.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/plugin/core/analysis/ARMPreAnalyzer.java new file mode 100644 index 00000000..0c9c6bc9 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/plugin/core/analysis/ARMPreAnalyzer.java @@ -0,0 +1,110 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.plugin.core.analysis; + +import java.util.List; + +/** + * This is a temporary analyzer, until we can get the pattern search framework up and going. + * This searches for patterns that are functions that have side-effects. + */ + +import ghidra.app.cmd.disassemble.DisassembleCommand; +import ghidra.app.cmd.function.CreateFunctionCmd; +import ghidra.app.plugin.core.searchmem.RegExSearchData; +import ghidra.app.services.*; +import ghidra.app.util.importer.MessageLog; +import ghidra.program.model.address.*; +import ghidra.program.model.lang.Processor; +import ghidra.program.model.listing.*; +import ghidra.util.datastruct.ListAccumulator; +import ghidra.util.search.memory.*; +import ghidra.util.task.TaskMonitor; + +public class ARMPreAnalyzer extends AbstractAnalyzer { + private static String DESCRIPTION = + "Analyze ARM binaries for switch8_r3 functions. 
This will be replaced by a general hashing algorithm next release."; + + public ARMPreAnalyzer() { + super("ARM Pre-Pattern Analyzer", DESCRIPTION, AnalyzerType.BYTE_ANALYZER); + setPriority(AnalysisPriority.BLOCK_ANALYSIS.after()); + setDefaultEnablement(true); + setSupportsOneTimeAnalysis(); + } + + @Override + public boolean canAnalyze(Program program) { + Processor processor = program.getLanguage().getProcessor(); + return (processor.equals(Processor.findOrPossiblyCreateProcessor("ARM"))); + } + + @Override + public boolean added(Program program, AddressSetView set, TaskMonitor monitor, MessageLog log) { + + String switch_fn = "\\x01\\xc0\\x5e\\xe5" + // ldrb ip,[lr,#-0x1] + "\\x0c\\x00\\x53\\xe1" + // cmp r3,ip + "(" + "\\x03\\x30\\xde\\x37" + // ldrbcc r3,[lr,r3] + "\\x0c\\x30\\xde\\x27" + // ldrbcs r3,[lr,ip] + "|" + // OR + "\\x0c\\x30\\xde\\x27" + // ldrbcs r3,[lr,ip] + "\\x03\\x30\\xde\\x37" + // ldrbcc r3,[lr,r3] + ")" + "(" + "\\x83\\xc0\\x8e\\xe0" + // add ip,lr,r3, lsl #0x1 + "\\x1c\\xff\\x2f\\xe1" + // bx ip + "|" + // OR + "\\x83\\xe0\\x8e\\xe0" + // add lr,lr,r3, lsl #0x1 + "\\x1e\\xff\\x2f\\xe1" + // bx lr + ")"; + + RegExSearchData searchData = RegExSearchData.createRegExSearchData(switch_fn); + + SearchInfo searchInfo = new SearchInfo(searchData, 30, false, true, 4, false, null); + + AddressSet intersection = + program.getMemory().getLoadedAndInitializedAddressSet().intersect(set); + RegExMemSearcherAlgorithm searcher = + new RegExMemSearcherAlgorithm(searchInfo, intersection, program, true); + + ListAccumulator accumulator = new ListAccumulator<>(); + searcher.search(accumulator, monitor); + List results = accumulator.asList(); + + // create a function here with the correct call fixup + for (MemSearchResult result : results) { + + Address addr = result.getAddress(); + + // disassemble ARM + DisassembleCommand disassembleCommand = new DisassembleCommand(addr, null, true); + disassembleCommand.applyTo(program); + + // create function + CreateFunctionCmd createFunctionCmd = new CreateFunctionCmd(addr, false); + createFunctionCmd.applyTo(program); + + // set call fixup + Function func = program.getFunctionManager().getFunctionAt(addr); + if (func != null) { + func.setCallFixup("switch8_r3"); + } + + BookmarkManager bookmarkManager = program.getBookmarkManager(); + bookmarkManager.setBookmark(addr, BookmarkType.ANALYSIS, getName(), + "Found Switch8_r3 Function"); + } + + return true; + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/plugin/core/analysis/ArmAnalyzer.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/plugin/core/analysis/ArmAnalyzer.java new file mode 100644 index 00000000..9e8d9c97 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/plugin/core/analysis/ArmAnalyzer.java @@ -0,0 +1,831 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.plugin.core.analysis; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Iterator; + +import ghidra.app.cmd.function.CreateFunctionCmd; +import ghidra.app.cmd.label.AddLabelCmd; +import ghidra.app.plugin.core.disassembler.AddressTable; +import ghidra.framework.options.Options; +import ghidra.program.disassemble.Disassembler; +import ghidra.program.model.address.*; +import ghidra.program.model.block.*; +import ghidra.program.model.data.*; +import ghidra.program.model.lang.*; +import ghidra.program.model.listing.*; +import ghidra.program.model.mem.MemoryBlock; +import ghidra.program.model.pcode.Varnode; +import ghidra.program.model.scalar.Scalar; +import ghidra.program.model.symbol.*; +import ghidra.program.model.util.CodeUnitInsertionException; +import ghidra.program.util.*; +import ghidra.util.Msg; +import ghidra.util.exception.*; +import ghidra.util.task.TaskMonitor; + +public class ArmAnalyzer extends ConstantPropagationAnalyzer { + private static final String SWITCH_OPTION_NAME = "Switch Table Recovery"; + private static final String SWITCH_OPTION_DESCRIPTION = "Turn on to recover switch tables"; + private static final boolean SWITCH_OPTION_DEFAULT_VALUE = false; + + private boolean recoverSwitchTables = SWITCH_OPTION_DEFAULT_VALUE; + + private static final long MAX_DISTANCE = (4 * 1024); + + private Register tbRegister; + private Register tmodeRegister; + private Register lrRegister; + + private final static String PROCESSOR_NAME = "ARM"; + + public ArmAnalyzer() { + super(PROCESSOR_NAME); + } + + @Override + public boolean canAnalyze(Program program) { + boolean canAnalyze = program.getLanguage().getProcessor().equals( + Processor.findOrPossiblyCreateProcessor(PROCESSOR_NAME)); + + if (!canAnalyze) { + return false; + } + + tmodeRegister = program.getProgramContext().getRegister("TMode"); + tbRegister = program.getProgramContext().getRegister("ISAModeSwitch"); + lrRegister = program.getProgramContext().getRegister("lr"); + + return true; + } + + @Override + public AddressSet flowConstants(final Program program, Address flowStart, + AddressSetView flowSet, final SymbolicPropogator symEval, final TaskMonitor monitor) + throws CancelledException { + // follow all flows building up context + // use context to fill out addresses on certain instructions + ConstantPropagationContextEvaluator eval = + new ConstantPropagationContextEvaluator(trustWriteMemOption) { + + @Override + public boolean evaluateContext(VarnodeContext context, Instruction instr) { + + FlowType ftype = instr.getFlowType(); + if (ftype.isComputed() && ftype.isJump()) { + Varnode pcVal = context.getRegisterVarnodeValue( + program.getLanguage().getProgramCounter()); + if (pcVal != null) { + if (isLinkRegister(context, pcVal) && + !instr.getFlowType().isTerminal()) { + // need to set the return override + instr.setFlowOverride(FlowOverride.RETURN); + } + } + // if LR is a constant and is set right after this, this is a call + Varnode lrVal = context.getRegisterVarnodeValue(lrRegister); + if (lrVal != null) { + if (lrVal.isConstant()) { + long target = lrVal.getAddress().getOffset(); + Address addr = instr.getMaxAddress().add(1); + if (target == addr.getOffset() && !instr.getFlowType().isCall()) { + // if there are is a read reference there as well, + // then this is really a branch, not a call + if (hasDataReferenceTo(program, addr)) { + return false; + } + instr.setFlowOverride(FlowOverride.CALL); + // need to trigger disassembly below! 
if not already + doArmThumbDisassembly(program, instr, context, addr, + instr.getFlowType(), false, monitor); + // need to trigger re-function creation! + Function f = program.getFunctionManager().getFunctionContaining( + instr.getMinAddress()); + if (f != null) { + try { + CreateFunctionCmd.fixupFunctionBody(program, f, + monitor); + } + catch (CancelledException e) { + return true; + } + //AutoAnalysisManager.getAnalysisManager(program).functionDefined( + // func.getBody()); + } + } + } + } + + } + return false; + } + + /** + * Check if there are any data references to this location. + * @param program + * @param addr + * @return true if there are any data references to addr + */ + private boolean hasDataReferenceTo(Program program, Address addr) { + ReferenceManager refMgr = program.getReferenceManager(); + if (!refMgr.hasReferencesTo(addr)) { + return false; + } + ReferenceIterator referencesTo = refMgr.getReferencesTo(addr); + while (referencesTo.hasNext()) { + Reference reference = referencesTo.next(); + if (reference.getReferenceType().isData()) { + return true; + } + } + return false; + } + + private boolean isLinkRegister(VarnodeContext context, Varnode pcVal) { + return (pcVal.isRegister() && + pcVal.getAddress().equals(lrRegister.getAddress())) || + (context.isSymbol(pcVal) && + pcVal.getAddress().getAddressSpace().getName().equals( + lrRegister.getName()) && + pcVal.getOffset() == 0); + } + + @Override + public boolean evaluateReference(VarnodeContext context, Instruction instr, + int pcodeop, Address address, int size, RefType refType) { + if (refType.isJump() && refType.isComputed() && + program.getMemory().contains(address) && address.getOffset() != 0) { + if (instr.getMnemonicString().startsWith("tb")) { + return false; + } + doArmThumbDisassembly(program, instr, context, address, instr.getFlowType(), + true, monitor); + return !symEval.encounteredBranch(); + } + if (refType.isData() && program.getMemory().contains(address)) { + if (refType.isRead() || refType.isWrite()) { + createData(program, address, size); + instr.addOperandReference(instr.getNumOperands() - 1, address, refType, + SourceType.ANALYSIS); + return false; + } + } + else if (refType.isCall() && refType.isComputed()) { + // must disassemble right now, because TB flag could get set back at end of blx + doArmThumbDisassembly(program, instr, context, address, instr.getFlowType(), + true, monitor); + return false; + } + + return super.evaluateReference(context, instr, pcodeop, address, size, refType); + } + + @Override + public boolean evaluateDestination(VarnodeContext context, + Instruction instruction) { + FlowType flowType = instruction.getFlowType(); + if (!flowType.isJump()) { + return false; + } + + Reference[] refs = instruction.getReferencesFrom(); + if (refs.length <= 0 || + (refs.length == 1 && refs[0].getReferenceType().isData()) || + symEval.encounteredBranch()) { + destSet.addRange(instruction.getMinAddress(), instruction.getMinAddress()); + } + return false; + } + }; + + AddressSet resultSet = symEval.flowConstants(flowStart, flowSet, eval, true, monitor); + + if (recoverSwitchTables) { + recoverSwitches(program, eval.getDestinationSet(), symEval, monitor); + } + + return resultSet; + } + + private void recoverSwitches(final Program program, AddressSet destSet, + SymbolicPropogator symEval, TaskMonitor monitor) throws CancelledException { + + // now handle symbolic execution assuming values! 
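The comment above is the crux of the vendored switch recovery: rather than solving the jump expression symbolically, the analyzer re-runs constant propagation over the block that computes the indirect jump, once per assumed value of the unresolved index register, and collects the distinct targets that fall out. The `SwitchEvaluator` defined next feeds each assumption back through its `unknownValue()` hook once the guarding `cmp` has been seen. A minimal sketch of the driver loop, using only calls that appear in this file (a fragment, not a compilable unit — `program`, `branchSet`, `evaluator`, and `monitor` stand in for locals of `recoverSwitches` further below):

```java
// Sketch of the assume-and-propagate loop behind the switch recovery.
SymbolicPropogator prop = new SymbolicPropogator(program);
for (long assume = 0; assume < evaluator.getTableSizeMax(); assume++) {
    evaluator.initForCase(Long.valueOf(assume)); // unknownValue() hands this back
    prop.flowConstants(branchSet.getMinAddress(), branchSet, evaluator, false, monitor);
    if (assume > 0 && prop.readExecutable()) {
        break; // propagation began reading code as data: the table is exhausted
    }
}
// evaluator.getTargetList() now holds one Address per recovered case
```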
+ class SwitchEvaluator implements ContextEvaluator { + + int tableSizeMax = 64; + Long assumeValue = new Long(0); + Address targetSwitchAddr = null; + int addrByteSize = 1; + boolean hitTheGuard = false; + ArrayList
<Address> targetList = new ArrayList<Address>(); + ArrayList<Address> accessList = new ArrayList<Address>
(); + + public void init(Address loc, int maxSize) { + addrByteSize = 1; + assumeValue = new Long(0); + tableSizeMax = maxSize; + targetSwitchAddr = loc; + hitTheGuard = false; + + targetList.clear(); + accessList.clear(); + } + + public void initForCase(Long assume) { + assumeValue = new Long(assume); + hitTheGuard = false; + } + + public int getTableSizeMax() { + return tableSizeMax; + } + + public int getAddrByteSize() { + return addrByteSize; + } + + public ArrayList
getTargetList() { + return targetList; + } + + @Override + public boolean evaluateContextBefore(VarnodeContext context, Instruction instr) { + return false; + } + + @Override + public boolean evaluateContext(VarnodeContext context, Instruction instr) { + if (context.readExecutableCode()) { + return true; + } + // find the cmpli to set the size of the table + // tableSize = size + String mnemonic = instr.getMnemonicString(); + if ((mnemonic.compareToIgnoreCase("cmp") == 0)) { + int numOps = instr.getNumOperands(); + if (numOps > 1) { + Register reg = instr.getRegister(numOps - 2); + if ((reg != null)) { + context.clearRegister(reg); + Scalar scalar = instr.getScalar(numOps - 1); + if (scalar != null) { + int newTableSizeMax = (int) scalar.getSignedValue() + 2; + if (newTableSizeMax > 0 && newTableSizeMax < 128) { + tableSizeMax = newTableSizeMax; + } +// RegisterValue rval = context.getRegisterValue(reg); +// if (rval != null) { +// long lval = rval.getSignedValue().longValue(); +// if (lval < 0) +// tableIndexOffset = -lval; +// } else { +// } + } + } + } + hitTheGuard = true; + } + if ((mnemonic.compareToIgnoreCase("sub") == 0)) { + int numOps = instr.getNumOperands(); + if (numOps > 1) { + Register reg = instr.getRegister(numOps - 2); + if ((reg != null)) { + BigInteger val = context.getValue(reg, true); + if (val == null) { + return false; + } + context.clearRegister(reg); + Scalar scalar = instr.getScalar(numOps - 1); + if (scalar == null) { + return false; + } + context.setValue(reg, + val.add(BigInteger.valueOf(scalar.getSignedValue()))); + val = context.getValue(reg, true); +// if (scalar != null) { +// tableSizeMax = (int) scalar.getSignedValue() + 1; +// RegisterValue rval = context.getRegisterValue(reg); +// if (rval != null) { +// long lval = rval.getSignedValue().longValue(); +// if (lval < 0) +// tableIndexOffset = -lval; +// } else { +// } +// } + } + } +// hitTheGuard = true; + } + return false; + } + + @Override + public Address evaluateConstant(VarnodeContext context, Instruction instr, int pcodeop, + Address constant, int size, RefType refType) { + return null; + } + + @Override + public boolean evaluateReference(VarnodeContext context, Instruction instr, int pcodeop, + Address address, int size, RefType refType) { + + // if ever see a reference to 0, something went wrong, stop the process + if (address == null) { + return terminatePropogation(context); + } + + // for switches, if access is below 256, then there is a problem + // if ever loading from instructions in memory, must EXIT! 
+ // + long offset = address.getOffset(); + if ((offset >= 0 && offset < 256) || context.readExecutableCode()) { + return terminatePropogation(context); + } + if (!((refType.isComputed() || refType.isConditional() == !followConditional) && + program.getMemory().contains(address))) { + if (refType.isRead()) { + if (targetList.contains(address)) { + return terminatePropogation(context); + } + size = createDataType(instr, address); + if (size != 0) { + addrByteSize = size; + } + } + return false; + } + if (refType.isJump() || refType.isCall()) { + if (accessList.contains(address)) { + return terminatePropogation(context); + } + long diff = Math.abs(address.subtract(targetSwitchAddr)); + // don't allow jumps backward, or too far if this is not a call + if (refType.isCall() || diff < 32 * 1024) { + address = flowArmThumb(program, instr, context, address, + instr.getFlowType(), false); + if (address != null) { + targetList.add(address); + } + } + return false; + } + // no markup, computing the jump table + return false; + } + + private boolean terminatePropogation(VarnodeContext context) { + hitTheGuard = false; + context.setReadExecutableCode(); + return false; + } + + @Override + public boolean evaluateDestination(VarnodeContext context, Instruction instruction) { + return instruction.getMinAddress().equals(targetSwitchAddr); + } + + @Override + public Long unknownValue(VarnodeContext context, Instruction instruction, + Varnode node) { + if (node.isRegister()) { + Register reg = program.getRegister(node.getAddress()); + if (reg != null) { + // never assume for flags, or control registers + String regName = reg.getName(); + if (regName.equals("sp")) { + return null; + } + if (!regName.startsWith("r")) { + return new Long(0); + } + } + if (hitTheGuard) { + return assumeValue; + } + } + if (hitTheGuard && context.isSymbol(node)) { + return assumeValue; + } + return null; + } + + @Override + public boolean followFalseConditionalBranches() { + return false; + } + + @Override + public boolean evaluateSymbolicReference(VarnodeContext context, Instruction instr, + Address address) { + return false; + } + + @Override + public boolean allowAccess(VarnodeContext context, Address addr) { + accessList.add(addr); + return false; + } + } + + SwitchEvaluator switchEvaluator = new SwitchEvaluator(); + + // now flow with the simple block of this branch.... 
+ + // for each unknown branch destination, + AddressIterator iter = destSet.getAddresses(true); + SimpleBlockModel model = new SimpleBlockModel(program); + while (iter.hasNext() && !monitor.isCancelled()) { + Address loc = iter.next(); + CodeBlock bl = null; + try { + bl = model.getFirstCodeBlockContaining(loc, monitor); + } + catch (CancelledException e) { + break; + } + AddressSet branchSet = new AddressSet(bl); + CodeBlockReferenceIterator bliter; + try { + bliter = bl.getSources(monitor); + while (bliter.hasNext()) { + if (hasCallsTo(program, bl)) { + break; + } + CodeBlockReference sbl = bliter.next(); + bl = sbl.getSourceBlock(); + if (bl == null) { + continue; + } + if (!sbl.getFlowType().isCall()) { + branchSet.add(bl); + } + if (sbl.getFlowType().isJump() && bl.getNumSources(monitor) == 1) { + if (sbl.getFlowType().isConditional()) { + followConditional = true; + break; + } + bliter = bl.getSources(monitor); + } + } + } + catch (CancelledException e) { + break; + } + + switchEvaluator.init(loc, 64); + + Instruction targetInstr = program.getListing().getInstructionAt(loc); + + SymbolicPropogator targetEval = symEval; + // if this is a tbX instruction, don't assume any old values + if (targetInstr != null && targetInstr.getMnemonicString().startsWith("tb")) { + targetEval = new SymbolicPropogator(program); + } + + Address zeroAddr = targetInstr.getMinAddress().getNewAddress(0); + for (long assume = 0; assume < switchEvaluator.getTableSizeMax(); assume++) { + switchEvaluator.initForCase(new Long(assume)); + + targetEval.flowConstants(branchSet.getMinAddress(), branchSet, switchEvaluator, + false, monitor); + // go around once, table might be 1 based + if (assume > 0 && targetEval.readExecutable()) { + break; + } + // if it didn't get it after try with 1 + if (assume > 1 && switchEvaluator.getTargetList().size() < 1) { + break; + } + // if the target list ever contains zero, is bad + if (switchEvaluator.getTargetList().contains(zeroAddr)) { + switchEvaluator.getTargetList().clear(); + break; + } + } + + // re-create the function body with the newly found code + if (switchEvaluator.getTargetList().size() > 1) { + Iterator
<Address> liter = switchEvaluator.getTargetList().iterator(); + Address firstAddress = switchEvaluator.getTargetList().get(0); + while (liter.hasNext()) { + if (!firstAddress.equals(liter.next())) { + AddressTable table; + table = new AddressTable(loc, + switchEvaluator.getTargetList().toArray(new Address[0]), + switchEvaluator.getAddrByteSize(), 0, false); + Instruction jmpInstr = program.getListing().getInstructionAt(loc); + if (jmpInstr.getReferencesFrom().length <= 1) { + Iterator<Address>
jmpIter = switchEvaluator.getTargetList().iterator(); + while (jmpIter.hasNext()) { + Address address = jmpIter.next(); + jmpInstr.addMnemonicReference(address, jmpInstr.getFlowType(), + SourceType.ANALYSIS); + } + } + table.disassemble(program, jmpInstr, monitor); + table.fixupFunctionBody(program, jmpInstr, monitor); + labelTable(program, loc, switchEvaluator.getTargetList()); + switchEvaluator.getTargetList().clear(); + break; + } + } + } + if (switchEvaluator.getTargetList().size() > 0) { + AddressTable table; + table = new AddressTable(loc, switchEvaluator.getTargetList().toArray(new Address[0]), + switchEvaluator.getAddrByteSize(), 0, false); + table.disassemble(program, targetInstr,monitor); + } + } + } + + /* + * @return true if there are currently any call references to this CodeBlock + */ + private boolean hasCallsTo(Program program, CodeBlock bl) { + Address startAddr = bl.getFirstStartAddress(); + ReferenceIterator referencesTo = program.getReferenceManager().getReferencesTo(startAddr); + while (referencesTo.hasNext()) { + Reference reference = referencesTo.next(); + if (reference.getReferenceType().isCall()) { + return true; + } + } + return false; + } + + private int createDataType(Instruction instr, Address address) { + Program program = instr.getProgram(); + if (!program.getListing().isUndefined(address, address)) { + return 0; + } + + String mnemonic = instr.getMnemonicString(); + + int charOff = 0; + if (mnemonic.startsWith("ldrex") || mnemonic.startsWith("strex")) { + charOff = 5; + } + else if (mnemonic.startsWith("ldrs") || mnemonic.startsWith("strs")) { + charOff = 4; + } + else if (mnemonic.startsWith("ldr") || mnemonic.startsWith("str")) { + charOff = 3; + } + else if (mnemonic.startsWith("ld") || mnemonic.startsWith("st")) { + charOff = 2; + } + else if (mnemonic.startsWith("tbh")) { + charOff = 2; + } + else if (mnemonic.startsWith("tbb")) { + charOff = 2; + } + else if (mnemonic.startsWith("vldr") || mnemonic.startsWith("vstr")) { + charOff = mnemonic.length() - 2; + } + + if (charOff <= 0) { + return 0; + } + + DataType dt = Undefined4DataType.dataType; + if (mnemonic.length() > charOff) { + char endCh = mnemonic.charAt(charOff); + switch (endCh) { + case '6': + dt = Undefined8DataType.dataType; + break; + case '3': + dt = Undefined4DataType.dataType; + break; + case 'l': + dt = Undefined4DataType.dataType; + break; + case 'w': + case 'h': + dt = Undefined2DataType.dataType; + break; + case 'b': + dt = Undefined1DataType.dataType; + break; + } + } + + //new CreateDataCmd(address, dt).applyTo(program); + Data data = null; + try { + data = program.getListing().createData(address, dt); + } + catch (CodeUnitInsertionException e) { + data = program.getListing().getDefinedDataAt(address); + } + catch (DataTypeConflictException e) { + // ignore data type conflict + } + int addrByteSize = dt.getLength(); + //data = program.getListing().getDefinedDataAt(address); + if (data != null) { + Object dValue = data.getValue(); + // if the value at the location looks like a pointer, create a pointer + if (dValue != null && dValue instanceof Scalar) { + Scalar sValue = (Scalar) dValue; + + long value = sValue.getUnsignedValue(); + if (value < 4096 || value == 0xffff || value == 0xff00 || value == 0xffffff || + value == 0xff0000 || value == 0xff00ff || value == 0xffffffff || + value == 0xffffff00 || value == 0xffff0000 || value == 0xff000000) { + return 0; + } + + // If the access is a read, and the data is not far away, consider it constant + long distance = address.getOffset() - 
instr.getAddress().getOffset(); + if (distance > 0 && distance < MAX_DISTANCE) { + markDataAsConstant(data); + } + +// Address sAddr = address.getNewAddress(sValue.getUnsignedValue()); +// if (program.getMemory().contains(sAddr)) { +// program.getListing().clearCodeUnits(address, address); +// new CreateDataCmd(address, DataTypeFactory.POINTER).applyTo(program); +// } + } + } + return addrByteSize; + } + + private void labelTable(Program program, Address loc, ArrayList
targets) { + Namespace space = null; + + Instruction start_inst = program.getListing().getInstructionAt(loc); + + // not putting switch into functions anymore + // program.getSymbolTable().getNamespace(start_inst.getMinAddress()); + String spaceName = "switch_" + start_inst.getMinAddress(); + try { + space = program.getSymbolTable().createNameSpace(space, spaceName, SourceType.ANALYSIS); + } + catch (DuplicateNameException e) { + space = program.getSymbolTable().getNamespace(spaceName, program.getGlobalNamespace()); + } + catch (InvalidInputException e) { + // just go with default space + } + + int tableNumber = 0; + for (Address addr : targets) { + AddLabelCmd lcmd = new AddLabelCmd(addr, "case_" + Long.toHexString(tableNumber), space, + SourceType.ANALYSIS); + tableNumber++; + lcmd.setNamespace(space); + + lcmd.applyTo(program); + } + } + + /** + * Disassemble at the specified target address and optionally create a mnemonic flow reference. + * @param monitor + * @param instruction flow from instruction + * @param target disassembly address + * @param flowType if not null a reference from the instruction mnemonic will be created to the specified + * target address using this flowType. + * @param addReference + */ + Address flowArmThumb(Program program, Instruction instruction, VarnodeContext context, + Address target, FlowType flowType, boolean addReference) { + if (target == null) { + return null; + } + long bxOffset = target.getOffset(); + long thumbMode = bxOffset & 0x1; + + Address addr = instruction.getMinAddress().getNewAddress(bxOffset & 0xfffffffe); + + Listing listing = program.getListing(); + + if (flowType != null) { + int opIndex = -1; + for (int i = 0; i < instruction.getNumOperands(); i++) { + int opType = instruction.getOperandType(i); + // markup the program counter for any flow + if ((opType & OperandType.REGISTER) != 0 || (opType & OperandType.DYNAMIC) != 0) { + opIndex = i; + break; + } + } + + if (addReference) { + Reference[] refsFrom = instruction.getReferencesFrom(); + boolean foundRef = false; + for (Reference element : refsFrom) { + if (element.getToAddress().equals(addr)) { + // reference already there, assume thumb bit propagated + foundRef = true; + break; + } + } + if (!foundRef) { + if (opIndex == -1) { + instruction.addMnemonicReference(addr, flowType, SourceType.ANALYSIS); + } + else { + instruction.addOperandReference(opIndex, addr, flowType, + SourceType.ANALYSIS); + } + } + } + } + + if (tmodeRegister != null && listing.getUndefinedDataAt(addr) != null) { + boolean inThumbMode = false; + RegisterValue curvalue = + context.getRegisterValue(tmodeRegister, instruction.getMinAddress()); + if (curvalue != null && curvalue.hasValue()) { + inThumbMode = (curvalue.getUnsignedValue().intValue() == 1); + } + // if the TB register is set, that trumps any mode we are tracking + RegisterValue tbvalue = context.getRegisterValue(tbRegister); + if (tbvalue != null && tbvalue.hasValue()) { + inThumbMode = (tbvalue.getUnsignedValue().intValue() == 1); + } + else { + // blx instruction on a direct address in ARM mode always goes to thumb mode + if (instruction.getMnemonicString().equals("blx") || thumbMode != 0) { + inThumbMode = true; + } + } + BigInteger thumbModeValue = BigInteger.valueOf(inThumbMode ? 
1 : 0); + try { + program.getProgramContext().setValue(tmodeRegister, addr, addr, thumbModeValue); + } + catch (ContextChangeException e) { + Msg.error(this, "Unexpected Exception", e); + } + return addr; + } + + // instruction already there + return null; + } + + /** + * Disassemble at the specified target address and optionally create a mnemonic flow reference. + * @param monitor + * @param instruction flow from instruction + * @param target disassembly address + * @param flowType if not null a reference from the instruction mnemonic will be created to the specified + * target address using this flowType. + * @param addRef true if a reference should be added. + * + */ + void doArmThumbDisassembly(Program program, Instruction instruction, VarnodeContext context, + Address target, FlowType flowType, boolean addRef, TaskMonitor monitor) { + if (target == null) { + return; + } + + target = flowArmThumb(program, instruction, context, target, flowType, addRef); + if (target == null) { + return; + } + + // this is here so the reference gets created, but not - disassembled if it is in a bad part of memory. + // something computed it into the memory + MemoryBlock block = program.getMemory().getBlock(target); + if (block == null || !block.isExecute() || !block.isInitialized() || + block.getName().equals(MemoryBlock.EXTERNAL_BLOCK_NAME)) { + return; + } + + Disassembler dis = Disassembler.getDisassembler(program, monitor, null); + AddressSet disassembleAddrs = dis.disassemble(target, null); + AutoAnalysisManager.getAnalysisManager(program).codeDefined(disassembleAddrs); + } + + @Override + public void optionsChanged(Options options, Program program) { + super.optionsChanged(options, program); + + options.registerOption(SWITCH_OPTION_NAME, recoverSwitchTables, null, + SWITCH_OPTION_DESCRIPTION); + recoverSwitchTables = options.getBoolean(SWITCH_OPTION_NAME, recoverSwitchTables); + } + +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/extend/ARM_ElfExtension.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/extend/ARM_ElfExtension.java new file mode 100644 index 00000000..a43c0cb0 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/extend/ARM_ElfExtension.java @@ -0,0 +1,172 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.elf.extend; + +import java.math.BigInteger; + +import ghidra.app.util.bin.format.elf.*; +import ghidra.program.model.address.Address; +import ghidra.program.model.lang.Language; +import ghidra.program.model.lang.Register; +import ghidra.program.model.listing.ContextChangeException; +import ghidra.program.model.listing.Program; +import ghidra.util.exception.CancelledException; +import ghidra.util.task.TaskMonitor; + +public class ARM_ElfExtension extends ElfExtension { + + // Elf Program Header Extensions + public static final ElfProgramHeaderType PT_ARM_EXIDX = + new ElfProgramHeaderType(0x70000000, "PT_ARM_EXIDX", "Frame unwind information"); + + // Elf Section Header Extensions + public static final ElfSectionHeaderType SHT_ARM_EXIDX = + new ElfSectionHeaderType(0x70000001, "SHT_ARM_EXIDX", "Exception Index table"); + public static final ElfSectionHeaderType SHT_ARM_PREEMPTMAP = new ElfSectionHeaderType( + 0x70000002, "SHT_ARM_PREEMPTMAP", "BPABI DLL dynamic linking preemption map"); + public static final ElfSectionHeaderType SHT_ARM_ATTRIBUTES = new ElfSectionHeaderType( + 0x70000003, "SHT_ARM_ATTRIBUTES", "Object file compatibility attributes"); + public static final ElfSectionHeaderType SHT_ARM_DEBUGOVERLAY = + new ElfSectionHeaderType(0x70000004, "SHT_ARM_DEBUGOVERLAY", "See DBGOVL for details"); + public static final ElfSectionHeaderType SHT_ARM_OVERLAYSECTION = + new ElfSectionHeaderType(0x70000005, "SHT_ARM_OVERLAYSECTION", + "See Debugging Overlaid Programs (DBGOVL) for details"); + + @Override + public boolean canHandle(ElfHeader elf) { + return elf.e_machine() == ElfConstants.EM_ARM; + } + + @Override + public boolean canHandle(ElfLoadHelper elfLoadHelper) { + Language language = elfLoadHelper.getProgram().getLanguage(); + return canHandle(elfLoadHelper.getElfHeader()) && + "ARM".equals(language.getProcessor().toString()); + } + + @Override + public String getDataTypeSuffix() { + return "_ARM"; + } + + @Override + public void processElf(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + Register tmodeRegister = elfLoadHelper.getProgram().getRegister("TMode"); + if (tmodeRegister == null) { + elfLoadHelper.log("WARNING: TMode register not found - Thumb mode not supported"); + } + // TODO: markup PT_ARM_EXIDX ?s + } + + @Override + public Address creatingFunction(ElfLoadHelper elfLoadHelper, Address functionAddress) { + Program program = elfLoadHelper.getProgram(); + if ((functionAddress.getOffset() & 1) != 0) { + Register tmodeRegister = program.getRegister("TMode"); + if (tmodeRegister == null) { + elfLoadHelper.log("TMode mode not supported, unable to mark address as Thumb: " + + functionAddress); + return functionAddress; + } + functionAddress = functionAddress.previous(); // align address + try { + program.getProgramContext().setValue(tmodeRegister, functionAddress, + functionAddress, BigInteger.ONE); + } + catch (ContextChangeException e) { + // ignore since should not be instructions at time of import + } + } + if ((functionAddress.getOffset() % 4) == 2) {//The combination bit[1:0] = 0b10 is reserved. 
+ elfLoadHelper.log("Function address is two bit aligned (reserved per ARM manual): " + + functionAddress); + } + return functionAddress; + } + + @Override + public Address evaluateElfSymbol(ElfLoadHelper elfLoadHelper, ElfSymbol elfSymbol, + Address address, boolean isExternal) { + + if (isExternal) { + return address; + } + + Program program = elfLoadHelper.getProgram(); + + String symName = elfSymbol.getNameAsString(); + + try { + Register tmodeRegister = program.getRegister("TMode"); + + // ELF ARM - tags ARM code with $a and Thumb code with $t + // + if (tmodeRegister == null) { + // Thumb Mode not supported by language + } + else if ("$t".equals(symName) || symName.startsWith("$t.")) { + // is thumb mode + program.getProgramContext().setValue(tmodeRegister, address, address, + BigInteger.valueOf(1)); + elfLoadHelper.markAsCode(address); + + // do not retain $t symbols in program due to potential function/thunk naming interference + elfLoadHelper.setElfSymbolAddress(elfSymbol, address); + return null; + } + else if ("$a".equals(symName) || symName.startsWith("$a.")) { + // is arm mode + program.getProgramContext().setValue(tmodeRegister, address, address, + BigInteger.valueOf(0)); + elfLoadHelper.markAsCode(address); + + // do not retain $a symbols in program due to potential function/thunk naming interference + elfLoadHelper.setElfSymbolAddress(elfSymbol, address); + return null; + } + else if ("$b".equals(symName)) { + // don't do anything this is data + } + else if ("$d".equals(symName) || symName.startsWith("$d.")) { + // is data, need to protect as data + elfLoadHelper.createUndefinedData(address, (int) elfSymbol.getSize()); + + // do not retain $d symbols in program due to excessive duplicate symbols + elfLoadHelper.setElfSymbolAddress(elfSymbol, address); + return null; + } + if (elfSymbol.getType() == ElfSymbol.STT_FUNC) { + long symVal = address.getOffset(); + if ((symVal & 1) != 0 && tmodeRegister != null) { + address = address.previous(); + program.getProgramContext().setValue(tmodeRegister, address, address, + BigInteger.valueOf(1)); + } + } + } + catch (ContextChangeException e) { + // ignore since should not be instructions at time of import + } + return address; + } + +// @Override +// public void processGotPlt(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) throws CancelledException { +// TODO override GOT markup. PLT handled by R_ARM_JUMP_SLOT relocation processing. +// } + +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/extend/ARM_ElfProgramHeaderConstants.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/extend/ARM_ElfProgramHeaderConstants.java new file mode 100644 index 00000000..37cecebd --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/extend/ARM_ElfProgramHeaderConstants.java @@ -0,0 +1,99 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.elf.extend; + +public class ARM_ElfProgramHeaderConstants { + + //////////////////////////////////////////////////////////////////////////////// + + /** Masks bits describing the format of data in subsequent words. The masked value is described in Table 5-3, below. */ + public static final int PT_ARM_ARCHEXT_FMTMSK = 0xff000000; + /** Masks bits describing the architecture profile required by the executable. The masked value is described in Table 5-4, below. */ + public static final int PT_ARM_ARCHEXT_PROFMSK = 0x00ff0000; + /** Masks bits describing the base architecture required by the executable. The masked value is described in Table 5-5, below. */ + public static final int PT_ARM_ARCHEXT_ARCHMSK = 0x000000ff; + + //////////////////////////////////////////////////////////////////////////////// + + // Table 5-3, Architecture compatibility data formats lists the architecture + // compatibility data formats defined by this ABI. All other format + // identifiers are reserved to future revisions of this specification. + + /** There are no additional words of data. However, if EF_OSABI is non-zero, the relevant platform ABI may define additional data that follows the initial word. */ + public static final int PT_ARM_ARCHEXT_FMT_OS = 0x00000000; + /** 5.2.1.1, below describes the format of the following data words. */ + public static final int PT_ARM_ARCHEXT_FMT_ABI = 0x01000000; + + //////////////////////////////////////////////////////////////////////////////// + + // Table 5-4, Architecture profile compatibility data. + // Lists the values specifying the architectural profile needed by an executable file. + + /** The architecture has no profile variants, or the image has no profile-specific constraints */ + public static final int PT_ARM_ARCHEXT_PROF_NONE = 0x0; + /** The executable file requires the Application profile */ + public static final int PT_ARM_ARCHEXT_PROF_ARM = 'A' << 16; + /** The executable file requires the Real-Time profile */ + public static final int PT_ARM_ARCHEXT_PROF_RT = 'R' << 16; + /** The executable file requires the Microcontroller profile */ + public static final int PT_ARM_ARCHEXT_PROF_MC = 'M' << 16; + /** The executable file requires the 'classic' ('A' or 'R' profile) exception model. */ + public static final int PT_ARM_ARCHEXT_PROF_CLASSIC = 'S' << 16; + + //////////////////////////////////////////////////////////////////////////////// + + //Table 5-5, Architecture version compatibility data defines the values that + //specify the minimum architecture version needed by this executable file. + //These values are identical to those of the Tag_CPU_arch attribute used + //in the attributes section of a relocatable file. 
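Taken together, the three masks above split the single data word of a `PT_ARM_ARCHEXT` segment into format, profile, and minimum-architecture fields, with the architecture values enumerated just below. A minimal sketch of decoding such a word (the method and `word` value are hypothetical; the constant names are the ones defined in this file):

```java
// Hypothetical decoder for a PT_ARM_ARCHEXT data word (sketch only).
static String describeArchExt(int word) {
    int fmt  = word & PT_ARM_ARCHEXT_FMTMSK;   // data format, Table 5-3
    int prof = word & PT_ARM_ARCHEXT_PROFMSK;  // required profile, Table 5-4
    int arch = word & PT_ARM_ARCHEXT_ARCHMSK;  // minimum architecture, Table 5-5
    StringBuilder sb = new StringBuilder();
    sb.append(fmt == PT_ARM_ARCHEXT_FMT_ABI ? "ABI format" : "OS-specific format");
    if (prof == PT_ARM_ARCHEXT_PROF_ARM)  sb.append(", Application profile");
    if (arch == PT_ARM_ARCHEXT_ARCHv7)    sb.append(", needs ARMv7 or later");
    return sb.toString();
}
```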
+ + /** The needed architecture is unknown or specified in some other way */ + public static final int PT_ARM_ARCHEXT_ARCH_UNKN = 0x00; + /** Architecture v4 */ + public static final int PT_ARM_ARCHEXT_ARCHv4 = 0x01; + /** Architecture v4T */ + public static final int PT_ARM_ARCHEXT_ARCHv4T = 0x02; + /** Architecture v5T */ + public static final int PT_ARM_ARCHEXT_ARCHv5T = 0x03; + /** Architecture v5TE */ + public static final int PT_ARM_ARCHEXT_ARCHv5TE = 0x04; + /** Architecture v5TEJ */ + public static final int PT_ARM_ARCHEXT_ARCHv5TEJ = 0x05; + /** Architecture v6 */ + public static final int PT_ARM_ARCHEXT_ARCHv6 = 0x06; + /** Architecture v6KZ */ + public static final int PT_ARM_ARCHEXT_ARCHv6KZ = 0x07; + /** Architecture v6T2 */ + public static final int PT_ARM_ARCHEXT_ARCHv6T2 = 0x08; + /** Architecture v6K */ + public static final int PT_ARM_ARCHEXT_ARCHv6K = 0x09; + /** Architecture v7 (in this case the architecture profile may also be required to fully specify the needed execution environment) */ + public static final int PT_ARM_ARCHEXT_ARCHv7 = 0x0A; + /** Architecture v6M (e.g. Cortex M0) */ + public static final int PT_ARM_ARCHEXT_ARCHv6M = 0x0B; + /** Architecture v6S-M (e.g. Cortex M0) */ + public static final int PT_ARM_ARCHEXT_ARCHv6SM = 0x0C; + /** Architecture v7E-M */ + public static final int PT_ARM_ARCHEXT_ARCHv7EM = 0x0D; + + // FLAGS + + /** This masks an 8-bit version number, the version of the ABI to which this ELF file conforms. This ABI is version 5. A value of 0 denotes unknown conformance. */ + public static final int EF_ARM_EABIMASK = 0xFF000000; + /** The ELF file contains BE-8 code, suitable for execution on an ARM Architecture v6 processor. This flag must only be set on an executable file. */ + public static final int EF_ARM_BE8 = 0x00800000; + +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ARM_ElfRelocationConstants.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ARM_ElfRelocationConstants.java new file mode 100644 index 00000000..74e5505c --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ARM_ElfRelocationConstants.java @@ -0,0 +1,285 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.elf.relocation; + +public class ARM_ElfRelocationConstants { + + /** No operation needed */ + public static final int R_ARM_NONE = 0; + /** ((S + A) | T) - P [DEPRECATED] */ + public static final int R_ARM_PC24 = 1; + /** (S + A) | T */ + public static final int R_ARM_ABS32 = 2; + /** ((S + A) | T) - P */ + public static final int R_ARM_REL32 = 3; + /** S + A - P */ + public static final int R_ARM_LDR_PC_G0 = 4; + /** S + A */ + public static final int R_ARM_ABS16 = 5; + /** S + A */ + public static final int R_ARM_ABS12 = 6; + /** S + A */ + public static final int R_ARM_THM_ABS5 = 7; + /** S + A */ + public static final int R_ARM_ABS_8 = 8; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_SBREL32 = 9; + /** ((S + A) | T) - P */ + public static final int R_ARM_THM_CALL = 10; + /** S + A - Pa */ + public static final int R_ARM_THM_PC8 = 11; + /** DELTA(B(S)) + A */ + public static final int R_ARM_BREL_ADJ = 12; + public static final int R_ARM_TLS_DESC = 13; + /** [OBSOLETE] */ + public static final int R_ARM_THM_SWI8 = 14; + /** [OBSOLETE] */ + public static final int R_ARM_XPC25 = 15; + /** [OBSOLETE] */ + public static final int R_ARM_THM_XPC22 = 16; + /** Module[S] */ + public static final int R_ARM_TLS_DTPMOD32 = 17; + /** S + A - TLS */ + public static final int R_ARM_TLS_DTPOFF32 = 18; + /** S + A - TLS */ + public static final int R_ARM_TLS_TPOFF32 = 19; + /** Miscellaneous */ + public static final int R_ARM_COPY = 20; + /** (S + A) | T */ + public static final int R_ARM_GLOB_DAT = 21; + /** (S + A) | T */ + public static final int R_ARM_JUMP_SLOT = 22; + /** B(S) + A [Note: see Table 4-16] */ + public static final int R_ARM_RELATIVE = 23; + /** ((S + A) | T) - GOT_ORG */ + public static final int R_ARM_GOTOFF32 = 24; + /** B(S) + A - P */ + public static final int R_ARM_BASE_PREL = 25; + /** GOT(S) + A - GOT_ORG */ + public static final int R_ARM_GOT_BREL = 26; + /** ((S + A) | T) - P */ + public static final int R_ARM_GOT_PLT32 = 27; + /** ((S + A) | T) - P */ + public static final int R_ARM_CALL = 28; + /** ((S + A) | T) - P */ + public static final int R_ARM_JUMP24 = 29; + /** ((S + A) | T) - P */ + public static final int R_ARM_THM_JUMP24 = 30; + /** B(S) + A */ + public static final int R_ARM_BASE_ABS = 31; + /** Obsolete */ + public static final int R_ARM_ALU_PCREL_7_0 = 32; + /** Obsolete */ + public static final int R_ARM_ALU_PCREL_15_8 = 33; + /** Obsolete */ + public static final int R_ARM_ALU_PCREL_23_15 = 34; + /** S + A - B(S) */ + public static final int R_ARM_LDR_SBREL_11_0_NC = 35; + /** S + A - B(S) */ + public static final int R_ARM_ALU_SBREL_19_12_NC = 36; + /** S + A - B(S) */ + public static final int R_ARM_ALU_SBREL_27_20_CK = 37; + /** (S + A) | T or ((S + A) | T) - P */ + public static final int R_ARM_TARGET1 = 38; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_SBREL31 = 39; + /** Miscellaneous */ + public static final int R_ARM_V4BX = 40; + /** Miscellaneous */ + public static final int R_ARM_TARGET2 = 41; + /** ((S + A) | T) - P */ + public static final int R_ARM_PREL31 = 42; + /** (S + A) | T */ + public static final int R_ARM_MOVW_ABS_NC = 43; + /** S + A */ + public static final int R_ARM_MOVT_ABS = 44; + /** ((S + A) | T) - P */ + public static final int R_ARM_MOVW_PREL_NC = 45; + /** S + A - P */ + public static final int R_ARM_MOVT_PREL = 46; + /** (S + A) | T */ + public static final int R_ARM_THM_MOVW_ABS_NC = 47; + /** S + A */ + public static final int R_ARM_THM_MOVT_ABS = 48; + 
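A gloss on the formulas in the javadoc throughout this table, which follow the notation of the "ELF for the ARM Architecture" (AAELF) document: S is the target symbol's value, A the addend, P the place being relocated, T is 1 when the target addresses Thumb code, B(S) the segment base, GOT(S)/PLT(S) the corresponding GOT/PLT entry, and GOT_ORG the origin of the GOT. The relocation handler later in this diff applies these literally; for instance its `R_ARM_ABS32` case computes `(S + A) | T` exactly as in this sketch (values hypothetical):

```java
// Worked example of "(S + A) | T" for R_ARM_ABS32 (hypothetical values).
long S = 0x10000L;              // resolved symbol value
long A = 4L;                    // addend read from the relocation site
boolean targetIsThumb = true;   // whether the symbol addresses Thumb code
long value = (S + A) | (targetIsThumb ? 1L : 0L); // 0x10005: bit 0 marks Thumb
```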
/** ((S + A) | T) - P */ + public static final int R_ARM_THM_MOVW_PREL_NC = 49; + /** S + A - P */ + public static final int R_ARM_THM_MOVT_PREL = 50; + /** ((S + A) | T) - P */ + public static final int R_ARM_THM_JUMP19 = 51; + /** S + A - P */ + public static final int R_ARM_THM_JUMP6 = 52; + /** ((S + A) | T) - Pa */ + public static final int R_ARM_THM_ALU_PREL_11_0 = 53; + /** S + A - Pa */ + public static final int R_ARM_THM_PC12 = 54; + /** S + A */ + public static final int R_ARM_ABS32_NOI = 55; + /** S + A - P */ + public static final int R_ARM_REL32_NOI = 56; + /** ((S + A) | T) - P */ + public static final int R_ARM_ALU_PC_G0_NC = 57; + /** ((S + A) | T) - P */ + public static final int R_ARM_ALU_PC_G0 = 58; + /** ((S + A) | T) - P */ + public static final int R_ARM_ALU_PC_G1_NC = 59; + /** ((S + A) | T) - P */ + public static final int R_ARM_ALU_PC_G1 = 60; + /** ((S + A) | T) - P */ + public static final int R_ARM_ALU_PC_G2 = 61; + /** S + A - P */ + public static final int R_ARM_LDR_PC_G1 = 62; + /** S + A - P */ + public static final int R_ARM_LDR_PC_G2 = 63; + /** S + A - P */ + public static final int R_ARM_LDRS_PC_G0 = 64; + /** S + A - P */ + public static final int R_ARM_LDRS_PC_G1 = 65; + /** S + A - P */ + public static final int R_ARM_LDRS_PC_G2 = 66; + /** S + A - P */ + public static final int R_ARM_LDC_PC_G0 = 67; + /** S + A - P */ + public static final int R_ARM_LDC_PC_G1 = 68; + /** S + A - P */ + public static final int R_ARM_LDC_PC_G2 = 69; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_ALU_SB_G0_NC = 70; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_ALU_SB_G0 = 71; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_ALU_SB_G1_NC = 72; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_ALU_SB_G1 = 73; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_ALU_SB_G2 = 74; + /** S + A - B(S) */ + public static final int R_ARM_LDR_SB_G0 = 75; + /** S + A - B(S) */ + public static final int R_ARM_LDR_SB_G1 = 76; + /** S + A - B(S) */ + public static final int R_ARM_LDR_SB_G2 = 77; + /** S + A - B(S) */ + public static final int R_ARM_LDRS_SB_G0 = 78; + /** S + A - B(S) */ + public static final int R_ARM_LDRS_SB_G1 = 79; + /** S + A - B(S) */ + public static final int R_ARM_LDRS_SB_G2 = 80; + /** S + A - B(S) */ + public static final int R_ARM_LDC_SB_G0 = 81; + /** S + A - B(S) */ + public static final int R_ARM_LDC_SB_G1 = 82; + /** S + A - B(S) */ + public static final int R_ARM_LDC_SB_G2 = 83; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_MOVW_BREL_NC = 84; + /** S + A - B(S) */ + public static final int R_ARM_MOVT_BREL = 85; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_MOVW_BREL = 86; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_THM_MOVW_BREL_NC = 87; + /** S + A - B(S) */ + public static final int R_ARM_THM_MOVT_BREL = 88; + /** ((S + A) | T) - B(S) */ + public static final int R_ARM_THM_MOVW_BREL = 89; + /** ? */ + public static final int R_ARM_TLS_GOTDESC = 90; + /** ? */ + public static final int R_ARM_TLS_CALL = 91; + /** TLS relaxation */ + public static final int R_ARM_TLS_DESCSEQ = 92; + /** ? 
*/ + public static final int R_ARM_THM_TLS_CALL = 93; + /** PLT(S) + A */ + public static final int R_ARM_PLT32_ABS = 94; + /** GOT(S) + A */ + public static final int R_ARM_GOT_ABS = 95; + /** GOT(S) + A - P */ + public static final int R_ARM_GOT_PREL = 96; + /** GOT(S) + A - GOT_ORG */ + public static final int R_ARM_GOT_BREL12 = 97; + /** S + A - GOT_ORG */ + public static final int R_ARM_GOTOFF12 = 98; + /** ? */ + public static final int R_ARM_GOTRELAX = 99; + /** ? */ + public static final int R_ARM_GNU_VTENTRY = 100; + /** ? */ + public static final int R_ARM_GNU_VTINHERIT = 101; + /** S + A - P */ + public static final int R_ARM_THM_JUMP11 = 102; + /** S + A - P */ + public static final int R_ARM_THM_JUMP8 = 103; + /** GOT(S) + A - P */ + public static final int R_ARM_TLS_GD32 = 104; + /** GOT(S) + A - P */ + public static final int R_ARM_TLS_LDM32 = 105; + /** S + A - TLS */ + public static final int R_ARM_TLS_LDO32 = 106; + /** GOT(S) + A - P */ + public static final int R_ARM_TLS_IE32 = 107; + /** S + A - tp */ + public static final int R_ARM_TLS_LE32 = 108; + /** S + A - TLS */ + public static final int R_ARM_TLS_LDO12 = 109; + /** S + A - tp */ + public static final int R_ARM_TLS_LE12 = 110; + /** GOT(S) + A - GOT_ORG */ + public static final int R_ARM_TLS_IE12GP = 111; + /** ? */ + public static final int R_ARM_PRIVATE_0 = 112; + /** ? */ + public static final int R_ARM_PRIVATE_1 = 113; + /** ? */ + public static final int R_ARM_PRIVATE_2 = 114; + /** ? */ + public static final int R_ARM_PRIVATE_3 = 115; + /** ? */ + public static final int R_ARM_PRIVATE_4 = 116; + /** ? */ + public static final int R_ARM_PRIVATE_5 = 117; + /** ? */ + public static final int R_ARM_PRIVATE_6 = 118; + /** ? */ + public static final int R_ARM_PRIVATE_7 = 119; + /** ? */ + public static final int R_ARM_PRIVATE_8 = 120; + /** ? */ + public static final int R_ARM_PRIVATE_9 = 121; + /** ? */ + public static final int R_ARM_PRIVATE_10 = 122; + /** ? */ + public static final int R_ARM_PRIVATE_11 = 123; + /** ? */ + public static final int R_ARM_PRIVATE_12 = 124; + /** ? */ + public static final int R_ARM_PRIVATE_13 = 125; + /** ? */ + public static final int R_ARM_PRIVATE_14 = 126; + /** ? */ + public static final int R_ARM_PRIVATE_15 = 127; + /** ? */ + public static final int R_ARM_ME_TOO = 128; + /** ? */ + public static final int R_ARM_THM_TLS_DESCSEQ16 = 129; + /** ? */ + public static final int R_ARM_THM_TLS_DESCSEQ32 = 130; + + private ARM_ElfRelocationConstants() { + // no construct + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ARM_ElfRelocationHandler.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ARM_ElfRelocationHandler.java new file mode 100644 index 00000000..7069a491 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ARM_ElfRelocationHandler.java @@ -0,0 +1,658 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.util.bin.format.elf.relocation; + +import ghidra.app.util.bin.format.elf.*; +import ghidra.program.model.address.Address; +import ghidra.program.model.listing.Function; +import ghidra.program.model.listing.Program; +import ghidra.program.model.mem.*; +import ghidra.util.exception.NotFoundException; + +public class ARM_ElfRelocationHandler extends ElfRelocationHandler { + + @Override + public boolean canRelocate(ElfHeader elf) { + return elf.e_machine() == ElfConstants.EM_ARM; + } + + @Override + public int getRelrRelocationType() { + return ARM_ElfRelocationConstants.R_ARM_RELATIVE; + } + + @Override + public void relocate(ElfRelocationContext elfRelocationContext, ElfRelocation relocation, + Address relocationAddress) throws MemoryAccessException, NotFoundException { + + ElfHeader elf = elfRelocationContext.getElfHeader(); + if (elf.e_machine() != ElfConstants.EM_ARM) { + return; + } + + Program program = elfRelocationContext.getProgram(); + + Memory memory = program.getMemory(); + + boolean instructionBigEndian = program.getLanguage().getLanguageDescription().getInstructionEndian().isBigEndian(); + + int type = relocation.getType(); + if (type == ARM_ElfRelocationConstants.R_ARM_NONE) { + return; + } + int symbolIndex = relocation.getSymbolIndex(); + + long addend = relocation.getAddend(); // will be 0 for REL case + + ElfSymbol sym = elfRelocationContext.getSymbol(symbolIndex); + String symbolName = sym.getNameAsString(); + + boolean isThumb = isThumb(sym); + + long offset = (int) relocationAddress.getOffset(); + + Address symbolAddr = elfRelocationContext.getSymbolAddress(sym); + long symbolValue = elfRelocationContext.getSymbolValue(sym); + + int newValue = 0; + + switch (type) { + case ARM_ElfRelocationConstants.R_ARM_PC24: { // Target class: ARM Instruction + int oldValue = memory.getInt(relocationAddress, instructionBigEndian); + if (elfRelocationContext.extractAddend()) { + addend = (oldValue << 8 >> 6); // extract addend and sign-extend with *4 factor + } + newValue = (int) (symbolValue - offset + addend); + // if this is a BLX instruction, must set bit24 to identify the half-word + if ((oldValue & 0xf0000000) == 0xf0000000) { + newValue = (oldValue & 0xfe000000) | (((newValue >> 1) & 1) << 24) | + ((newValue >> 2) & 0x00ffffff); + } + else { + newValue = (oldValue & 0xff000000) | ((newValue >> 2) & 0x00ffffff); + } + memory.setInt(relocationAddress, newValue, instructionBigEndian); + break; + } + case ARM_ElfRelocationConstants.R_ARM_ABS32: { // Target class: Data + if (elfRelocationContext.extractAddend()) { + addend = memory.getInt(relocationAddress); + } + if (addend != 0 && isUnsupportedExternalRelocation(program, relocationAddress, + symbolAddr, symbolName, addend, elfRelocationContext.getLog())) { + addend = 0; // prefer bad fixup for EXTERNAL over really-bad fixup + } + newValue = (int) (symbolValue + addend); + if (isThumb) { + newValue |= 1; + } + memory.setInt(relocationAddress, newValue); + break; + } + case ARM_ElfRelocationConstants.R_ARM_REL32: { // Target class: Data + if (elfRelocationContext.extractAddend()) { + addend = memory.getInt(relocationAddress); + } + newValue = (int) (symbolValue + addend); + newValue -= offset; // PC relative + if (isThumb) { + newValue |= 1; + } + memory.setInt(relocationAddress, newValue); + break; + } + case ARM_ElfRelocationConstants.R_ARM_PREL31: { // Target class: Data + int oldValue = 
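/* R_ARM_PREL31 is a 31-bit place-relative word (used, e.g., in .ARM.exidx exception tables); bit 31 of the original word is preserved, and the (oldValue << 1) >> 1 below sign-extends the low 31 bits to recover the addend */ 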
memory.getInt(relocationAddress); + if (elfRelocationContext.extractAddend()) { + addend = (oldValue << 1) >> 1; + } + newValue = (int) (symbolValue + addend); + newValue -= offset; // PC relative + if (isThumb) { + newValue |= 1; + } + newValue = (newValue & 0x7fffffff) + (oldValue & 0x80000000); + memory.setInt(relocationAddress, newValue); + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_PC_G0: { // Target class: ARM Instruction + int oldValue = memory.getInt(relocationAddress, instructionBigEndian); + newValue = (int) (symbolValue + addend); + newValue -= (offset + 8); // PC relative, PC will be 8 bytes after inst start + newValue = (oldValue & 0xff7ff000) | ((~(newValue >> 31) & 1) << 23) | + ((newValue >> 2) & 0xfff); + memory.setInt(relocationAddress, newValue, instructionBigEndian); + break; + } + case ARM_ElfRelocationConstants.R_ARM_ABS16: { // Target class: Data + short sValue = (short) (symbolValue + addend); + memory.setShort(relocationAddress, sValue); + break; + } + case ARM_ElfRelocationConstants.R_ARM_ABS12: { // Target class: ARM Instruction + int oldValue = memory.getInt(relocationAddress, instructionBigEndian); + newValue = (int) (symbolValue + addend); + newValue = (oldValue & 0xfffff000) | (newValue & 0x00000fff); + memory.setInt(relocationAddress, newValue, instructionBigEndian); + break; + } + /* + case ARM_ElfRelocationConstants.R_ARM_THM_ABS5: { + break; + } + */ + case ARM_ElfRelocationConstants.R_ARM_ABS_8: { // Target class: Data + byte bValue = (byte) (symbolValue + addend); + memory.setByte(relocationAddress, bValue); + break; + } + /* + case ARM_ElfRelocationConstants.R_ARM_SBREL32: { + break; + } + */ + case ARM_ElfRelocationConstants.R_ARM_THM_JUMP24: // Target class: Thumb32 Instruction + case ARM_ElfRelocationConstants.R_ARM_THM_CALL: { + + newValue = (int) (symbolValue + addend); + // since the extracted old value is added in below, there is no need to add 4 for the PC offset + newValue -= (offset); + + short oldValueH = memory.getShort(relocationAddress, instructionBigEndian); + short oldValueL = memory.getShort(relocationAddress.add(2), instructionBigEndian); + boolean isBLX = (oldValueL & 0x1000) == 0; + + int s = (oldValueH & (1 << 10)) >> 10; + int upper = oldValueH & 0x3ff; + int lower = oldValueL & 0x7ff; + int j1 = (oldValueL & (1 << 13)) >> 13; + int j2 = (oldValueL & (1 << 11)) >> 11; + int i1 = (j1 != s) ? 0 : 1; + int i2 = (j2 != s) ? 
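/* Thumb-2 BL/BLX immediate encoding: I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S); the (jx != s) ? 0 : 1 form computes exactly that */ 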
0 : 1; + int origaddend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1); + origaddend = (origaddend | ((s ^ 1) << 24)) - (1 << 24); + + newValue = newValue + origaddend; + + newValue = newValue >> 1; + // for Thumb, be careful: little-endian stores the instruction as two separate 2-byte halfwords + short newValueH = (short) ((oldValueH & 0xf800) | (((newValue >> 11) & 0x00007ff))); + short newValueL = (short) ((oldValueL & 0xf800) | (newValue & 0x00007ff)); + + if (isBLX) { + newValueL &= 0xfffe; + } + + memory.setShort(relocationAddress, newValueH, instructionBigEndian); + memory.setShort(relocationAddress.add(2), newValueL, instructionBigEndian); + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_PC8: { // Target class: Thumb16 Instruction + short oldValue = memory.getShort(relocationAddress, instructionBigEndian); + newValue = (int) (symbolValue + addend); + newValue -= (offset + 4); // PC relative, PC will be 4 bytes past inst start + newValue = newValue >> 1; + short sValue = (short) ((oldValue & 0xff00) | (newValue & 0x00ff)); + memory.setShort(relocationAddress, sValue, instructionBigEndian); + break; + } + /* + case ARM_ElfRelocationConstants.R_ARM_BREL_ADJ: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_DESC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_SWI8: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_XPC25: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_XPC22: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_DTPMOD32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_DTPOFF32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_TPOFF32: { + break; + } + */ + + case ARM_ElfRelocationConstants.R_ARM_GLOB_DAT: { + // Corresponds to resolved local/EXTERNAL symbols within GOT + if (elfRelocationContext.extractAddend()) { + addend = memory.getInt(relocationAddress); + } + newValue = (int) (symbolValue + addend); + if (isThumb) { + newValue |= 1; + } + memory.setInt(relocationAddress, newValue); + break; + } + + case ARM_ElfRelocationConstants.R_ARM_JUMP_SLOT: { // Target class: Data + // Corresponds to lazy dynamically linked external symbols within + // GOT/PLT. symbolValue corresponds to the PLT entry for which we need to + // create an external function location. 
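A jump slot is normally bound lazily by the dynamic linker at run time; here the loader binds the slot eagerly to the resolved symbol address instead. 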
Don't bother changing + // GOT entry bytes if it refers to .plt block + Address symAddress = elfRelocationContext.getSymbolAddress(sym); + MemoryBlock block = memory.getBlock(symAddress); + boolean isPltSym = block != null && block.getName().startsWith(".plt"); + boolean isExternalSym = + block != null && MemoryBlock.EXTERNAL_BLOCK_NAME.equals(block.getName()); + if (!isPltSym) { + memory.setInt(relocationAddress, (int) symAddress.getOffset()); + } + if (isPltSym || isExternalSym) { + Function extFunction = + elfRelocationContext.getLoadHelper().createExternalFunctionLinkage( + symbolName, symAddress, null); + if (extFunction == null) { + markAsError(program, relocationAddress, "R_ARM_JUMP_SLOT", symbolName, + "Failed to create R_ARM_JUMP_SLOT external function", + elfRelocationContext.getLog()); + return; + } + } + break; + } + + case ARM_ElfRelocationConstants.R_ARM_RELATIVE: { // Target class: Data + if (elfRelocationContext.extractAddend()) { + addend = memory.getInt(relocationAddress); + } + newValue = + (int) elfRelocationContext.getImageBaseWordAdjustmentOffset() + (int) addend; + memory.setInt(relocationAddress, newValue); + break; + } + /* + case ARM_ElfRelocationConstants.R_ARM_GOTOFF32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_BASE_PREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GOT_BREL: { + break; + } + */ + + case ARM_ElfRelocationConstants.R_ARM_JUMP24: // Target class: ARM Instruction + case ARM_ElfRelocationConstants.R_ARM_CALL: + case ARM_ElfRelocationConstants.R_ARM_GOT_PLT32: + int oldValue = memory.getInt(relocationAddress, instructionBigEndian); + newValue = (int) (symbolValue + addend); + + newValue -= (offset + 8); // PC relative, PC will be 8 bytes past inst start + + // if this is a BLX instruction, must put the lower half-word bit in bit24 + // TODO: this might not appear on a BLX, but just in case + if ((oldValue & 0xff000000) == 0xfb000000) { + newValue = (oldValue & 0xfe000000) | (((newValue >> 1) & 1) << 24) | + ((newValue >> 2) & 0x00ffffff); + } + else { + newValue = (oldValue & 0xff000000) | ((newValue >> 2) & 0x00ffffff); + } + memory.setInt(relocationAddress, newValue, instructionBigEndian); + break; + + /* + case ARM_ElfRelocationConstants.R_ARM_BASE_ABS: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PCREL_7_0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PCREL_15_8: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PCREL_23_15: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_SBREL_11_0_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SBREL_19_12_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SBREL_27_20_CK: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TARGET1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_SBREL31: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_V4BX: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TARGET2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PREL31: { + break; + } +*/ + case ARM_ElfRelocationConstants.R_ARM_MOVW_ABS_NC: + case ARM_ElfRelocationConstants.R_ARM_MOVT_ABS: { // Target Class: ARM Instruction + oldValue = memory.getInt(relocationAddress, instructionBigEndian); + newValue = oldValue; + + oldValue = ((oldValue & 0xf0000) >> 4) | (oldValue & 0xfff); + oldValue = (oldValue ^ 0x8000) - 0x8000; + + oldValue += symbolValue; + if (type == ARM_ElfRelocationConstants.R_ARM_MOVT_ABS) { + oldValue >>= 16; + } + + newValue &= 0xfff0f000; + newValue |= ((oldValue & 0xf000) << 4) | + (oldValue 
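/* the MOVW/MOVT 16-bit immediate is split across the instruction as imm4 (bits 19:16) and imm12 (bits 11:0); this re-inserts the relocated value into those fields */ 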
& 0x0fff); + + memory.setInt(relocationAddress, newValue, instructionBigEndian); + + break; + } +/* + case ARM_ElfRelocationConstants.R_ARM_MOVW_PREL_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_MOVT_PREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_ABS_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVT_ABS: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_PREL_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVT_PREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_JUMP19: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_JUMP6: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_ALU_PREL_11_0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_PC12: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ABS32_NOI: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_REL32_NOI: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G0_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G1_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_PC_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_PC_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDRS_PC_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDRS_PC_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDRS_PC_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDC_PC_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDC_PC_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDC_PC_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G0_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G1_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_SB_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_SB_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDR_SB_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDRS_SB_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDRS_SB_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDRS_SB_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDC_SB_G0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDC_SB_G1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_LDC_SB_G2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_MOVW_BREL_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_MOVT_BREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_MOVW_BREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_BREL_NC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVT_BREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_BREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_GOTDESC: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_CALL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_DESCSEQ: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_TLS_CALL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PLT32_ABS: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GOT_ABS: { + break; + } + case 
ARM_ElfRelocationConstants.R_ARM_GOT_PREL: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GOT_BREL12: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GOTOFF12: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GOTRELAX: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GNU_VTENTRY: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_GNU_VTINHERIT: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_JUMP11: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_JUMP8: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_GD32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_LDM32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_LDO32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_IE32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_LE32: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_LDO12: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_LE12: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_TLS_IE12GP: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_0: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_1: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_2: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_3: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_4: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_5: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_6: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_7: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_8: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_9: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_10: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_11: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_12: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_13: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_14: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_PRIVATE_15: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_TLS_DESCSEQ16: { + break; + } + case ARM_ElfRelocationConstants.R_ARM_THM_TLS_DESCSEQ32: { + break; + } + */ + + case ARM_ElfRelocationConstants.R_ARM_COPY: { + markAsWarning(program, relocationAddress, "R_ARM_COPY", symbolName, symbolIndex, + "Runtime copy not supported", elfRelocationContext.getLog()); + break; + } + + default: { + markAsUnhandled(program, relocationAddress, type, symbolIndex, symbolName, + elfRelocationContext.getLog()); + break; + } + } + } + + private boolean isThumb(ElfSymbol symbol) { + // an odd symbol value marks a Thumb function + if (symbol.isFunction() && (symbol.getValue() % 2) == 1) { + return true; + } + return false; + } + +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ElfArmRelocationFixupHandler.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ElfArmRelocationFixupHandler.java new file mode 100644 index 00000000..f2840e30 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/elf/relocation/ElfArmRelocationFixupHandler.java @@ -0,0 +1,185 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.util.bin.format.elf.relocation; + +import ghidra.app.plugin.core.reloc.RelocationFixupHandler; +import ghidra.app.util.opinion.ElfLoader; +import ghidra.program.model.address.Address; +import ghidra.program.model.lang.Language; +import ghidra.program.model.lang.Processor; +import ghidra.program.model.listing.Program; +import ghidra.program.model.mem.MemoryAccessException; +import ghidra.program.model.reloc.Relocation; +import ghidra.program.model.util.CodeUnitInsertionException; + +public class ElfArmRelocationFixupHandler extends RelocationFixupHandler { + + @Override + public boolean processRelocation(Program program, Relocation relocation, Address oldImageBase, + Address newImageBase) throws MemoryAccessException, CodeUnitInsertionException { + + switch (relocation.getType()) { + case ARM_ElfRelocationConstants.R_ARM_NONE: + case ARM_ElfRelocationConstants.R_ARM_ABS32: + case ARM_ElfRelocationConstants.R_ARM_REL32: + case ARM_ElfRelocationConstants.R_ARM_GLOB_DAT: +// case ARM_ElfRelocationConstants.R_ARM_JUMP_SLOT: + case ARM_ElfRelocationConstants.R_ARM_RELATIVE: + case ARM_ElfRelocationConstants.R_ARM_GOT_PLT32: + case ARM_ElfRelocationConstants.R_ARM_CALL: + case ARM_ElfRelocationConstants.R_ARM_JUMP24: + case ARM_ElfRelocationConstants.R_ARM_THM_JUMP24: + return process32BitRelocation(program, relocation, oldImageBase, newImageBase); + +// case ARM_ElfRelocationConstants.R_ARM_PC24: +// case ARM_ElfRelocationConstants.R_ARM_LDR_PC_G0: +// case ARM_ElfRelocationConstants.R_ARM_ABS16: +// case ARM_ElfRelocationConstants.R_ARM_ABS12: +// case ARM_ElfRelocationConstants.R_ARM_THM_ABS5: +// case ARM_ElfRelocationConstants.R_ARM_ABS_8: +// case ARM_ElfRelocationConstants.R_ARM_SBREL32: +// case ARM_ElfRelocationConstants.R_ARM_THM_CALL: +// case ARM_ElfRelocationConstants.R_ARM_THM_PC8: +// case ARM_ElfRelocationConstants.R_ARM_BREL_ADJ: +// case ARM_ElfRelocationConstants.R_ARM_TLS_DESC: +// case ARM_ElfRelocationConstants.R_ARM_THM_SWI8: +// case ARM_ElfRelocationConstants.R_ARM_XPC25: +// case ARM_ElfRelocationConstants.R_ARM_THM_XPC22: +// case ARM_ElfRelocationConstants.R_ARM_TLS_DTPMOD32: +// case ARM_ElfRelocationConstants.R_ARM_TLS_DTPOFF32: +// case ARM_ElfRelocationConstants.R_ARM_TLS_TPOFF32: +// case ARM_ElfRelocationConstants.R_ARM_COPY: +// case ARM_ElfRelocationConstants.R_ARM_GOTOFF32: +// case ARM_ElfRelocationConstants.R_ARM_BASE_PREL: +// case ARM_ElfRelocationConstants.R_ARM_GOT_BREL: +// case ARM_ElfRelocationConstants.R_ARM_BASE_ABS: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PCREL_7_0: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PCREL_15_8: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PCREL_23_15: +// case ARM_ElfRelocationConstants.R_ARM_LDR_SBREL_11_0_NC: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SBREL_19_12_NC: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SBREL_27_20_CK: +// case ARM_ElfRelocationConstants.R_ARM_TARGET1: +// case ARM_ElfRelocationConstants.R_ARM_SBREL31: +// case ARM_ElfRelocationConstants.R_ARM_V4BX: +// case ARM_ElfRelocationConstants.R_ARM_TARGET2: +// case 
ARM_ElfRelocationConstants.R_ARM_PREL31: +// case ARM_ElfRelocationConstants.R_ARM_MOVW_ABS_NC: +// case ARM_ElfRelocationConstants.R_ARM_MOVT_ABS: +// case ARM_ElfRelocationConstants.R_ARM_MOVW_PREL_NC: +// case ARM_ElfRelocationConstants.R_ARM_MOVT_PREL: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_ABS_NC: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVT_ABS: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_PREL_NC: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVT_PREL: +// case ARM_ElfRelocationConstants.R_ARM_THM_JUMP19: +// case ARM_ElfRelocationConstants.R_ARM_THM_JUMP6: +// case ARM_ElfRelocationConstants.R_ARM_THM_ALU_PREL_11_0: +// case ARM_ElfRelocationConstants.R_ARM_THM_PC12: +// case ARM_ElfRelocationConstants.R_ARM_ABS32_NOI: +// case ARM_ElfRelocationConstants.R_ARM_REL32_NOI: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G0_NC: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G0: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G1_NC: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G1: +// case ARM_ElfRelocationConstants.R_ARM_ALU_PC_G2: +// case ARM_ElfRelocationConstants.R_ARM_LDR_PC_G1: +// case ARM_ElfRelocationConstants.R_ARM_LDR_PC_G2: +// case ARM_ElfRelocationConstants.R_ARM_LDRS_PC_G0: +// case ARM_ElfRelocationConstants.R_ARM_LDRS_PC_G1: +// case ARM_ElfRelocationConstants.R_ARM_LDRS_PC_G2: +// case ARM_ElfRelocationConstants.R_ARM_LDC_PC_G0: +// case ARM_ElfRelocationConstants.R_ARM_LDC_PC_G1: +// case ARM_ElfRelocationConstants.R_ARM_LDC_PC_G2: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G0_NC: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G0: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G1_NC: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G1: +// case ARM_ElfRelocationConstants.R_ARM_ALU_SB_G2: +// case ARM_ElfRelocationConstants.R_ARM_LDR_SB_G0: +// case ARM_ElfRelocationConstants.R_ARM_LDR_SB_G1: +// case ARM_ElfRelocationConstants.R_ARM_LDR_SB_G2: +// case ARM_ElfRelocationConstants.R_ARM_LDRS_SB_G0: +// case ARM_ElfRelocationConstants.R_ARM_LDRS_SB_G1: +// case ARM_ElfRelocationConstants.R_ARM_LDRS_SB_G2: +// case ARM_ElfRelocationConstants.R_ARM_LDC_SB_G0: +// case ARM_ElfRelocationConstants.R_ARM_LDC_SB_G1: +// case ARM_ElfRelocationConstants.R_ARM_LDC_SB_G2: +// case ARM_ElfRelocationConstants.R_ARM_MOVW_BREL_NC: +// case ARM_ElfRelocationConstants.R_ARM_MOVT_BREL: +// case ARM_ElfRelocationConstants.R_ARM_MOVW_BREL: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_BREL_NC: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVT_BREL: +// case ARM_ElfRelocationConstants.R_ARM_THM_MOVW_BREL: +// case ARM_ElfRelocationConstants.R_ARM_TLS_GOTDESC: +// case ARM_ElfRelocationConstants.R_ARM_TLS_CALL: +// case ARM_ElfRelocationConstants.R_ARM_TLS_DESCSEQ: +// case ARM_ElfRelocationConstants.R_ARM_THM_TLS_CALL: +// case ARM_ElfRelocationConstants.R_ARM_PLT32_ABS: +// case ARM_ElfRelocationConstants.R_ARM_GOT_ABS: +// case ARM_ElfRelocationConstants.R_ARM_GOT_PREL: +// case ARM_ElfRelocationConstants.R_ARM_GOT_BREL12: +// case ARM_ElfRelocationConstants.R_ARM_GOTOFF12: +// case ARM_ElfRelocationConstants.R_ARM_GOTRELAX: +// case ARM_ElfRelocationConstants.R_ARM_GNU_VTENTRY: +// case ARM_ElfRelocationConstants.R_ARM_GNU_VTINHERIT: +// case ARM_ElfRelocationConstants.R_ARM_THM_JUMP11: +// case ARM_ElfRelocationConstants.R_ARM_THM_JUMP8: +// case ARM_ElfRelocationConstants.R_ARM_TLS_GD32: +// case ARM_ElfRelocationConstants.R_ARM_TLS_LDM32: +// case ARM_ElfRelocationConstants.R_ARM_TLS_LDO32: +// case 
ARM_ElfRelocationConstants.R_ARM_TLS_IE32: +// case ARM_ElfRelocationConstants.R_ARM_TLS_LE32: +// case ARM_ElfRelocationConstants.R_ARM_TLS_LDO12: +// case ARM_ElfRelocationConstants.R_ARM_TLS_LE12: +// case ARM_ElfRelocationConstants.R_ARM_TLS_IE12GP: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_0: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_1: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_2: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_3: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_4: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_5: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_6: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_7: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_8: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_9: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_10: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_11: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_12: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_13: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_14: +// case ARM_ElfRelocationConstants.R_ARM_PRIVATE_15: +// case ARM_ElfRelocationConstants.R_ARM_THM_TLS_DESCSEQ16: +// case ARM_ElfRelocationConstants.R_ARM_THM_TLS_DESCSEQ32: +// return false; + } + return false; + } + + @Override + public boolean handlesProgram(Program program) { + if (!ElfLoader.ELF_NAME.equals(program.getExecutableFormat())) { + return false; + } + Language language = program.getLanguage(); + if (language.getLanguageDescription().getSize() != 32) { + return false; + } + Processor processor = language.getProcessor(); + return (processor.equals(Processor.findOrPossiblyCreateProcessor("ARM"))); + } + +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/macho/relocation/ARM_MachoRelocationConstants.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/macho/relocation/ARM_MachoRelocationConstants.java new file mode 100644 index 00000000..b8728fba --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/macho/relocation/ARM_MachoRelocationConstants.java @@ -0,0 +1,82 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.macho.relocation; + +/** + * {@link ARM_MachoRelocationHandler} constants + * + * @see mach-o/arm/reloc.h + */ +public class ARM_MachoRelocationConstants { + + /** + * Generic relocation as described above + */ + public final static int ARM_RELOC_VANILLA = 0; + + /** + * The second relocation entry of a pair + */ + public final static int ARM_RELOC_PAIR = 1; + + /** + * A PAIR follows with subtract symbol value + */ + public final static int ARM_RELOC_SECTDIFF = 2; + + /** + * Like ARM_RELOC_SECTDIFF, but the symbol referenced was local + */ + public final static int ARM_RELOC_LOCAL_SECTDIFF = 3; + + /** + * Pre-bound lazy pointer + */ + public final static int ARM_RELOC_PB_LA_PTR = 4; + + /** + * 24 bit branch displacement (to a word address) + */ + public final static int ARM_RELOC_BR24 = 5; + + /** + * 22 bit branch displacement (to a half-word address) + */ + public final static int ARM_THUMB_RELOC_BR22 = 6; + + /** + * Obsolete - a thumb 32-bit branch instruction possibly needing page-spanning branch workaround + */ + public final static int ARM_THUMB_32BIT_BRANCH = 7; + + /** + * For these two r_type relocations they always have a pair following them and the r_length bits + * are used differently. The encoding of the r_length is as follows: + * + * low bit of r_length: + * 0 - :lower16: for movw instructions + * 1 - :upper16: for movt instructions + * + * high bit of r_length: + * 0 - arm instructions + * 1 - thumb instructions + * + * The other half of the relocated expression is in the following pair relocation entry in the + * low 16 bits of r_address field. + */ + public final static int ARM_RELOC_HALF = 8; + public final static int ARM_RELOC_HALF_SECTDIFF = 9; +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/macho/relocation/ARM_MachoRelocationHandler.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/macho/relocation/ARM_MachoRelocationHandler.java new file mode 100644 index 00000000..005c3c11 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/app/util/bin/format/macho/relocation/ARM_MachoRelocationHandler.java @@ -0,0 +1,105 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.macho.relocation; + +import static ghidra.app.util.bin.format.macho.relocation.ARM_MachoRelocationConstants.*; + +import ghidra.app.util.bin.format.macho.*; +import ghidra.program.model.address.Address; +import ghidra.program.model.mem.MemoryAccessException; +import ghidra.util.exception.NotFoundException; + +/** + * A {@link MachoRelocationHandler} for ARM + * + * @see mach-o/arm/reloc.h + */ +public class ARM_MachoRelocationHandler extends MachoRelocationHandler { + + @Override + public boolean canRelocate(MachHeader header) { + return header.getCpuType() == CpuTypes.CPU_TYPE_ARM; + } + + @Override + public boolean isPairedRelocation(RelocationInfo relocation) { + return relocation.getType() == ARM_RELOC_SECTDIFF || + relocation.getType() == ARM_RELOC_LOCAL_SECTDIFF || + relocation.getType() == ARM_RELOC_HALF || + relocation.getType() == ARM_RELOC_HALF_SECTDIFF; + } + + @Override + public void relocate(MachoRelocation relocation) + throws MemoryAccessException, NotFoundException { + + if (!relocation.requiresRelocation()) { + return; + } + + RelocationInfo relocationInfo = relocation.getRelocationInfo(); + Address targetAddr = relocation.getTargetAddress(); + long orig = read(relocation); + + switch (relocationInfo.getType()) { + case ARM_RELOC_VANILLA: + if (!relocationInfo.isPcRelocated()) { + write(relocation, targetAddr.getOffset()); + } + else { + throw new NotFoundException("Unimplemented relocation"); + } + break; + case ARM_THUMB_RELOC_BR22: { + // BL and BLX + boolean blx = (orig & 0xd000f800) == 0xc000f000; + long s = (orig >> 10) & 0x1; + long j1 = (orig >> 29) & 0x1; + long j2 = (orig >> 27) & 0x1; + long i1 = ~(j1 ^ s) & 0x1; + long i2 = ~(j2 ^ s) & 0x1; + long imm10 = orig & 0x3ff; + long imm11 = (orig >> 16) & 0x7ff; + long addend = (s << 24) | (i1 << 23) | (i2 << 22) | (imm10 << 12) | (imm11 << 1); + addend |= s == 1 ? 0xfe000000 : 0; // sign extend + addend &= blx ? ~0x3 : ~0; // 4-byte align BLX + long value = targetAddr.getOffset() + addend; + s = (value >> 24) & 0x1; + i1 = (value >> 23) & 0x1; + i2 = (value >> 22) & 0x1; + j1 = ~(i1 ^ s) & 0x1; + j2 = ~(i2 ^ s) & 0x1; + imm10 = (value >> 12) & 0x3ff; + imm11 = (value >> 1) & 0x7ff; + long instr = orig & (blx ? 
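/* masks keep only the fixed opcode bits: 0xf800 preserves the 11110 prefix of the first halfword (the low 16 bits as read), and 0xd000 (0xc000 for BLX, whose bit 12 stays clear) preserves the fixed bits of the second halfword, so S, J1, J2, imm10 and imm11 can be re-inserted below */ 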
0xc000f800 : 0xd000f800); + instr |= (j1 << 29) | (j2 << 27) | (imm11 << 16) | (s << 10) | imm10; + write(relocation, instr); + break; + } + + case ARM_RELOC_PAIR: // should never see on its own here + case ARM_RELOC_SECTDIFF: // relocation not required (scattered) + case ARM_RELOC_LOCAL_SECTDIFF: // relocation not required (scattered) + case ARM_RELOC_PB_LA_PTR: // not seen yet + case ARM_RELOC_BR24: // not seen yet + case ARM_THUMB_32BIT_BRANCH: // not seen yet + case ARM_RELOC_HALF: // relocation not required (scattered) + case ARM_RELOC_HALF_SECTDIFF: // relocation not required (scattered) + default: + throw new NotFoundException("Unimplemented relocation"); + } + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/program/emulation/ARMEmulateInstructionStateModifier.java b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/program/emulation/ARMEmulateInstructionStateModifier.java new file mode 100644 index 00000000..f5ce6859 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/main/java/ghidra/program/emulation/ARMEmulateInstructionStateModifier.java @@ -0,0 +1,176 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.program.emulation; + +import java.math.BigInteger; + +import ghidra.pcode.emulate.Emulate; +import ghidra.pcode.emulate.EmulateInstructionStateModifier; +import ghidra.pcode.emulate.callother.CountLeadingZerosOpBehavior; +import ghidra.pcode.error.LowlevelError; +import ghidra.program.model.address.Address; +import ghidra.program.model.lang.Register; +import ghidra.program.model.lang.RegisterValue; +import ghidra.program.model.pcode.PcodeOp; + +public class ARMEmulateInstructionStateModifier extends EmulateInstructionStateModifier { + + private Register TModeReg; + private Register TBreg; + private RegisterValue tMode; + private RegisterValue aMode; + + public ARMEmulateInstructionStateModifier(Emulate emu) { + super(emu); + TModeReg = language.getRegister("TMode"); + TBreg = language.getRegister("ISAModeSwitch"); // generic register which mirrors TB register value + if (TModeReg != null) { + if (TBreg == null) { + throw new RuntimeException("Expected language " + language.getLanguageID() + + " to have ISAModeSwitch register defined"); + } + tMode = new RegisterValue(TModeReg, BigInteger.ONE); + aMode = new RegisterValue(TModeReg, BigInteger.ZERO); + } + + registerPcodeOpBehavior("count_leading_zeroes", new CountLeadingZerosOpBehavior()); + + /** + * We could registerPcodeOpBehavior for one or more of the following pcodeop's: + * + Absolute + ClearExclusiveLocal + DataMemoryBarrier + DataSynchronizationBarrier + ExclusiveAccess + HintDebug + HintPreloadData + HintPreloadDataForWrite + HintPreloadInstruction + HintYield + IndexCheck + InstructionSynchronizationBarrier + ReverseBitOrder + SendEvent + SignedDoesSaturate + SignedSaturate + UnsignedDoesSaturate + UnsignedSaturate + WaitForEvent + WaitForInterrupt + coprocessor_function + coprocessor_function2 + coprocessor_load + 
coprocessor_load2 + coprocessor_loadlong + coprocessor_loadlong2 + coprocessor_movefrom + coprocessor_movefrom2 + coprocessor_moveto + coprocessor_moveto2 + coprocessor_store + coprocessor_store2 + coprocessor_storelong + coprocessor_storelong2 + count_leading_zeroes + disableDataAbortInterrupts + disableFIQinterrupts + disableIRQinterrupts + enableDataAbortInterrupts + enableFIQinterrupts + enableIRQinterrupts + hasExclusiveAccess + isCurrentModePrivileged + isFIQinterruptsEnabled + isIRQinterruptsEnabled + isThreadMode + jazelle_branch + setAbortMode + setFIQMode + setIRQMode + setSupervisorMode + setSystemMode + setThreadModePrivileged + setUndefinedMode + setUserMode + software_breakpoint + software_interrupt + * + */ + } + + /** + * Initialize TB register based upon context-register state before first instruction is executed. + */ + @Override + public void initialExecuteCallback(Emulate emulate, Address current_address, RegisterValue contextRegisterValue) throws LowlevelError { + if (TModeReg == null) { + return; // Thumb mode not supported + } + BigInteger tModeValue = BigInteger.ZERO; + if (contextRegisterValue != null) { + tModeValue = + contextRegisterValue.getRegisterValue(TModeReg).getUnsignedValueIgnoreMask(); + } + if (!BigInteger.ZERO.equals(tModeValue)) { + tModeValue = BigInteger.ONE; + } + emu.getMemoryState().setValue(TBreg, tModeValue); + } + + /** + * Handle odd addresses which may occur when jumping/returning indirectly + * to Thumb mode. It is assumed that the language will properly handle + * context changes during the flow of execution; we need only fix + * the current program counter. + */ + @Override + public void postExecuteCallback(Emulate emulate, Address lastExecuteAddress, + PcodeOp[] lastExecutePcode, int lastPcodeIndex, Address currentAddress) + throws LowlevelError { + if (TModeReg == null) { + return; // Thumb mode not supported + } + if (lastPcodeIndex < 0) { + // ignore fall-through condition + return; + } + int lastOp = lastExecutePcode[lastPcodeIndex].getOpcode(); + if (lastOp != PcodeOp.BRANCH && lastOp != PcodeOp.CBRANCH && lastOp != PcodeOp.BRANCHIND && + lastOp != PcodeOp.CALL && lastOp != PcodeOp.CALLIND && lastOp != PcodeOp.RETURN) { + // only concerned with Branch, Call or Return ops + return; + } + long tbValue = emu.getMemoryState().getValue(TBreg); + if (tbValue == 1) { + // Thumb mode + emu.setContextRegisterValue(tMode); // change context to be consistent with TB value + if ((currentAddress.getOffset() & 0x1) == 1) { + emulate.setExecuteAddress(currentAddress.previous()); + } + } + else if (tbValue == 0) { + + if ((currentAddress.getOffset() & 0x1) == 1) { + throw new LowlevelError( + "Flow to odd address occurred without setting TB register (Thumb mode)"); + } + + // ARM mode + emu.setContextRegisterValue(aMode); // change context to be consistent with TB value + } + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM10e_O0_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM10e_O0_EmulatorTest.java new file mode 100644 index 00000000..6be91653 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM10e_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM10e_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:LE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM10e_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM10e_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM10e_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM10e_O3_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM10e_O3_EmulatorTest.java new file mode 100644 index 00000000..ff09840d --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM10e_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM10e_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:LE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM10e_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM10e_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM10e_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_O0_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_O0_EmulatorTest.java new file mode 100644 index 00000000..b0046db9 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_BE_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:BE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_BE_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_BE_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM_BE_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_O3_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_O3_EmulatorTest.java new file mode 100644 index 00000000..b2f7f143 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_BE_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:BE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_BE_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_BE_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM_BE_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_thumb_O0_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_thumb_O0_EmulatorTest.java new file mode 100644 index 00000000..92ca1e8a --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_thumb_O0_EmulatorTest.java @@ -0,0 +1,41 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_BE_thumb_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:BE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_BE_thumb_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_BE_thumb_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite( + ARM_BE_thumb_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_thumb_O3_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_thumb_O3_EmulatorTest.java new file mode 100644 index 00000000..c7d6fc69 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_BE_thumb_O3_EmulatorTest.java @@ -0,0 +1,41 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_BE_thumb_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:BE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_BE_thumb_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_BE_thumb_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite( + ARM_BE_thumb_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_O0_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_O0_EmulatorTest.java new file mode 100644 index 00000000..a40605ba --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:LE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_O3_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_O3_EmulatorTest.java new file mode 100644 index 00000000..b1fc3958 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:LE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_thumb_O0_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_thumb_O0_EmulatorTest.java new file mode 100644 index 00000000..b7556b33 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_thumb_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_thumb_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:LE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_thumb_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_thumb_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM_thumb_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_thumb_O3_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_thumb_O3_EmulatorTest.java new file mode 100644 index 00000000..f58ba1a9 --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARM_thumb_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class ARM_thumb_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "ARM:LE:32:v8"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public ARM_thumb_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ARM_thumb_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARM_thumb_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARMv5_O0_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARMv5_O0_EmulatorTest.java new file mode 100644 index 00000000..f5d11b8a --- /dev/null +++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARMv5_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ghidra.test.processors;
+
+import ghidra.test.processors.support.ProcessorEmulatorTestAdapter;
+import junit.framework.Test;
+
+public class ARMv5_O0_EmulatorTest extends ProcessorEmulatorTestAdapter {
+
+	private static final String LANGUAGE_ID = "ARM:LE:32:v5";
+	private static final String COMPILER_SPEC_ID = "default";
+
+	private static final String[] REG_DUMP_SET = new String[] {};
+
+	public ARMv5_O0_EmulatorTest(String name) throws Exception {
+		super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET);
+	}
+
+	@Override
+	protected String getProcessorDesignator() {
+		return "ARMv5_GCC_O0";
+	}
+
+	public static Test suite() {
+		return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARMv5_O0_EmulatorTest.class);
+	}
+}
diff --git a/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARMv5_O3_EmulatorTest.java b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARMv5_O3_EmulatorTest.java
new file mode 100644
index 00000000..a4743a0a
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/src/test.processors/java/ghidra/test/processors/ARMv5_O3_EmulatorTest.java
@@ -0,0 +1,40 @@
+/* ###
+ * IP: GHIDRA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ghidra.test.processors;
+
+import ghidra.test.processors.support.ProcessorEmulatorTestAdapter;
+import junit.framework.Test;
+
+public class ARMv5_O3_EmulatorTest extends ProcessorEmulatorTestAdapter {
+
+	private static final String LANGUAGE_ID = "ARM:LE:32:v5";
+	private static final String COMPILER_SPEC_ID = "default";
+
+	private static final String[] REG_DUMP_SET = new String[] {};
+
+	public ARMv5_O3_EmulatorTest(String name) throws Exception {
+		super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET);
+	}
+
+	@Override
+	protected String getProcessorDesignator() {
+		return "ARMv5_GCC_O3";
+	}
+
+	public static Test suite() {
+		return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(ARMv5_O3_EmulatorTest.class);
+	}
+}
diff --git a/src/third-party/sleigh/processors/ARM/src/test.slow/java/ghidra/app/plugin/core/analysis/ArmThumbChangeDisassemblyTest.java b/src/third-party/sleigh/processors/ARM/src/test.slow/java/ghidra/app/plugin/core/analysis/ArmThumbChangeDisassemblyTest.java
new file mode 100644
index 00000000..0ea7490e
--- /dev/null
+++ b/src/third-party/sleigh/processors/ARM/src/test.slow/java/ghidra/app/plugin/core/analysis/ArmThumbChangeDisassemblyTest.java
@@ -0,0 +1,99 @@
+/* ###
+ * IP: GHIDRA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.plugin.core.analysis; + +import static org.junit.Assert.assertEquals; + +import org.junit.*; + +import ghidra.framework.options.Options; +import ghidra.program.database.ProgramBuilder; +import ghidra.program.model.address.Address; +import ghidra.program.model.lang.RegisterValue; +import ghidra.program.model.listing.Instruction; +import ghidra.program.model.listing.Program; +import ghidra.program.model.symbol.Reference; +import ghidra.test.AbstractGhidraHeadedIntegrationTest; +import ghidra.test.TestEnv; + +/** + * Test the changing of the ARM/Thumb bit for code flow + * + * ARM code: + * ADR r12=addr + * bx r12 + * addr: + * Thumb code... + * + * Also tests that analysis puts on the correct reference on the r12, and not on the BX + */ +public class ArmThumbChangeDisassemblyTest extends AbstractGhidraHeadedIntegrationTest { + + private TestEnv env; + + private Program program; + + @Before + public void setUp() throws Exception { + env = new TestEnv(); + } + + @After + public void tearDown() { + if (program != null) + env.release(program); + program = null; + env.dispose(); + } + + protected void setAnalysisOptions(String optionName) { + int txId = program.startTransaction("Analyze"); + Options analysisOptions = program.getOptions(Program.ANALYSIS_PROPERTIES); + analysisOptions.setBoolean(optionName, false); + program.endTransaction(txId, true); + } + + + @Test + public void testCorrectDisassembly() throws Exception { + ProgramBuilder programBuilder = new ProgramBuilder("Test", ProgramBuilder._ARM); + program = programBuilder.getProgram(); + int txId = program.startTransaction("Add Memory");// leave open until tearDown + programBuilder.createMemory(".text", "1000", 64).setExecute(true);// initialized + programBuilder.setBytes("1000", "ff ff ff ea 01 c0 8f e2 1c ff 2f e1 82 08 30 b5 70 47"); + + programBuilder.disassemble("1000", 11, true); + programBuilder.analyze(); + + // should disassemble as ARM, then transition to Thumb + Address instrAddr = programBuilder.addr("100c"); + Instruction instructionAt = program.getListing().getInstructionAt(instrAddr); + Assert.assertNotEquals(null,instructionAt); + + assertEquals(6, program.getListing().getNumInstructions()); + + RegisterValue registerValue = program.getProgramContext().getRegisterValue(program.getRegister("TMode"), instrAddr); + + assertEquals(1,registerValue.getUnsignedValue().intValue()); + + // make sure reference put on operand 0, not mnemonic + instrAddr = programBuilder.addr("1008"); + instructionAt = program.getListing().getInstructionAt(instrAddr); + Reference[] operandReferences = instructionAt.getOperandReferences(0); + assertEquals(1,operandReferences.length); + assertEquals(0x100cL, operandReferences[0].getToAddress().getOffset()); + } +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c13490e2..e9a8cfe9 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.14) - project(maatTests LANGUAGES CXX) include(../cmake/project-is-top-level.cmake) include(../cmake/folders.cmake) @@ -12,6 +11,7 @@ endif() # Unit tests 
 add_executable(unit-tests
     unit-tests/test_all.cpp
+    unit-tests/test_archARM64.cpp
     unit-tests/test_archEVM.cpp
     unit-tests/test_archX64.cpp
     unit-tests/test_archX86.cpp
diff --git a/tests/unit-tests/test_all.cpp b/tests/unit-tests/test_all.cpp
index 4d85bea5..a24011d1 100644
--- a/tests/unit-tests/test_all.cpp
+++ b/tests/unit-tests/test_all.cpp
@@ -12,6 +12,7 @@ void test_symbolic_memory();
 void test_ir();
 void test_archX86();
 void test_archX64();
+void test_archARM64();
 void test_events();
 void test_snapshots();
 void test_solver();
@@ -52,6 +53,7 @@ int main(int argc, char ** argv)
         test_archX86();
         test_archX64();
         test_archEVM();
+        test_archARM64();
         test_solver();
         test_loader();
         test_serialization();
@@ -82,6 +84,8 @@ int main(int argc, char ** argv)
             test_archX64();
         else if( !strcmp(argv[i], "EVM"))
             test_archEVM();
+        else if( !strcmp(argv[i], "ARM64"))
+            test_archARM64();
         else if( !strcmp(argv[i], "event"))
             test_events();
         else if( !strcmp(argv[i], "snap"))
@@ -92,9 +96,8 @@ int main(int argc, char ** argv)
             test_loader();
         else if( !strcmp(argv[i], "serial"))
             test_serialization();
+
         /*
-        else if( !strcmp(argv[i], "ARM64"))
-            test_archARM64();
         else if( !strcmp(argv[i], "env"))
             test_env();
         */
diff --git a/tests/unit-tests/test_archARM64.cpp b/tests/unit-tests/test_archARM64.cpp
new file mode 100644
index 00000000..f14e6108
--- /dev/null
+++ b/tests/unit-tests/test_archARM64.cpp
@@ -0,0 +1,451 @@
+/*
+Commonwealth of Australia represented by the Department of Defence
+
+Produced by Nathan Do, Student Intern at DSTG (Defence Science and Technology Group)
+*/
+
+#include "maat/arch.hpp"
+#include "maat/varcontext.hpp"
+#include "maat/engine.hpp"
+#include "maat/exception.hpp"
+#include <cstdint>
+#include <iomanip>
+#include <iostream>
+#include <string>
+
+using std::cout;
+using std::endl;
+using std::string;
+
+namespace test
+{
+namespace archARM64
+{
+    using namespace maat;
+    // Assertion helper: returns 1 on success, aborts the test run on failure
+    unsigned int _assert(bool val, const string& msg){
+        if( !val){
+            cout << "\nFail: " << msg << std::endl;
+            throw test_exception();
+        }
+        return 1;
+    }
+
+    unsigned int disass_addition(MaatEngine& sym)
+    {
+        unsigned int ret_value = 0;
+        string code;
+        sym.cpu.ctx().set(ARM64::R2, exprcst(64,15));
+        sym.cpu.ctx().set(ARM64::R1, exprcst(64,25));
+
+        code = string("\x20\x00\x02\x8b", 4); // add x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 40, "ArchARM64: failed to disassemble and/or execute add");
+
+        /* Let's test the carry bit:
+           load a 64-bit constant into x0 */
+        code = string("\x80\x46\x82\xd2", 4); // movz x0, #0x1234
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\xA0\x79\xb5\xf2", 4); // movk x0, #0xABCD, LSL #16
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\xe0\xff\xdf\xf2", 4); // movk x0, #0xFFFF, LSL #32
+        sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\xe0\xff\xff\xf2", 4); // movk x0, #0xFFFF, LSL #48
+        sym.mem->write_buffer(0x100c, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x81\x46\x82\xd2", 4); // movz x1, #0x1234
+        sym.mem->write_buffer(0x1010, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\xa1\x79\xb5\xf2", 4); // movk x1, #0xABCD, LSL #16
+        sym.mem->write_buffer(0x1014, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\xe1\xff\xdf\xf2", 4); // movk x1, #0xFFFF, LSL #32
+        sym.mem->write_buffer(0x1018, (uint8_t*)code.c_str(), code.size());
+
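+        // (each movz/movk above loads one 16-bit chunk of a 64-bit immediate;
+        // after the final movk below, x0 = 0xffffffffabcd1234 and
+        // x1 = 0x7fffffffabcd1234, so the adds that follows must carry out)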
+ code = string("\xe1\xff\xef\xf2", 4); // movk x1, #0x7FFF, LSL #48 + sym.mem->write_buffer(0x101c, (uint8_t*)code.c_str(), code.size()); + + code = string("\x22\x00\x00\xab", 4); // adds x2, x0, x1 + sym.mem->write_buffer(0x1020, (uint8_t*)code.c_str(), code.size()); + + sym.run_from(0x1000,9); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xffffffffabcd1234, "1: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0x7fffffffabcd1234, "2: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0x7fffffff579a2468, "3: ArchARM64: failed to disassembly and/or execute add"); + + ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "4: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "5: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x0, "6: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "7: ArchARM64: failed to disassembly and/or execute add"); + + /* Check overflow bit now */ + code = string("\xe0\xff\xef\xf2", 4); // movk x0, #0x7FFF, LSL #48 + sym.mem->write_buffer(0x101c, (uint8_t*)code.c_str(), code.size()); + + sym.run_from(0x1000,9); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x7fffffffabcd1234, "8: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0xffffabcd1234, "9: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0x8000ffff579a2468, "10: ArchARM64: failed to disassembly and/or execute add"); + + ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x1, "11:ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x0, "12:ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x1, "13:ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x0, "14:ArchARM64: failed to disassembly and/or execute add"); + + /* Add a number and its complement then check the Zero Flag */ + code = string("\x00\x00\x80\x92", 4); // mov x0, #-1 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + + code = string("\x21\x00\x80\xD2", 4); // mov x1, #1 + sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size()); + + code = string("\x02\x00\x01\xAB", 4); // adds x2, x0, x1 + sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size()); + + sym.run_from(0x1000,3); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xffffffffffffffff, "8: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0x1, "9: ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0x0, "10: ArchARM64: failed to disassembly and/or execute add"); + + ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "11:ArchARM64: failed to disassembly and/or execute add"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x1, "12:ArchARM64: failed to disassembly and/or execute add"); 
+        /* Add a number and its complement, then check the zero flag */
+        code = string("\x00\x00\x80\x92", 4); // mov x0, #-1
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x21\x00\x80\xD2", 4); // mov x1, #1
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x02\x00\x01\xAB", 4); // adds x2, x0, x1
+        sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,3);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xffffffffffffffff, "15: ArchARM64: failed to disassemble and/or execute add");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0x1, "16: ArchARM64: failed to disassemble and/or execute add");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0x0, "17: ArchARM64: failed to disassemble and/or execute add");
+
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "18: ArchARM64: failed to disassemble and/or execute add");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x1, "19: ArchARM64: failed to disassemble and/or execute add");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "20: ArchARM64: failed to disassemble and/or execute add");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "21: ArchARM64: failed to disassemble and/or execute add");
+
+        return ret_value;
+    }
+
+    unsigned int disass_subtraction(MaatEngine& sym)
+    {
+        unsigned int ret_value = 0;
+        string code;
+
+        code = string("\x40\x00\x80\xd2", 4); // mov x0, #2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x21\x00\x80\xd2", 4); // mov x1, #1
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x02\x00\x01\xeb",4); // subs x2, x0, x1
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,3);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x2, "1: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0x1, "2: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0x1, "3: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "4: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "5: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x0, "6: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "7: ArchARM64: failed to disassemble and/or execute sub");
+
+        code = string("\x40\x00\x80\xd2", 4); // mov x0, #2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x41\x01\x80\xD2", 4); // mov x1, #10
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x02\x00\x01\xeb", 4); // subs x2, x0, x1
+        sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,3);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x2, "8: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0xa, "9: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == -0x8, "10: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x0, "11: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x1, "12: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "13: ArchARM64: failed to disassemble and/or execute sub");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x0, "14: ArchARM64: failed to disassemble and/or execute sub");
+
+        code = string("\x02\x00\x01\xDA", 4); // sbc x2, x0, x1
+        sym.mem->write_buffer(0x100c, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x100c,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == -0x9, "15: ArchARM64: failed to disassemble and/or execute sub");
+
+        return ret_value;
+    }
+
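+    // (AArch64 subtraction sets C as "no borrow": subs 2 - 1 leaves C=1 while
+    // subs 2 - 10 borrows and clears it; sbc then subtracts an extra 1 when
+    // C=0, which is why x2 ends up as -9 above)
+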
string("\x20\x00\x1f\x0b", 4); // add w0, w1, wzr + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x3000, "ArchARM64: failed to disassembly and/or execute zero"); + return ret_value; + } + + unsigned int disass_branch(MaatEngine& sym) + { + unsigned int ret_value = 0; + string code; + + code = string("\xE0\x01\x80\xD2", 4); // mov x0, #15 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + + code = string("\x1f\x28\x00\xf1", 4); // cmp x0, #10 + sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size()); + + code = string("\x4d\x00\x00\x54", 4); // b.le #8 + sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size()); + + code = string("\x05\x00\x80\x92", 4); // mov x5, #-1 + sym.mem->write_buffer(0x100c, (uint8_t*)code.c_str(), code.size()); + + code = string("\x25\x00\x80\xD2", 4); // mov x5, #1 + sym.mem->write_buffer(0x1010, (uint8_t*)code.c_str(), code.size()); + + sym.run_from(0x1000,4); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xf, "1: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R5).as_uint() == -0x1, "2: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "3: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "4: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "5: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x0, "6: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + + code = string("\xE0\x01\x80\xD2", 4); // mov x0, #15 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + + code = string("\x1f\x28\x00\xf1", 4); // cmp x0, #10 + sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size()); + + code = string("\x4a\x00\x00\x54", 4); // b.ge #8 + sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size()); + + code = string("\x05\x00\x80\x92", 4); // mov x5, #-1 + sym.mem->write_buffer(0x100c, (uint8_t*)code.c_str(), code.size()); + + code = string("\x25\x00\x80\xD2", 4); // mov x5, #1 + sym.mem->write_buffer(0x1010, (uint8_t*)code.c_str(), code.size()); + + sym.run_from(0x1000,4); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xf, "7: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::R5).as_uint() == 0x1, "8: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + + ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "9: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "10: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "11: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x0, "12: ArchARM64: failed to disassembly and/or execute Branch Conditional"); + + code = string("\xE0\x01\x80\xD2", 4); // mov x0, #15 + sym.mem->write_buffer(0x1000, 
+        code = string("\xE0\x01\x80\xD2", 4); // mov x0, #15
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x1F\x3C\x00\xF1", 4); // cmp x0, #15
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x40\x00\x00\x54", 4); // b.eq #8
+        sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x05\x00\x80\x92", 4); // mov x5, #-1
+        sym.mem->write_buffer(0x100c, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x25\x00\x80\xD2", 4); // mov x5, #1
+        sym.mem->write_buffer(0x1010, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,4);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xf, "13: ArchARM64: failed to disassemble and/or execute conditional branch");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R5).as_uint() == 0x1, "14: ArchARM64: failed to disassemble and/or execute conditional branch");
+
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "15: ArchARM64: failed to disassemble and/or execute conditional branch");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "16: ArchARM64: failed to disassemble and/or execute conditional branch");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "17: ArchARM64: failed to disassemble and/or execute conditional branch");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x1, "18: ArchARM64: failed to disassemble and/or execute conditional branch");
+
+        // Test branch and link
+        code = string("\xE0\x01\x80\xD2", 4); // mov x0, #15
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x1F\x3C\x00\xF1", 4); // cmp x0, #15
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x02\x00\x00\x94", 4); // bl #8
+        sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x05\x00\x80\x92", 4); // mov x5, #-1
+        sym.mem->write_buffer(0x100c, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x25\x00\x80\xd2", 4); // mov x5, #1
+        sym.mem->write_buffer(0x1010, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,4);
+
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xf, "19: ArchARM64: failed to disassemble and/or execute branch and link");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R5).as_uint() == 0x1, "20: ArchARM64: failed to disassemble and/or execute branch and link");
+
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::CF).as_uint() == 0x1, "21: ArchARM64: failed to disassemble and/or execute branch and link");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::NF).as_uint() == 0x0, "22: ArchARM64: failed to disassemble and/or execute branch and link");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::VF).as_uint() == 0x0, "23: ArchARM64: failed to disassemble and/or execute branch and link");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::ZF).as_uint() == 0x1, "24: ArchARM64: failed to disassemble and/or execute branch and link");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::LR).as_uint() == 0x100c, "25: ArchARM64: failed to disassemble and/or execute branch and link");
+
+        return ret_value;
+    }
+
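+    // (the store/load test below writes through x1 = 0x110000, an address the
+    // test driver at the bottom of this file maps explicitly; the word-level
+    // read-backs check little-endian placement of each half of the value)
+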
+    unsigned int disass_store_load(MaatEngine& sym)
+    {
+        unsigned int ret_value = 0;
+        string code;
+        // Set registers
+        sym.cpu.ctx().set(ARM64::LR, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R0, exprcst(64,0xDEADBEEF));
+        sym.cpu.ctx().set(ARM64::R1, exprcst(64,0x110000));
+        sym.cpu.ctx().set(ARM64::R2, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R3, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R4, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R5, exprcst(64,0));
+
+        code = string("\x20\x00\x00\xb9", 4); // str w0, [x1]
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert((uint32_t)sym.mem->read(0x110000, 4).as_uint() == 0xDEADBEEF, "1: ArchARM64: failed to disassemble store and load instructions.");
+
+        code = string("\x22\x00\x40\xb9", 4); // ldr w2, [x1]
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0xDEADBEEF, "2: ArchARM64: failed to disassemble store and load instructions.");
+
+        // Set registers
+        sym.cpu.ctx().set(ARM64::R0, exprcst(64,0xBADC0FFEE0DDF00D));
+        sym.cpu.ctx().set(ARM64::R1, exprcst(64,0x110000));
+
+        code = string("\x20\x00\x00\xf9", 4); // str x0, [x1]
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert((uint32_t)sym.mem->read(0x110000, 4).as_uint() == 0xe0ddf00d, "3: ArchARM64: failed to disassemble store and load instructions.");
+        ret_value += _assert((uint32_t)sym.mem->read(0x110004, 4).as_uint() == 0xbadc0ffe, "4: ArchARM64: failed to disassemble store and load instructions.");
+
+        code = string("\x22\x00\x40\xf9", 4); // ldr x2, [x1]
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0xBADC0FFEE0DDF00D, "5: ArchARM64: failed to disassemble store and load instructions.");
+
+        return ret_value;
+    }
+
+    unsigned int logical_shift(MaatEngine& sym)
+    {
+        unsigned int ret_value = 0;
+        string code;
+        // Set registers
+        sym.cpu.ctx().set(ARM64::LR, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R0, exprcst(64,0xDEAD));
+        sym.cpu.ctx().set(ARM64::R1, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R2, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R3, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R4, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R5, exprcst(64,0));
+
+        code = string("\x01\xf4\x7e\xd3",4); // lsl x1, x0, #2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x02\xfc\x42\xd3",4); // lsr x2, x0, #2
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        code = string("\x03\xfc\x42\x93",4); // asr x3, x0, #2
+        sym.mem->write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,3);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R1).as_uint() == 0x37ab4, "1: ArchARM64: failed to disassemble logical shift instructions.");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R2).as_uint() == 0x37ab, "2: ArchARM64: failed to disassemble logical shift instructions.");
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R3).as_uint() == 0x37ab, "3: ArchARM64: failed to disassemble logical shift instructions.");
+
+        return ret_value;
+    }
+
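+    // (on AArch64, lsl/lsr by an immediate are aliases of UBFM and asr is an
+    // alias of SBFM, so only the top opcode byte differs between the lsr
+    // encoding (0xd3...) and the asr encoding (0x93...) used above)
+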
+    unsigned int disass_bitwise(MaatEngine& sym)
+    {
+        unsigned int ret_value = 0;
+        string code;
+        // Set registers
+        sym.cpu.ctx().set(ARM64::R0, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R1, exprcst(64,0x6));
+        sym.cpu.ctx().set(ARM64::R2, exprcst(64,0xf));
+        sym.cpu.ctx().set(ARM64::R3, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R4, exprcst(64,0));
+        sym.cpu.ctx().set(ARM64::R5, exprcst(64,0));
+
+        code = string("\x20\x00\x02\x8a",4); // and x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x6, "1: ArchARM64: failed to disassemble bitwise operations");
+
+        code = string("\x20\x00\x02\xaa",4); // orr x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xf, "2: ArchARM64: failed to disassemble bitwise operations");
+
+        code = string("\x20\x00\x22\x8a",4); // bic x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x0, "3: ArchARM64: failed to disassemble bitwise operations");
+
+        code = string("\x20\x00\x22\xaa",4); // orn x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xfffffffffffffff6, "4: ArchARM64: failed to disassemble bitwise operations");
+
+        code = string("\x20\x00\x02\xca",4); // eor x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0x9, "5: ArchARM64: failed to disassemble bitwise operations");
+
+        sym.cpu.ctx().set(ARM64::R2, exprcst(64,0x9));
+        code = string("\x20\x00\x22\xca",4); // eon x0, x1, x2
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(ARM64::R0).as_uint() == 0xfffffffffffffff0, "6: ArchARM64: failed to disassemble bitwise operations");
+
+        return ret_value;
+    }
+
+} // namespace archARM64
+} // namespace test
+
+using namespace test::archARM64;
+
+void test_archARM64() {
+    unsigned int total = 0;
+    string green = "\033[1;32m";
+    string def = "\033[0m";
+    string bold = "\033[1m";
+
+    // Start testing
+    std::cout << bold << "[" << green << "+"
+              << def << bold << "]" << def << std::left << std::setw(34)
+              << " Testing Arch ARM64 support... " << std::flush;
+
+    MaatEngine engine(Arch::Type::ARM64);
+    engine.mem->map(0x0, 0x11000);
+    engine.mem->map(0x110000, 0x130000);
+
+    total += disass_addition(engine);
+    total += disass_subtraction(engine);
+    total += disass_zero(engine);
+    total += disass_branch(engine);
+    total += disass_store_load(engine);
+    total += logical_shift(engine);
+    total += disass_bitwise(engine);
+
+    std::cout << "\t" << total << "/" << total << green << "\t\tOK" << def << std::endl;
+}
\ No newline at end of file
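
For reference, a minimal sketch (not part of the patch) of how the new ARM64 backend can be driven through the same public MaatEngine API that the unit tests above use. Every engine call below appears verbatim in this diff; the only assumption is that the AARCH64 sla spec built by the CMake changes has been installed where the lifter can find it:

    #include "maat/arch.hpp"
    #include "maat/engine.hpp"
    #include <cstdint>
    #include <iostream>
    #include <string>

    int main()
    {
        // Same construction and mapping as the test driver above
        maat::MaatEngine engine(maat::Arch::Type::ARM64);
        engine.mem->map(0x0, 0x11000);

        // add x0, x1, x2 (encoding reused from disass_addition above)
        std::string code("\x20\x00\x02\x8b", 4);
        engine.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());

        engine.cpu.ctx().set(maat::ARM64::R1, maat::exprcst(64, 25));
        engine.cpu.ctx().set(maat::ARM64::R2, maat::exprcst(64, 15));
        engine.run_from(0x1000, 1);

        // Expected to print 40 once the ARM64 lifter resolves AARCH64.sla
        std::cout << engine.cpu.ctx().get(maat::ARM64::R0).as_uint() << std::endl;
        return 0;
    }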