diff --git a/neon2rvv.h b/neon2rvv.h
index 272f9fd8..8cd0f9ec 100644
--- a/neon2rvv.h
+++ b/neon2rvv.h
@@ -10587,13 +10587,25 @@ FORCE_INLINE uint64x2_t vmull_n_u32(uint32x2_t a, uint32_t b) {
   return __riscv_vlmul_trunc_v_u64m2_u64m1(__riscv_vwmulu_vx_u64m2(a, b, 4));
 }
 
-// FORCE_INLINE int32x4_t vmull_high_n_s16(int16x8_t a, int16_t b);
+FORCE_INLINE int32x4_t vmull_high_n_s16(int16x8_t a, int16_t b) {
+  vint16m1_t a_high = __riscv_vslidedown_vx_i16m1(a, 4, 8);
+  return __riscv_vlmul_trunc_v_i32m2_i32m1(__riscv_vwmul_vx_i32m2(a_high, b, 4));
+}
 
-// FORCE_INLINE int64x2_t vmull_high_n_s32(int32x4_t a, int32_t b);
+FORCE_INLINE int64x2_t vmull_high_n_s32(int32x4_t a, int32_t b) {
+  vint32m1_t a_high = __riscv_vslidedown_vx_i32m1(a, 2, 4);
+  return __riscv_vlmul_trunc_v_i64m2_i64m1(__riscv_vwmul_vx_i64m2(a_high, b, 2));
+}
 
-// FORCE_INLINE uint32x4_t vmull_high_n_u16(uint16x8_t a, uint16_t b);
+FORCE_INLINE uint32x4_t vmull_high_n_u16(uint16x8_t a, uint16_t b) {
+  vuint16m1_t a_high = __riscv_vslidedown_vx_u16m1(a, 4, 8);
+  return __riscv_vlmul_trunc_v_u32m2_u32m1(__riscv_vwmulu_vx_u32m2(a_high, b, 4));
+}
 
-// FORCE_INLINE uint64x2_t vmull_high_n_u32(uint32x4_t a, uint32_t b);
+FORCE_INLINE uint64x2_t vmull_high_n_u32(uint32x4_t a, uint32_t b) {
+  vuint32m1_t a_high = __riscv_vslidedown_vx_u32m1(a, 2, 4);
+  return __riscv_vlmul_trunc_v_u64m2_u64m1(__riscv_vwmulu_vx_u64m2(a_high, b, 2));
+}
 
 FORCE_INLINE int32x4_t vqdmull_n_s16(int16x4_t a, int16_t b) {
   vint32m2_t ab_mul = __riscv_vwmul_vx_i32m2(a, b, 4);
diff --git a/tests/impl.cpp b/tests/impl.cpp
index e7390d8d..bad19c1b 100644
--- a/tests/impl.cpp
+++ b/tests/impl.cpp
@@ -33858,13 +33858,73 @@ result_t test_vmull_n_u32(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) {
 #endif  // ENABLE_TEST_ALL
 }
 
-result_t test_vmull_high_n_s16(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) { return TEST_UNIMPL; }
+result_t test_vmull_high_n_s16(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) {
+#ifdef ENABLE_TEST_ALL
+  const int16_t *_a = (int16_t *)impl.test_cases_int_pointer1;
+  const int16_t *_b = (int16_t *)impl.test_cases_int_pointer2;
+  int32_t _c[4];
+  for (int i = 0; i < 4; i++) {
+    _c[i] = (int32_t)_a[i + 4] * (int32_t)_b[0];
+  }
+
+  int16x8_t a = vld1q_s16(_a);
+  int32x4_t c = vmull_high_n_s16(a, _b[0]);
+  return validate_int32(c, _c[0], _c[1], _c[2], _c[3]);
+#else
+  return TEST_UNIMPL;
+#endif  // ENABLE_TEST_ALL
+}
+
+result_t test_vmull_high_n_s32(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) {
+#ifdef ENABLE_TEST_ALL
+  const int32_t *_a = (int32_t *)impl.test_cases_int_pointer1;
+  const int32_t *_b = (int32_t *)impl.test_cases_int_pointer2;
+  int64_t _c[2];
+  for (int i = 0; i < 2; i++) {
+    _c[i] = (int64_t)_a[i + 2] * (int64_t)_b[0];
+  }
+
+  int32x4_t a = vld1q_s32(_a);
+  int64x2_t c = vmull_high_n_s32(a, _b[0]);
+  return validate_int64(c, _c[0], _c[1]);
+#else
+  return TEST_UNIMPL;
+#endif  // ENABLE_TEST_ALL
+}
 
-result_t test_vmull_high_n_s32(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) { return TEST_UNIMPL; }
+result_t test_vmull_high_n_u16(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) {
+#ifdef ENABLE_TEST_ALL
+  const uint16_t *_a = (uint16_t *)impl.test_cases_int_pointer1;
+  const uint16_t *_b = (uint16_t *)impl.test_cases_int_pointer2;
+  uint32_t _c[4];
+  for (int i = 0; i < 4; i++) {
+    _c[i] = (uint32_t)_a[i + 4] * (uint32_t)_b[0];
+  }
+
+  uint16x8_t a = vld1q_u16(_a);
+  uint32x4_t c = vmull_high_n_u16(a, _b[0]);
+  return validate_uint32(c, _c[0], _c[1], _c[2], _c[3]);
+#else
+  return TEST_UNIMPL;
+#endif  // ENABLE_TEST_ALL
+}
 
-result_t test_vmull_high_n_u16(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) { return TEST_UNIMPL; }
+result_t test_vmull_high_n_u32(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) {
+#ifdef ENABLE_TEST_ALL
+  const uint32_t *_a = (uint32_t *)impl.test_cases_int_pointer1;
+  const uint32_t *_b = (uint32_t *)impl.test_cases_int_pointer2;
+  uint64_t _c[2];
+  for (int i = 0; i < 2; i++) {
+    _c[i] = (uint64_t)_a[i + 2] * (uint64_t)_b[0];
+  }
 
-result_t test_vmull_high_n_u32(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) { return TEST_UNIMPL; }
+  uint32x4_t a = vld1q_u32(_a);
+  uint64x2_t c = vmull_high_n_u32(a, _b[0]);
+  return validate_uint64(c, _c[0], _c[1]);
+#else
+  return TEST_UNIMPL;
+#endif  // ENABLE_TEST_ALL
+}
 
 result_t test_vqdmull_n_s16(const NEON2RVV_TEST_IMPL &impl, uint32_t iter) {
 #ifdef ENABLE_TEST_ALL
diff --git a/tests/impl.h b/tests/impl.h
index 071b28a7..4d0cad98 100644
--- a/tests/impl.h
+++ b/tests/impl.h
@@ -2191,10 +2191,10 @@
   _(vmull_n_s32)                                                 \
   _(vmull_n_u16)                                                 \
   _(vmull_n_u32)                                                 \
-  /*_(vmull_high_n_s16) */                                       \
-  /*_(vmull_high_n_s32) */                                       \
-  /*_(vmull_high_n_u16) */                                       \
-  /*_(vmull_high_n_u32) */                                       \
+  _(vmull_high_n_s16)                                            \
+  _(vmull_high_n_s32)                                            \
+  _(vmull_high_n_u16)                                            \
+  _(vmull_high_n_u32)                                            \
   _(vqdmull_n_s16)                                               \
   _(vqdmull_n_s32)                                               \
   /*_(vqdmull_high_n_s16) */                                     \