diff --git a/include/bmat8_impl.hpp b/include/bmat8_impl.hpp
index 913e94f2..3b2b0da0 100644
--- a/include/bmat8_impl.hpp
+++ b/include/bmat8_impl.hpp
@@ -253,7 +253,7 @@ constexpr std::array masks {{
 static const epu8 shiftres {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
 
 inline void update_bitset(epu8 block, epu8 &set0, epu8 &set1) {
-    static const epu8 bound08 = simde_mm_slli_epi32(epu8id, 3); // shift for *8
+    static const epu8 bound08 = simde_mm_slli_epi32(static_cast<simde__m128i>(epu8id), 3); // shift for *8
     static const epu8 bound18 = bound08 + Epu8(0x80);
     for (size_t slice8 = 0; slice8 < 16; slice8++) {
         epu8 bm5 = Epu8(0xf8) & block; /* 11111000 */
diff --git a/include/perm16_impl.hpp b/include/perm16_impl.hpp
index 0408fc4d..b2d74a45 100644
--- a/include/perm16_impl.hpp
+++ b/include/perm16_impl.hpp
@@ -129,7 +129,8 @@ inline Transf16::Transf16(uint64_t compressed) {
 }
 
 inline Transf16::operator uint64_t() const {
-    epu8 res = static_cast<epu8>(simde_mm_slli_epi32(v, 4));
+    epu8 res =
+        static_cast<epu8>(simde_mm_slli_epi32(static_cast<simde__m128i>(v), 4));
     res = HPCombi::permuted(res, hilo_exchng) + v;
     return simde_mm_extract_epi64(res, 0);
 }
@@ -218,7 +219,9 @@ inline Perm16 Perm16::inverse_sort() const {
     // G++-7 compile this shift by 3 additions.
     // epu8 res = (v << 4) + one().v;
     // I call directly the shift intrinsic
-    epu8 res = static_cast<epu8>(simde_mm_slli_epi32(v, 4)) + one().v;
+    epu8 res = static_cast<epu8>(
+                   simde_mm_slli_epi32(static_cast<simde__m128i>(v), 4)) +
+               one().v;
     res = sorted(res) & Epu8(0x0F);
     return res;
 }
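
Note (not part of the patch): a minimal standalone sketch of the cast pattern the hunks above apply. epu8 is HPCombi's 16-lane uint8_t GCC vector type, while the SIMDe shift intrinsic is typed on simde__m128i, so strict (non-native-alias) builds require an explicit cast on both the argument and the result. The epu8 typedef below mirrors HPCombi's definition; the helper name shift_each_lane_left_4 is illustrative only.

#include <cstdint>
#include <simde/x86/sse2.h>  // simde_mm_slli_epi32

// Illustrative stand-in for HPCombi's vector-of-bytes type.
using epu8 = uint8_t __attribute__((vector_size(16)));

// Shift each 32-bit lane of v left by 4 bits: cast the byte vector to the
// intrinsic's simde__m128i parameter type, shift, then cast back to epu8.
inline epu8 shift_each_lane_left_4(epu8 v) {
    return static_cast<epu8>(
        simde_mm_slli_epi32(static_cast<simde__m128i>(v), 4));
}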