diff --git a/src/amalgam/gen/avxvnni.c b/src/amalgam/gen/avxvnni.c
index e69de29bb2d..398cafb63fe 100644
--- a/src/amalgam/gen/avxvnni.c
+++ b/src/amalgam/gen/avxvnni.c
@@ -0,0 +1,431 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+#include <xnnpack/prefetch.h>
+#include <xnnpack/unaligned.h>
+
+
+void xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c8__avxvnni_prfm(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
+    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 8 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  float* c0 = c;
+
+  const __m256i vinput_zero_point0 = _mm256_set1_epi32((int) quantization_params[0].zero_point + 128);
+  const __m256 voutput_min = _mm256_set1_ps(params->avxvnni.min);
+  const __m256 voutput_max = _mm256_set1_ps(params->avxvnni.max);
+  const __m256i vsign_mask = _mm256_set1_epi8(params->avxvnni.sign_mask);  // 0x80
+  const __m256i vvalue_mask = _mm256_set1_epi8(params->avxvnni.mask);  // 0xF0
+  do {
+    const __m256i vksum0123456789ABCDEF = _mm256_load_si256(w);
+    __m256i vsum0x01234567 = _mm256_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point0);
+    __m256i vacc0x0123 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum0x01234567, 0));
+    __m256i vacc0x4567 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum0x01234567, 1));
+    __m256i vacc1x0x0123 = _mm256_setzero_si256();
+    __m256i vacc1x0x4567 = _mm256_setzero_si256();
+    w = (const int32_t*) w + 8;
+
+    size_t k = kc;
+    while (k >= 16 * sizeof(int8_t)) {
+      const __m256i va0x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a0)), vsign_mask);
+      const __m256i va0x89ABCDEF = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a0 + 8)), vsign_mask);
+      a0 += 16;
+
+      const __m256i vbb01234567x01234567 = _mm256_load_si256(w);
+      const __m256i vbb89ABCDEFx01234567 = _mm256_load_si256((const __m256i*) ((const int8_t*) w + 32));
+      const __m256i vbs01234567x0123 = _mm256_slli_epi32(vbb01234567x01234567, 4);
+      const __m256i vbs89ABCDEFx0123 = _mm256_slli_epi32(vbb89ABCDEFx01234567, 4);
+      const __m256i vb01234567x4567 = _mm256_and_si256(vbb01234567x01234567, vvalue_mask);
+      const __m256i vb89ABCDEFx4567 = _mm256_and_si256(vbb89ABCDEFx01234567, vvalue_mask);
+      const __m256i vb01234567x0123 = _mm256_and_si256(vbs01234567x0123, vvalue_mask);
+      const __m256i vb89ABCDEFx0123 = _mm256_and_si256(vbs89ABCDEFx0123, vvalue_mask);
+
+      vacc0x0123 = _mm256_dpbusd_avx_epi32(vacc0x0123, va0x01234567, vb01234567x0123);
+      vacc0x4567 = _mm256_dpbusd_avx_epi32(vacc0x4567, va0x01234567, vb89ABCDEFx0123);
+      xnn_prefetch_to_l1((const int8_t*) w + 960);
+      vacc1x0x0123 = _mm256_dpbusd_avx_epi32(vacc1x0x0123, va0x89ABCDEF, vb01234567x4567);
+      vacc1x0x4567 = _mm256_dpbusd_avx_epi32(vacc1x0x4567, va0x89ABCDEF, vb89ABCDEFx4567);
+
+      w = (const int8_t*) w + 64;
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k != 0) {
+      const __m256i va0x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a0)), vsign_mask);
+      a0 += 8;
+
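+      // Remainder of up to 8 K values: only the low-nibble plane of this 64-byte weight block contributes.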
+      const __m256i vbb01234567x01234567 = _mm256_load_si256(w);
+      const __m256i vbb89ABCDEFx01234567 = _mm256_load_si256((const __m256i*) ((const int8_t*) w + 32));
+      const __m256i vbs01234567x0123 = _mm256_slli_epi32(vbb01234567x01234567, 4);
+      const __m256i vbs89ABCDEFx0123 = _mm256_slli_epi32(vbb89ABCDEFx01234567, 4);
+      const __m256i vb01234567x0123 = _mm256_and_si256(vbs01234567x0123, vvalue_mask);
+      const __m256i vb89ABCDEFx0123 = _mm256_and_si256(vbs89ABCDEFx0123, vvalue_mask);
+
+      vacc0x0123 = _mm256_dpbusd_avx_epi32(vacc0x0123, va0x01234567, vb01234567x0123);
+      vacc0x4567 = _mm256_dpbusd_avx_epi32(vacc0x4567, va0x01234567, vb89ABCDEFx0123);
+      xnn_prefetch_to_l1((const int8_t*) w + 960);
+
+      w = (const int8_t*) w + 64;
+      k -= 8 * sizeof(int8_t);
+    }
+    vacc0x0123 = _mm256_add_epi32(vacc0x0123, vacc1x0x0123);
+    vacc0x4567 = _mm256_add_epi32(vacc0x4567, vacc1x0x4567);
+
+    // Add adjacent pairs
+    const __m256i vsum0x02134657 = _mm256_hadd_epi32(vacc0x0123, vacc0x4567);
+    __m256i vacc0x01234567 = _mm256_permute4x64_epi64(vsum0x02134657, _MM_SHUFFLE(3, 1, 2, 0));
+
+    vacc0x01234567 = _mm256_srai_epi32(vacc0x01234567, 4);
+    __m256 vout0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
+
+    vout0x01234567 = _mm256_mul_ps(vout0x01234567, _mm256_set1_ps(quantization_params[0].inv_scale));
+
+    const __m256 vfilter_output_scale0123456789ABCDEF = _mm256_load_ps((const float*) w);
+    const __m256 vbias0123456789ABCDEF = _mm256_load_ps((const float*) w + 8);
+    w = (const float*) w + 16;
+
+    vout0x01234567 = _mm256_fmadd_ps(vout0x01234567, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF);
+
+    vout0x01234567 = _mm256_max_ps(vout0x01234567, voutput_min);
+
+    vout0x01234567 = _mm256_min_ps(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      _mm256_storeu_ps(c0, vout0x01234567);
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+      nc -= 8;
+    } else {
+      __m128 vout0x0123 = _mm256_castps256_ps128(vout0x01234567);
+      if (nc & 4) {
+        _mm_storeu_ps(c0, vout0x0123);
+        c0 += 4;
+        vout0x0123 = _mm256_extractf128_ps(vout0x01234567, 1);
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c0, vout0x0123);
+        c0 += 2;
+        vout0x0123 = _mm_movehl_ps(vout0x0123, vout0x0123);
+      }
+      if (nc & 1) {
+        _mm_store_ss(c0, vout0x0123);
+      }
+      nc = 0;
+    }
+  } while (nc != 0);
+}
+
+void xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x8c8__avxvnni_prfm(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
+    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 8 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  float* c0 = c;
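+  // Rows beyond mr alias the previous row's pointers, so out-of-range rows just recompute and rewrite the same output.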
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  const __m256i vinput_zero_point0 = _mm256_set1_epi32((int) quantization_params[0].zero_point + 128);
+  const __m256i vinput_zero_point1 = _mm256_set1_epi32((int) quantization_params[1].zero_point + 128);
+  const __m256i vinput_zero_point2 = _mm256_set1_epi32((int) quantization_params[2].zero_point + 128);
+  const __m256i vinput_zero_point3 = _mm256_set1_epi32((int) quantization_params[3].zero_point + 128);
+  const __m256i vinput_zero_point4 = _mm256_set1_epi32((int) quantization_params[4].zero_point + 128);
+  const __m256 voutput_min = _mm256_set1_ps(params->avxvnni.min);
+  const __m256 voutput_max = _mm256_set1_ps(params->avxvnni.max);
+  const __m256i vsign_mask = _mm256_set1_epi8(params->avxvnni.sign_mask);  // 0x80
+  const __m256i vvalue_mask = _mm256_set1_epi8(params->avxvnni.mask);  // 0xF0
+  do {
+    const __m256i vksum0123456789ABCDEF = _mm256_load_si256(w);
+    __m256i vsum0x01234567 = _mm256_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point0);
+    __m256i vacc0x0123 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum0x01234567, 0));
+    __m256i vacc0x4567 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum0x01234567, 1));
+    __m256i vsum1x01234567 = _mm256_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point1);
+    __m256i vacc1x0123 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum1x01234567, 0));
+    __m256i vacc1x4567 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum1x01234567, 1));
+    __m256i vsum2x01234567 = _mm256_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point2);
+    __m256i vacc2x0123 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum2x01234567, 0));
+    __m256i vacc2x4567 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum2x01234567, 1));
+    __m256i vsum3x01234567 = _mm256_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point3);
+    __m256i vacc3x0123 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum3x01234567, 0));
+    __m256i vacc3x4567 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum3x01234567, 1));
+    __m256i vsum4x01234567 = _mm256_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point4);
+    __m256i vacc4x0123 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum4x01234567, 0));
+    __m256i vacc4x4567 = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(vsum4x01234567, 1));
+    w = (const int32_t*) w + 8;
+
+    size_t k = kc;
+    while (k >= 16 * sizeof(int8_t)) {
+      const __m256i va0x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a0)), vsign_mask);
+      const __m256i va0x89ABCDEF = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a0 + 8)), vsign_mask);
+      a0 += 16;
+      const __m256i va1x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a1)), vsign_mask);
+      const __m256i va1x89ABCDEF = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a1 + 8)), vsign_mask);
+      a1 += 16;
+      const __m256i va2x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a2)), vsign_mask);
+      const __m256i va2x89ABCDEF = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a2 + 8)), vsign_mask);
+      a2 += 16;
+      const __m256i va3x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a3)), vsign_mask);
+      const __m256i va3x89ABCDEF = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a3 + 8)), vsign_mask);
+      a3 += 16;
+      const __m256i va4x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a4)), vsign_mask);
+      const __m256i va4x89ABCDEF = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a4 + 8)), vsign_mask);
+      a4 += 16;
+
+      const __m256i vbb01234567x01234567 = _mm256_load_si256(w);
+      const __m256i vbb89ABCDEFx01234567 = _mm256_load_si256((const __m256i*) ((const int8_t*) w + 32));
+      const __m256i vbs01234567x0123 = _mm256_slli_epi32(vbb01234567x01234567, 4);
+      const __m256i vbs89ABCDEFx0123 = _mm256_slli_epi32(vbb89ABCDEFx01234567, 4);
+      const __m256i vb01234567x4567 = _mm256_and_si256(vbb01234567x01234567, vvalue_mask);
+      const __m256i vb89ABCDEFx4567 = _mm256_and_si256(vbb89ABCDEFx01234567, vvalue_mask);
+      const __m256i vb01234567x0123 = _mm256_and_si256(vbs01234567x0123, vvalue_mask);
+      const __m256i vb89ABCDEFx0123 = _mm256_and_si256(vbs89ABCDEFx0123, vvalue_mask);
+
+      vacc0x0123 = _mm256_dpbusd_avx_epi32(vacc0x0123, va0x01234567, vb01234567x0123);
+      vacc0x4567 = _mm256_dpbusd_avx_epi32(vacc0x4567, va0x01234567, vb89ABCDEFx0123);
+      vacc1x0123 = _mm256_dpbusd_avx_epi32(vacc1x0123, va1x01234567, vb01234567x0123);
+      vacc1x4567 = _mm256_dpbusd_avx_epi32(vacc1x4567, va1x01234567, vb89ABCDEFx0123);
+      vacc2x0123 = _mm256_dpbusd_avx_epi32(vacc2x0123, va2x01234567, vb01234567x0123);
+      vacc2x4567 = _mm256_dpbusd_avx_epi32(vacc2x4567, va2x01234567, vb89ABCDEFx0123);
+      vacc3x0123 = _mm256_dpbusd_avx_epi32(vacc3x0123, va3x01234567, vb01234567x0123);
+      vacc3x4567 = _mm256_dpbusd_avx_epi32(vacc3x4567, va3x01234567, vb89ABCDEFx0123);
+      vacc4x0123 = _mm256_dpbusd_avx_epi32(vacc4x0123, va4x01234567, vb01234567x0123);
+      vacc4x4567 = _mm256_dpbusd_avx_epi32(vacc4x4567, va4x01234567, vb89ABCDEFx0123);
+      xnn_prefetch_to_l1((const int8_t*) w + 960);
+      vacc0x0123 = _mm256_dpbusd_avx_epi32(vacc0x0123, va0x89ABCDEF, vb01234567x4567);
+      vacc0x4567 = _mm256_dpbusd_avx_epi32(vacc0x4567, va0x89ABCDEF, vb89ABCDEFx4567);
+      vacc1x0123 = _mm256_dpbusd_avx_epi32(vacc1x0123, va1x89ABCDEF, vb01234567x4567);
+      vacc1x4567 = _mm256_dpbusd_avx_epi32(vacc1x4567, va1x89ABCDEF, vb89ABCDEFx4567);
+      vacc2x0123 = _mm256_dpbusd_avx_epi32(vacc2x0123, va2x89ABCDEF, vb01234567x4567);
+      vacc2x4567 = _mm256_dpbusd_avx_epi32(vacc2x4567, va2x89ABCDEF, vb89ABCDEFx4567);
+      vacc3x0123 = _mm256_dpbusd_avx_epi32(vacc3x0123, va3x89ABCDEF, vb01234567x4567);
+      vacc3x4567 = _mm256_dpbusd_avx_epi32(vacc3x4567, va3x89ABCDEF, vb89ABCDEFx4567);
+      vacc4x0123 = _mm256_dpbusd_avx_epi32(vacc4x0123, va4x89ABCDEF, vb01234567x4567);
+      vacc4x4567 = _mm256_dpbusd_avx_epi32(vacc4x4567, va4x89ABCDEF, vb89ABCDEFx4567);
+
+      w = (const int8_t*) w + 64;
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k != 0) {
+      const __m256i va0x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a0)), vsign_mask);
+      a0 += 8;
+      const __m256i va1x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a1)), vsign_mask);
+      a1 += 8;
+      const __m256i va2x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a2)), vsign_mask);
+      a2 += 8;
+      const __m256i va3x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a3)), vsign_mask);
+      a3 += 8;
+      const __m256i va4x01234567 = _mm256_xor_si256(_mm256_set1_epi64x((int64_t) unaligned_load_u64(a4)), vsign_mask);
+      a4 += 8;
+
+      const __m256i vbb01234567x01234567 = _mm256_load_si256(w);
+      const __m256i vbb89ABCDEFx01234567 = _mm256_load_si256((const __m256i*) ((const int8_t*) w + 32));
+      const __m256i vbs01234567x0123 = _mm256_slli_epi32(vbb01234567x01234567, 4);
+      const __m256i vbs89ABCDEFx0123 = _mm256_slli_epi32(vbb89ABCDEFx01234567, 4);
+      const __m256i vb01234567x0123 = _mm256_and_si256(vbs01234567x0123, vvalue_mask);
+      const __m256i vb89ABCDEFx0123 = _mm256_and_si256(vbs89ABCDEFx0123, vvalue_mask);
+
+      vacc0x0123 = _mm256_dpbusd_avx_epi32(vacc0x0123, va0x01234567, vb01234567x0123);
+      vacc0x4567 = _mm256_dpbusd_avx_epi32(vacc0x4567, va0x01234567, vb89ABCDEFx0123);
+      vacc1x0123 = _mm256_dpbusd_avx_epi32(vacc1x0123, va1x01234567, vb01234567x0123);
+      vacc1x4567 = _mm256_dpbusd_avx_epi32(vacc1x4567, va1x01234567, vb89ABCDEFx0123);
+      vacc2x0123 = _mm256_dpbusd_avx_epi32(vacc2x0123, va2x01234567, vb01234567x0123);
+      vacc2x4567 = _mm256_dpbusd_avx_epi32(vacc2x4567, va2x01234567, vb89ABCDEFx0123);
+      vacc3x0123 = _mm256_dpbusd_avx_epi32(vacc3x0123, va3x01234567, vb01234567x0123);
+      vacc3x4567 = _mm256_dpbusd_avx_epi32(vacc3x4567, va3x01234567, vb89ABCDEFx0123);
+      vacc4x0123 = _mm256_dpbusd_avx_epi32(vacc4x0123, va4x01234567, vb01234567x0123);
+      vacc4x4567 = _mm256_dpbusd_avx_epi32(vacc4x4567, va4x01234567, vb89ABCDEFx0123);
+      xnn_prefetch_to_l1((const int8_t*) w + 960);
+
+      w = (const int8_t*) w + 64;
+      k -= 8 * sizeof(int8_t);
+    }
+
+    // Add adjacent pairs
+    const __m256i vsum0x02134657 = _mm256_hadd_epi32(vacc0x0123, vacc0x4567);
+    __m256i vacc0x01234567 = _mm256_permute4x64_epi64(vsum0x02134657, _MM_SHUFFLE(3, 1, 2, 0));
+    const __m256i vsum1x02134657 = _mm256_hadd_epi32(vacc1x0123, vacc1x4567);
+    __m256i vacc1x01234567 = _mm256_permute4x64_epi64(vsum1x02134657, _MM_SHUFFLE(3, 1, 2, 0));
+    const __m256i vsum2x02134657 = _mm256_hadd_epi32(vacc2x0123, vacc2x4567);
+    __m256i vacc2x01234567 = _mm256_permute4x64_epi64(vsum2x02134657, _MM_SHUFFLE(3, 1, 2, 0));
+    const __m256i vsum3x02134657 = _mm256_hadd_epi32(vacc3x0123, vacc3x4567);
+    __m256i vacc3x01234567 = _mm256_permute4x64_epi64(vsum3x02134657, _MM_SHUFFLE(3, 1, 2, 0));
+    const __m256i vsum4x02134657 = _mm256_hadd_epi32(vacc4x0123, vacc4x4567);
+    __m256i vacc4x01234567 = _mm256_permute4x64_epi64(vsum4x02134657, _MM_SHUFFLE(3, 1, 2, 0));
+
+    vacc0x01234567 = _mm256_srai_epi32(vacc0x01234567, 4);
+    vacc1x01234567 = _mm256_srai_epi32(vacc1x01234567, 4);
+    vacc2x01234567 = _mm256_srai_epi32(vacc2x01234567, 4);
+    vacc3x01234567 = _mm256_srai_epi32(vacc3x01234567, 4);
+    vacc4x01234567 = _mm256_srai_epi32(vacc4x01234567, 4);
+    __m256 vout0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
+    __m256 vout1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567);
+    __m256 vout2x01234567 = _mm256_cvtepi32_ps(vacc2x01234567);
+    __m256 vout3x01234567 = _mm256_cvtepi32_ps(vacc3x01234567);
+    __m256 vout4x01234567 = _mm256_cvtepi32_ps(vacc4x01234567);
+
+    vout0x01234567 = _mm256_mul_ps(vout0x01234567, _mm256_set1_ps(quantization_params[0].inv_scale));
+    vout1x01234567 = _mm256_mul_ps(vout1x01234567, _mm256_set1_ps(quantization_params[1].inv_scale));
+    vout2x01234567 = _mm256_mul_ps(vout2x01234567, _mm256_set1_ps(quantization_params[2].inv_scale));
+    vout3x01234567 = _mm256_mul_ps(vout3x01234567, _mm256_set1_ps(quantization_params[3].inv_scale));
+    vout4x01234567 = _mm256_mul_ps(vout4x01234567, _mm256_set1_ps(quantization_params[4].inv_scale));
+
+    const __m256 vfilter_output_scale0123456789ABCDEF = _mm256_load_ps((const float*) w);
+    const __m256 vbias0123456789ABCDEF = _mm256_load_ps((const float*) w + 8);
+    w = (const float*) w + 16;
+
+    vout0x01234567 = _mm256_fmadd_ps(vout0x01234567, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF);
+    vout1x01234567 = _mm256_fmadd_ps(vout1x01234567, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF);
+    vout2x01234567 = _mm256_fmadd_ps(vout2x01234567, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF);
+    vout3x01234567 = _mm256_fmadd_ps(vout3x01234567, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF);
+    vout4x01234567 = _mm256_fmadd_ps(vout4x01234567, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF);
+
+    vout0x01234567 = _mm256_max_ps(vout0x01234567, voutput_min);
+    vout1x01234567 = _mm256_max_ps(vout1x01234567, voutput_min);
+    vout2x01234567 = _mm256_max_ps(vout2x01234567, voutput_min);
+    vout3x01234567 = _mm256_max_ps(vout3x01234567, voutput_min);
+    vout4x01234567 = _mm256_max_ps(vout4x01234567, voutput_min);
+
+    vout0x01234567 = _mm256_min_ps(vout0x01234567, voutput_max);
+    vout1x01234567 = _mm256_min_ps(vout1x01234567, voutput_max);
+    vout2x01234567 = _mm256_min_ps(vout2x01234567, voutput_max);
+    vout3x01234567 = _mm256_min_ps(vout3x01234567, voutput_max);
+    vout4x01234567 = _mm256_min_ps(vout4x01234567, voutput_max);
+
+    if (nc >= 8) {
+      _mm256_storeu_ps(c0, vout0x01234567);
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+      _mm256_storeu_ps(c1, vout1x01234567);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm256_storeu_ps(c2, vout2x01234567);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm256_storeu_ps(c3, vout3x01234567);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm256_storeu_ps(c4, vout4x01234567);
+      a4 = (const int8_t*) ((uintptr_t) a4 - kc);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      nc -= 8;
+    } else {
+      __m128 vout0x0123 = _mm256_castps256_ps128(vout0x01234567);
+      __m128 vout1x0123 = _mm256_castps256_ps128(vout1x01234567);
+      __m128 vout2x0123 = _mm256_castps256_ps128(vout2x01234567);
+      __m128 vout3x0123 = _mm256_castps256_ps128(vout3x01234567);
+      __m128 vout4x0123 = _mm256_castps256_ps128(vout4x01234567);
+      if (nc & 4) {
+        _mm_storeu_ps(c0, vout0x0123);
+        c0 += 4;
+        _mm_storeu_ps(c1, vout1x0123);
+        c1 += 4;
+        _mm_storeu_ps(c2, vout2x0123);
+        c2 += 4;
+        _mm_storeu_ps(c3, vout3x0123);
+        c3 += 4;
+        _mm_storeu_ps(c4, vout4x0123);
+        c4 += 4;
+        vout0x0123 = _mm256_extractf128_ps(vout0x01234567, 1);
+        vout1x0123 = _mm256_extractf128_ps(vout1x01234567, 1);
+        vout2x0123 = _mm256_extractf128_ps(vout2x01234567, 1);
+        vout3x0123 = _mm256_extractf128_ps(vout3x01234567, 1);
+        vout4x0123 = _mm256_extractf128_ps(vout4x01234567, 1);
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c0, vout0x0123);
+        c0 += 2;
+        _mm_storel_pi((__m64*) c1, vout1x0123);
+        c1 += 2;
+        _mm_storel_pi((__m64*) c2, vout2x0123);
+        c2 += 2;
+        _mm_storel_pi((__m64*) c3, vout3x0123);
+        c3 += 2;
+        _mm_storel_pi((__m64*) c4, vout4x0123);
+        c4 += 2;
+        vout0x0123 = _mm_movehl_ps(vout0x0123, vout0x0123);
+        vout1x0123 = _mm_movehl_ps(vout1x0123, vout1x0123);
+        vout2x0123 = _mm_movehl_ps(vout2x0123, vout2x0123);
+        vout3x0123 = _mm_movehl_ps(vout3x0123, vout3x0123);
+        vout4x0123 = _mm_movehl_ps(vout4x0123, vout4x0123);
+      }
+      if (nc & 1) {
+        _mm_store_ss(c0, vout0x0123);
+        _mm_store_ss(c1, vout1x0123);
+        _mm_store_ss(c2, vout2x0123);
+        _mm_store_ss(c3, vout3x0123);
+        _mm_store_ss(c4, vout4x0123);
+      }
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/configs/gemm-config.c b/src/configs/gemm-config.c
index d4cebaef075..06c5bad9fd8 100644
--- a/src/configs/gemm-config.c
+++ b/src/configs/gemm-config.c
@@ -1750,6 +1750,16 @@ static void init_qd8_f32_qc4w_gemm_config(void) {
     qd8_f32_qc4w_gemm_config.nr = 16;
     qd8_f32_qc4w_gemm_config.log2_kr = 3;
     qd8_f32_qc4w_gemm_config.planes = 2;
+  } else if (hardware_config->use_x86_avxvnni) {
+    qd8_f32_qc4w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(1)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c8__avxvnni_prfm);
+    qd8_f32_qc4w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(5)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x8c8__avxvnni_prfm);
+    qd8_f32_qc4w_gemm_config.init.f32_qc4w = xnn_init_f32_qc4w_minmax_avxvnni_params;
+    qd8_f32_qc4w_gemm_config.pack_gemm_gio = (xnn_packw_gemm_gio_ukernel_fn) xnn_pack_qs8_qc4w_gemm_gio_w;
+    qd8_f32_qc4w_gemm_config.pack_gemm_goi = (xnn_packw_gemm_goi_ukernel_fn) xnn_pack_qs8_qc4w_gemm_goi_w;
+    qd8_f32_qc4w_gemm_config.mr = 5;
+    qd8_f32_qc4w_gemm_config.nr = 8;
+    qd8_f32_qc4w_gemm_config.log2_kr = 3;
+    qd8_f32_qc4w_gemm_config.planes = 2;
   } else if (hardware_config->use_x86_xop) {
     // XOP should be checked before AVX2: AMD Excavator supports both, but performs better with XOP microkernels
     qd8_f32_qc4w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(1)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x4c8__xop_ld128);