diff --git a/lib/mmseqs/CMakeLists.txt b/lib/mmseqs/CMakeLists.txt index 66bffa6..765e463 100644 --- a/lib/mmseqs/CMakeLists.txt +++ b/lib/mmseqs/CMakeLists.txt @@ -16,6 +16,7 @@ set(HAVE_POWER9 0 CACHE BOOL "Have POWER9 CPU") set(HAVE_POWER8 0 CACHE BOOL "Have POWER8 CPU") set(HAVE_ARM8 0 CACHE BOOL "Have ARMv8 CPU") set(NATIVE_ARCH 1 CACHE BOOL "Assume native architecture for SIMD. Use one of the HAVE_* options or set CMAKE_CXX_FLAGS to the appropriate flags if you disable this.") +set(USE_SYSTEM_ZSTD 0 CACHE BOOL "Use zstd provided by system instead of bundled version") if (HAVE_SANITIZER) include(FindUBSan) @@ -81,6 +82,8 @@ if (NATIVE_ARCH AND (MMSEQS_ARCH STREQUAL "")) set(X64 1) elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "x86|X86") set(X86 1) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^sparc") + set(SPARC 1) else () message(WARNING "CPU without native SIMD instructions. Performance will be bad.") endif () @@ -151,7 +154,7 @@ if ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSI set(DISABLE_IPS4O 1) endif () -if (PPC64) +if (PPC64 OR SPARC) # FIXME: investigate why on ppc the regression seems to fail randomly set(DISABLE_IPS4O 1) endif () @@ -165,23 +168,31 @@ if (CMAKE_COMPILER_IS_CLANG AND (NOT EMSCRIPTEN)) set(MMSEQS_CXX_FLAGS "${MMSEQS_CXX_FLAGS} -stdlib=libc++") endif () - -# zstd -# We use ZSTD_findDecompressedSize which is only available with ZSTD_STATIC_LINKING_ONLY -# Thus we cannot use a system provided libzstd -set(ZSTD_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/lib/zstd") -set(CMAKE_INSTALL_LIBDIR bin) -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/lib/zstd/build/cmake/CMakeModules") -option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" OFF) -option(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" ON) -option(ZSTD_BUILD_SHARED "BUILD SHARED LIBRARIES" OFF) -option(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" OFF) -option(ZSTD_BUILD_PROGRAMS "BUILD PROGRAMS" OFF) -option(ZSTD_BUILD_CONTRIB "BUILD CONTRIB" OFF) -option(ZSTD_BUILD_TESTS "BUILD TESTS" OFF) -include_directories(lib/zstd/lib) -add_subdirectory(lib/zstd/build/cmake/lib EXCLUDE_FROM_ALL) -set_target_properties(libzstd_static PROPERTIES COMPILE_FLAGS "${MMSEQS_C_FLAGS}" LINK_FLAGS "${MMSEQS_C_FLAGS}") +if (USE_SYSTEM_ZSTD) + include(FindPackageHandleStandardArgs) + find_path(ZSTD_INCLUDE_DIRS NAMES zstd.h REQUIRED) + # We use ZSTD_findDecompressedSize which is only available with ZSTD_STATIC_LINKING_ONLY + find_library(ZSTD_LIBRARIES NAMES libzstd.a libzstd_static REQUIRED) + find_package_handle_standard_args(ZSTD DEFAULT_MSG ZSTD_LIBRARIES ZSTD_INCLUDE_DIRS) + mark_as_advanced(ZSTD_LIBRARIES ZSTD_INCLUDE_DIRS) + include_directories(${ZSTD_INCLUDE_DIRS}) +else () + # We use ZSTD_findDecompressedSize which is only available with ZSTD_STATIC_LINKING_ONLY + set(ZSTD_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/lib/zstd") + set(CMAKE_INSTALL_LIBDIR bin) + set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/lib/zstd/build/cmake/CMakeModules") + option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" OFF) + option(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" ON) + option(ZSTD_BUILD_SHARED "BUILD SHARED LIBRARIES" OFF) + option(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" OFF) + option(ZSTD_BUILD_PROGRAMS "BUILD PROGRAMS" OFF) + option(ZSTD_BUILD_CONTRIB "BUILD CONTRIB" OFF) + option(ZSTD_BUILD_TESTS "BUILD TESTS" OFF) + include_directories(lib/zstd/lib) + add_subdirectory(lib/zstd/build/cmake/lib EXCLUDE_FROM_ALL) + set_target_properties(libzstd_static PROPERTIES COMPILE_FLAGS 
"${MMSEQS_C_FLAGS}" LINK_FLAGS "${MMSEQS_C_FLAGS}") + set(ZSTD_LIBRARIES libzstd_static) +endif() # tinyexpr include_directories(lib/tinyexpr) diff --git a/lib/mmseqs/LICENCE.md b/lib/mmseqs/LICENSE.md similarity index 100% rename from lib/mmseqs/LICENCE.md rename to lib/mmseqs/LICENSE.md diff --git a/lib/mmseqs/README.md b/lib/mmseqs/README.md index 9ecbff8..7f5bfa0 100644 --- a/lib/mmseqs/README.md +++ b/lib/mmseqs/README.md @@ -9,6 +9,8 @@ MMseqs2 (Many-against-Many sequence searching) is a software suite to search and [Mirdita M, Steinegger M and Soeding J. MMseqs2 desktop and local web server app for fast, interactive sequence searches. Bioinformatics, doi: 10.1093/bioinformatics/bty1057 (2019)](https://academic.oup.com/bioinformatics/article/35/16/2856/5280135). +[Mirdita M, Steinegger M, Breitwieser F, Soding J, Levy Karin E: Fast and sensitive taxonomic assignment to metagenomic contigs. bioRxiv, doi: 10.1101/2020.11.27.401018 (2020)](https://www.biorxiv.org/content/10.1101/2020.11.27.401018v1). + [![BioConda Install](https://img.shields.io/conda/dn/bioconda/mmseqs2.svg?style=flag&label=BioConda%20install)](https://anaconda.org/bioconda/mmseqs2) [![Github All Releases](https://img.shields.io/github/downloads/soedinglab/mmseqs2/total.svg)](https://github.com/soedinglab/mmseqs2/releases/latest) [![Biocontainer Pulls](https://img.shields.io/endpoint?url=https%3A%2F%2Fmmseqs.com%2Fbiocontainer.php%3Fcontainer%3Dmmseqs2)](https://biocontainers.pro/#/tools/mmseqs2) diff --git a/lib/mmseqs/azure-pipelines.yml b/lib/mmseqs/azure-pipelines.yml index c3d65d3..3c36e70 100644 --- a/lib/mmseqs/azure-pipelines.yml +++ b/lib/mmseqs/azure-pipelines.yml @@ -7,19 +7,29 @@ variables: regression: 1 jobs: - - job: build_ubuntu_1804_userguide - displayName: Ubuntu 1804 Userguide + - job: build_ubuntu_2004_userguide + displayName: Ubuntu 2004 Userguide pool: - vmImage: 'Ubuntu-18.04' + vmImage: 'Ubuntu-20.04' steps: - checkout: "none" + - task: Cache@2 + inputs: + key: '"tectonic" | "$(Agent.OS)"' + restoreKeys: | + "tectonic" | "$(Agent.OS)" + "tectonic" + path: $(Pipeline.Workspace)/tectonic-cache/ + displayName: Cache Tectonic - script: | - sudo apt-get update - sudo apt-get -y install pandoc texlive-latex-recommended texlive-fonts-extra + wget -qO- https://github.com/tectonic-typesetting/tectonic/releases/download/tectonic%400.4.1/tectonic-0.4.1-x86_64-unknown-linux-gnu.tar.gz | tar xzvf - tectonic + wget -qO- https://github.com/jgm/pandoc/releases/download/2.11.3.2/pandoc-2.11.3.2-linux-amd64.tar.gz | tar --strip-components=2 -xzvf - pandoc-2.11.3.2/bin/pandoc + sudo mv -f pandoc tectonic /usr/local/bin displayName: Install Dependencies - script: | cd ${SYSTEM_DEFAULTWORKINGDIRECTORY} git clone https://github.com/soedinglab/MMseqs2.wiki.git . 
+ export XDG_CACHE_HOME=${PIPELINE_WORKSPACE}/tectonic-cache/ .pandoc/make-pdf.sh displayName: Build Userguide - task: PublishPipelineArtifact@0 @@ -242,7 +252,7 @@ jobs: pool: vmImage: 'Ubuntu-18.04' dependsOn: - - build_ubuntu_1804_userguide + - build_ubuntu_2004_userguide - build_macos_1015 - build_ubuntu_1804 - build_ubuntu_cross_2004 @@ -251,7 +261,7 @@ jobs: - script: | cd "${BUILD_SOURCESDIRECTORY}" mkdir mmseqs - cp -f README.md LICENCE.md mmseqs + cp -f README.md LICENSE.md mmseqs cp -r examples mmseqs mkdir mmseqs/matrices cp -f data/*.out mmseqs/matrices diff --git a/lib/mmseqs/data/workflow/cascaded_clustering.sh b/lib/mmseqs/data/workflow/cascaded_clustering.sh index 46fd535..78ad9d9 100755 --- a/lib/mmseqs/data/workflow/cascaded_clustering.sh +++ b/lib/mmseqs/data/workflow/cascaded_clustering.sh @@ -117,134 +117,148 @@ if [ -n "$REASSIGN" ]; then "$MMSEQS" subtractdbs "${TMP_PATH}/clu" "${TMP_PATH}/aln" "${TMP_PATH}/clu_not_accepted" --e-profile 100000000 -e 100000000 ${THREADSANDCOMPRESS} \ || fail "subtractdbs1 reassign died" fi - # create file of cluster that do align based on given criteria - if notExists "${TMP_PATH}/clu_accepted.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" subtractdbs "${TMP_PATH}/clu" "${TMP_PATH}/clu_not_accepted" "${TMP_PATH}/clu_accepted" --e-profile 100000000 -e 100000000 ${THREADSANDCOMPRESS} \ - || fail "subtractdbs2 reassign died" - fi if notExists "${TMP_PATH}/clu_not_accepted_swap.dbtype"; then # shellcheck disable=SC2086 "$MMSEQS" swapdb "${TMP_PATH}/clu_not_accepted" "${TMP_PATH}/clu_not_accepted_swap" ${THREADSANDCOMPRESS} \ || fail "swapdb1 reassign died" fi - # create sequences database that were wrong assigned - if notExists "${TMP_PATH}/seq_wrong_assigned.dbtype"; then + # short circuit if nothing can be reassigned + if [ ! -s "${TMP_PATH}/clu_not_accepted_swap.index" ]; then # shellcheck disable=SC2086 - "$MMSEQS" createsubdb "${TMP_PATH}/clu_not_accepted_swap" "$SOURCE" "${TMP_PATH}/seq_wrong_assigned" ${VERBOSITY} \ - || fail "createsubdb1 reassign died" - fi - # build seed sequences - if notExists "${TMP_PATH}/seq_seeds.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" createsubdb "${TMP_PATH}/clu" "$SOURCE" "${TMP_PATH}/seq_seeds" ${VERBOSITY} \ - || fail "createsubdb2 reassign died" - fi - PARAM=PREFILTER${STEP}_PAR - eval PREFILTER_PAR="\$$PARAM" - # try to find best matching centroid sequences for prev. 
wrong assigned sequences - if notExists "${TMP_PATH}/seq_wrong_assigned_pref.dbtype"; then - if notExists "${TMP_PATH}/seq_seeds.merged.dbtype"; then - # combine seq dbs - MAXOFFSET=$(awk '($2+$3) > max{max=$2+$3}END{print max}' "${TMP_PATH}/seq_seeds.index") - awk -v OFFSET="${MAXOFFSET}" 'FNR==NR{print $0; next}{print $1"\t"$2+OFFSET"\t"$3}' "${TMP_PATH}/seq_seeds.index" \ - "${TMP_PATH}/seq_wrong_assigned.index" > "${TMP_PATH}/seq_seeds.merged.index" - ln -s "$(abspath "${TMP_PATH}/seq_seeds")" "${TMP_PATH}/seq_seeds.merged.0" - ln -s "$(abspath "${TMP_PATH}/seq_wrong_assigned")" "${TMP_PATH}/seq_seeds.merged.1" - cp "${TMP_PATH}/seq_seeds.dbtype" "${TMP_PATH}/seq_seeds.merged.dbtype" + "$MMSEQS" mvdb "${TMP_PATH}/clu" "$2" ${VERBOSITY} + if [ -n "$REMOVE_TMP" ]; then + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_not_accepted_swap" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_not_accepted" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} + fi + else + # create file of cluster that do align based on given criteria + if notExists "${TMP_PATH}/clu_accepted.dbtype"; then + # shellcheck disable=SC2086 + "$MMSEQS" subtractdbs "${TMP_PATH}/clu" "${TMP_PATH}/clu_not_accepted" "${TMP_PATH}/clu_accepted" --e-profile 100000000 -e 100000000 ${THREADSANDCOMPRESS} \ + || fail "subtractdbs2 reassign died" + fi + # create sequences database that were wrong assigned + if notExists "${TMP_PATH}/seq_wrong_assigned.dbtype"; then + # shellcheck disable=SC2086 + "$MMSEQS" createsubdb "${TMP_PATH}/clu_not_accepted_swap" "$SOURCE" "${TMP_PATH}/seq_wrong_assigned" ${VERBOSITY} \ + || fail "createsubdb1 reassign died" + fi + # build seed sequences + if notExists "${TMP_PATH}/seq_seeds.dbtype"; then + # shellcheck disable=SC2086 + "$MMSEQS" createsubdb "${TMP_PATH}/clu" "$SOURCE" "${TMP_PATH}/seq_seeds" ${VERBOSITY} \ + || fail "createsubdb2 reassign died" + fi + PARAM=PREFILTER${STEP}_PAR + eval PREFILTER_PAR="\$$PARAM" + # try to find best matching centroid sequences for prev. 
wrong assigned sequences + if notExists "${TMP_PATH}/seq_wrong_assigned_pref.dbtype"; then + if notExists "${TMP_PATH}/seq_seeds.merged.dbtype"; then + # combine seq dbs + MAXOFFSET=$(awk '($2+$3) > max{max=$2+$3}END{print max}' "${TMP_PATH}/seq_seeds.index") + awk -v OFFSET="${MAXOFFSET}" 'FNR==NR{print $0; next}{print $1"\t"$2+OFFSET"\t"$3}' "${TMP_PATH}/seq_seeds.index" \ + "${TMP_PATH}/seq_wrong_assigned.index" > "${TMP_PATH}/seq_seeds.merged.index" + ln -s "$(abspath "${TMP_PATH}/seq_seeds")" "${TMP_PATH}/seq_seeds.merged.0" + ln -s "$(abspath "${TMP_PATH}/seq_wrong_assigned")" "${TMP_PATH}/seq_seeds.merged.1" + cp "${TMP_PATH}/seq_seeds.dbtype" "${TMP_PATH}/seq_seeds.merged.dbtype" + fi + # shellcheck disable=SC2086 + $RUNNER "$MMSEQS" prefilter "${TMP_PATH}/seq_wrong_assigned" "${TMP_PATH}/seq_seeds.merged" "${TMP_PATH}/seq_wrong_assigned_pref" ${PREFILTER_REASSIGN_PAR} \ + || fail "Prefilter reassign died" + fi + if notExists "${TMP_PATH}/seq_wrong_assigned_pref_swaped.dbtype"; then + # shellcheck disable=SC2086 + "$MMSEQS" swapdb "${TMP_PATH}/seq_wrong_assigned_pref" "${TMP_PATH}/seq_wrong_assigned_pref_swaped" ${THREADSANDCOMPRESS} \ + || fail "swapdb2 reassign died" + fi + if notExists "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln.dbtype"; then + # shellcheck disable=SC2086 + $RUNNER "$MMSEQS" "${ALIGN_MODULE}" "${TMP_PATH}/seq_seeds.merged" "${TMP_PATH}/seq_wrong_assigned" \ + "${TMP_PATH}/seq_wrong_assigned_pref_swaped" "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln" ${ALIGNMENT_REASSIGN_PAR} \ + || fail "align2 reassign died" fi - # shellcheck disable=SC2086 - $RUNNER "$MMSEQS" prefilter "${TMP_PATH}/seq_wrong_assigned" "${TMP_PATH}/seq_seeds.merged" "${TMP_PATH}/seq_wrong_assigned_pref" ${PREFILTER_REASSIGN_PAR} \ - || fail "Prefilter reassign died" - fi - if notExists "${TMP_PATH}/seq_wrong_assigned_pref_swaped.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" swapdb "${TMP_PATH}/seq_wrong_assigned_pref" "${TMP_PATH}/seq_wrong_assigned_pref_swaped" ${THREADSANDCOMPRESS} \ - || fail "swapdb2 reassign died" - fi - if notExists "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln.dbtype"; then - # shellcheck disable=SC2086 - $RUNNER "$MMSEQS" "${ALIGN_MODULE}" "${TMP_PATH}/seq_seeds.merged" "${TMP_PATH}/seq_wrong_assigned" \ - "${TMP_PATH}/seq_wrong_assigned_pref_swaped" "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln" ${ALIGNMENT_REASSIGN_PAR} \ - || fail "align2 reassign died" - fi - - if notExists "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" filterdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln" "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol" --trim-to-one-column ${THREADSANDCOMPRESS} \ - || fail "filterdb2 reassign died" - fi - if notExists "${TMP_PATH}/clu_accepted_plus_wrong.dbtype"; then - # combine clusters - # shellcheck disable=SC2086 - "$MMSEQS" mergedbs "${TMP_PATH}/seq_seeds.merged" "${TMP_PATH}/clu_accepted_plus_wrong" "${TMP_PATH}/clu_accepted" \ - "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol" ${MERGEDBS_PAR} \ - || fail "mergedbs reassign died" - fi + if notExists "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol.dbtype"; then + # shellcheck disable=SC2086 + "$MMSEQS" filterdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln" "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol" --trim-to-one-column ${THREADSANDCOMPRESS} \ + || fail "filterdb2 reassign died" + fi - if notExists "${TMP_PATH}/missing.single.seqs.db.dbtype"; then - awk 'FNR==NR{if($3 > 1){ f[$1]=1; }next} !($1 
in f){print $1"\t"$1}' "${TMP_PATH}/clu_accepted_plus_wrong.index" "${SOURCE}.index" > "${TMP_PATH}/missing.single.seqs" - # shellcheck disable=SC2086 - "$MMSEQS" tsv2db "${TMP_PATH}/missing.single.seqs" "${TMP_PATH}/missing.single.seqs.db" --output-dbtype 6 ${VERBCOMPRESS} \ - || fail "tsv2db reassign died" - fi + if notExists "${TMP_PATH}/clu_accepted_plus_wrong.dbtype"; then + # combine clusters + # shellcheck disable=SC2086 + "$MMSEQS" mergedbs "${TMP_PATH}/seq_seeds.merged" "${TMP_PATH}/clu_accepted_plus_wrong" "${TMP_PATH}/clu_accepted" \ + "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol" ${MERGEDBS_PAR} \ + || fail "mergedbs reassign died" + fi - if notExists "${TMP_PATH}/clu_accepted_plus_wrong_plus_single.dbtype"; then - # combine clusters - # shellcheck disable=SC2086 - "$MMSEQS" mergedbs "${SOURCE}" "${TMP_PATH}/clu_accepted_plus_wrong_plus_single" "${TMP_PATH}/clu_accepted_plus_wrong" \ - "${TMP_PATH}/missing.single.seqs.db" ${MERGEDBS_PAR} \ - || fail "mergedbs2 reassign died" - fi + if notExists "${TMP_PATH}/missing.single.seqs.db.dbtype"; then + awk 'FNR==NR{if($3 > 1){ f[$1]=1; }next} !($1 in f){print $1"\t"$1}' "${TMP_PATH}/clu_accepted_plus_wrong.index" "${SOURCE}.index" > "${TMP_PATH}/missing.single.seqs" + # shellcheck disable=SC2086 + "$MMSEQS" tsv2db "${TMP_PATH}/missing.single.seqs" "${TMP_PATH}/missing.single.seqs.db" --output-dbtype 6 ${VERBCOMPRESS} \ + || fail "tsv2db reassign died" + fi - PARAM=CLUSTER${STEP}_PAR - eval TMP="\$$PARAM" - # shellcheck disable=SC2086 - "$MMSEQS" clust "${SOURCE}" "${TMP_PATH}/clu_accepted_plus_wrong_plus_single" "${2}" ${TMP} \ - || fail "Clustering step $STEP died" + if notExists "${TMP_PATH}/clu_accepted_plus_wrong_plus_single.dbtype"; then + # combine clusters + # shellcheck disable=SC2086 + "$MMSEQS" mergedbs "${SOURCE}" "${TMP_PATH}/clu_accepted_plus_wrong_plus_single" "${TMP_PATH}/clu_accepted_plus_wrong" \ + "${TMP_PATH}/missing.single.seqs.db" ${MERGEDBS_PAR} \ + || fail "mergedbs2 reassign died" + fi - if [ -n "$REMOVE_TMP" ]; then - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/clu_not_accepted" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/clu_accepted" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/clu_not_accepted_swap" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_seeds" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_seeds.merged" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol" ${VERBOSITY} - rm -f "${TMP_PATH}/missing.single.seqs" - rm -f "${TMP_PATH}/clu_accepted_plus_wrong.tsv" + PARAM=CLUSTER${STEP}_PAR + eval TMP="\$$PARAM" # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/missing.single.seqs.db" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/clu_accepted_plus_wrong" ${VERBOSITY} - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/clu_accepted_plus_wrong_plus_single" ${VERBOSITY} + "$MMSEQS" clust 
"${SOURCE}" "${TMP_PATH}/clu_accepted_plus_wrong_plus_single" "${2}" ${TMP} \ + || fail "Clustering step $STEP died" + if [ -n "$REMOVE_TMP" ]; then + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_not_accepted" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_accepted" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_not_accepted_swap" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_seeds" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_seeds.merged" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/seq_wrong_assigned_pref_swaped_aln_ocol" ${VERBOSITY} + rm -f "${TMP_PATH}/missing.single.seqs" + rm -f "${TMP_PATH}/clu_accepted_plus_wrong.tsv" + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/missing.single.seqs.db" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_accepted_plus_wrong" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/clu_accepted_plus_wrong_plus_single" ${VERBOSITY} + fi fi fi - if [ -n "$REMOVE_TMP" ]; then # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/clu_redundancy" ${VERBOSITY} # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/input_step_redundancy" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/input_step_redundancy_h" ${VERBOSITY} STEP=0 while [ "$STEP" -lt "$STEPS" ]; do # shellcheck disable=SC2086 @@ -260,6 +274,8 @@ if [ -n "$REMOVE_TMP" ]; then while [ "$STEP" -lt "$STEPS" ]; do # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/input_step$STEP" ${VERBOSITY} + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/input_step${STEP}_h" ${VERBOSITY} STEP=$((STEP+1)) done diff --git a/lib/mmseqs/data/workflow/databases.sh b/lib/mmseqs/data/workflow/databases.sh index d531773..cbe2bb7 100644 --- a/lib/mmseqs/data/workflow/databases.sh +++ b/lib/mmseqs/data/workflow/databases.sh @@ -134,6 +134,16 @@ case "${SELECTION}" in push_back "${TMP_PATH}/nt.gz" INPUT_TYPE="FASTA_LIST" ;; + "GTDB") + if notExists "${TMP_PATH}/download.done"; then + downloadFile "https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/VERSION" "${TMP_PATH}/version" + downloadFile "https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/genomic_files_reps/gtdb_proteins_aa_reps.tar.gz" "${TMP_PATH}/gtdb.tar.gz" + downloadFile "https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/bac120_taxonomy.tsv" "${TMP_PATH}/bac120_taxonomy.tsv" + downloadFile "https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/ar122_taxonomy.tsv" "${TMP_PATH}/ar122_taxonomy.tsv" + touch "${TMP_PATH}/download.done" + fi + INPUT_TYPE="GTDB" + ;; "PDB") if notExists "${TMP_PATH}/pdb_seqres.txt.gz"; then date "+%s" > "${TMP_PATH}/version" @@ -183,6 +193,15 @@ case "${SELECTION}" in fi INPUT_TYPE="eggNOG" ;; + "CDD") + if notExists "${TMP_PATH}/msa.msa.gz"; then + downloadFile "https://ftp.ncbi.nih.gov/pub/mmdb/cdd/cdd.info" "${TMP_PATH}/version" + downloadFile 
"https://ftp.ncbi.nih.gov/pub/mmdb/cdd/fasta.tar.gz" "${TMP_PATH}/msa.tar.gz" + fi + INPUT_TYPE="FASTA_MSA" + FASTA_MSA_SED='s|\.FASTA||g' + FASTA_MSA_MSA2PROFILE_PAR="--skip-query" + ;; "Resfinder") if notExists "${TMP_PATH}/download.done"; then downloadFile "https://api.bitbucket.org/2.0/repositories/genomicepidemiology/resfinder_db/commit/master?fields=hash,date" "${TMP_PATH}/version" @@ -283,27 +302,33 @@ case "${INPUT_TYPE}" in "${MMSEQS}" msa2profile "${TMP_PATH}/msa" "${OUTDB}" --match-mode 1 --match-ratio 0.5 ${THREADS_PAR} \ || fail "msa2profile died" if [ -n "${REMOVE_TMP}" ]; then - "${MMSEQS}" rmdb "${TMP_PATH}/msa" \ - || fail "rmdb died" + # shellcheck disable=SC2086 + "${MMSEQS}" rmdb "${TMP_PATH}/msa" ${VERB_PAR} \ + || fail "rmdb died" fi ;; "FASTA_MSA") # shellcheck disable=SC2086 - "${MMSEQS}" tar2db "${TMP_PATH}/msa.tar.gz" "${TMP_PATH}/msa" ${VERB_PAR} --output-dbtype 11 \ + "${MMSEQS}" tar2db "${TMP_PATH}/msa.tar.gz" "${TMP_PATH}/msa" --output-dbtype 11 ${THREADS_PAR} \ || fail "tar2db died" + if [ -n "${FASTA_MSA_SED}" ]; then + sed "${FASTA_MSA_SED}" "${TMP_PATH}/msa.lookup" > "${TMP_PATH}/msa.lookup_tmp" + mv -f "${TMP_PATH}/msa.lookup_tmp" "${TMP_PATH}/msa.lookup" + fi rm -f "${TMP_PATH}/msa.tar.gz" # shellcheck disable=SC2086 - "${MMSEQS}" msa2profile "${TMP_PATH}/msa" "${OUTDB}" --match-mode 1 --match-ratio 0.5 ${THREADS_PAR} \ + "${MMSEQS}" msa2profile "${TMP_PATH}/msa" "${OUTDB}" --match-mode 1 --match-ratio 0.5 ${FASTA_MSA_MSA2PROFILE_PAR} ${THREADS_PAR} \ || fail "msa2profile died" if [ -n "${REMOVE_TMP}" ]; then - "${MMSEQS}" rmdb "${TMP_PATH}/msa" \ + # shellcheck disable=SC2086 + "${MMSEQS}" rmdb "${TMP_PATH}/msa" ${VERB_PAR} \ || fail "rmdb died" fi ;; "eggNOG") # shellcheck disable=SC2086 - "${MMSEQS}" tar2db "${TMP_PATH}/bacteria" "${TMP_PATH}/archea" "${TMP_PATH}/eukaryota" "${TMP_PATH}/viruses" "${TMP_PATH}/msa" --output-dbtype 11 --tar-include '\.raw_alg\.faa\.gz$' ${COMP_PAR} \ - || fail "msa2profile died" + "${MMSEQS}" tar2db "${TMP_PATH}/bacteria" "${TMP_PATH}/archea" "${TMP_PATH}/eukaryota" "${TMP_PATH}/viruses" "${TMP_PATH}/msa" --output-dbtype 11 --tar-include '\.raw_alg\.faa\.gz$' ${THREADS_PAR} \ + || fail "tar2db died" rm -f "${TMP_PATH}/bacteria.tar" "${TMP_PATH}/archea.tar" "${TMP_PATH}/eukaryota.tar" "${TMP_PATH}/viruses.tar" sed 's|\.raw_alg\.faa\.gz||g' "${TMP_PATH}/msa.lookup" > "${TMP_PATH}/msa.lookup.tmp" mv -f "${TMP_PATH}/msa.lookup.tmp" "${TMP_PATH}/msa.lookup" @@ -312,8 +337,26 @@ case "${INPUT_TYPE}" in || fail "msa2profile died" mv -f "${TMP_PATH}/msa.lookup" "${OUTDB}.lookup" mv -f "${TMP_PATH}/msa.source" "${OUTDB}.source" - "${MMSEQS}" rmdb "${TMP_PATH}/msa" \ - || fail "rmdb died" + if [ -n "${REMOVE_TMP}" ]; then + # shellcheck disable=SC2086 + "${MMSEQS}" rmdb "${TMP_PATH}/msa" ${VERB_PAR} \ + || fail "rmdb died" + fi + ;; + "GTDB") + # shellcheck disable=SC2086 + "${MMSEQS}" tar2db "${TMP_PATH}/gtdb.tar.gz" "${TMP_PATH}/tardb" --tar-include 'faa$' ${THREADS_PAR} \ + || fail "tar2db died" + sed 's|_protein\.faa||g' "${TMP_PATH}/tardb.lookup" > "${TMP_PATH}/tardb.lookup.tmp" + mv -f -- "${TMP_PATH}/tardb.lookup.tmp" "${TMP_PATH}/tardb.lookup" + # shellcheck disable=SC2086 + "${MMSEQS}" createdb "${TMP_PATH}/tardb" "${OUTDB}" ${COMP_PAR} \ + || fail "createdb died" + if [ -n "${REMOVE_TMP}" ]; then + # shellcheck disable=SC2086 + "${MMSEQS}" rmdb "${TMP_PATH}/tardb" ${VERB_PAR} \ + || fail "rmdb died" + fi ;; esac fi @@ -356,6 +399,52 @@ if [ -n "${TAXONOMY}" ] && notExists "${OUTDB}_mapping"; then "${MMSEQS}" 
nrtotaxmapping "${TMP_PATH}/pdb.accession2taxid" "${TMP_PATH}/prot.accession2taxid" "${OUTDB}" "${OUTDB}_mapping" ${THREADS_PAR} \ || fail "nrtotaxmapping died" ;; + "GTDB") + # shellcheck disable=SC2016 + CMD='BEGIN { + FS = "[\t;]"; + rank["c"] = "class"; + rank["d"] = "superkingdom"; + rank["f"] = "family"; + rank["g"] = "genus"; + rank["o"] = "order"; + rank["p"] = "phylum"; + rank["s"] = "species"; + taxCnt = 1; + ids["root"] = 1; + print "1\t|\t1\t|\tno rank\t|\t-\t|" > taxdir"/nodes.dmp"; + print "1\t|\troot\t|\t-\t|\tscientific name\t|" > taxdir"/names.dmp"; + } + { + prevTaxon=1; + for (i = 2; i <= NF; i++) { + if ($i in ids) { + prevTaxon = ids[$i]; + } else { + taxCnt++; + ids[$i] = taxCnt; + r = substr($i, 0, 1); + name = substr($i, 4); + gsub(/_/, " ", name); + printf("%s\t|\t%s\t|\t%s\t|\t-\t|\n", taxCnt, prevTaxon, rank[r]) > taxdir"/nodes.dmp"; + printf("%s\t|\t%s\t|\t-\t|\tscientific name\t|\n", taxCnt, name) > taxdir"/names.dmp"; + prevTaxon = taxCnt; + } + } + printf("%s\t%s\n", $1, ids[$NF]) > taxdir"/mapping_genomes"; + }' + mkdir -p "${TMP_PATH}/taxonomy" + awk -v taxdir="${TMP_PATH}/taxonomy" "$CMD" "${TMP_PATH}/bac120_taxonomy.tsv" "${TMP_PATH}/ar122_taxonomy.tsv" + touch "${TMP_PATH}/taxonomy/merged.dmp" + touch "${TMP_PATH}/taxonomy/delnodes.dmp" + # shellcheck disable=SC2086 + "${MMSEQS}" createtaxdb "${OUTDB}" "${TMP_PATH}/taxdb" --ncbi-tax-dump "${TMP_PATH}/taxonomy" --tax-mapping-file "${TMP_PATH}/taxonomy/mapping_genomes" --tax-mapping-mode 1 ${THREADS_PAR} \ + || fail "createtaxdb died" + if [ -n "${REMOVE_TMP}" ]; then + rm -f -- "${TMP_PATH}/taxonomy/nodes.dmp" "${TMP_PATH}/taxonomy/names.dmp" "${TMP_PATH}/taxonomy/merged.dmp" "${TMP_PATH}/taxonomy/delnodes.dmp" "${TMP_PATH}/taxonomy/mapping_genomes" "${TMP_PATH}/bac120_taxonomy.tsv" "${TMP_PATH}/ar122_taxonomy.tsv" + rm -rf -- "${TMP_PATH}/taxdb" "${TMP_PATH}/taxonomy" + fi + ;; *) # shellcheck disable=SC2086 "${MMSEQS}" prefixid "${OUTDB}_h" "${TMP_PATH}/header_pref.tsv" --tsv ${THREADS_PAR} \ diff --git a/lib/mmseqs/data/workflow/easytaxonomy.sh b/lib/mmseqs/data/workflow/easytaxonomy.sh index af14c8f..70720a4 100755 --- a/lib/mmseqs/data/workflow/easytaxonomy.sh +++ b/lib/mmseqs/data/workflow/easytaxonomy.sh @@ -20,57 +20,51 @@ if notExists "${TMP_PATH}/result.dbtype"; then || fail "Search died" fi -if notExists "${TMP_PATH}/result_lca.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" lca "${TARGET}" "${TMP_PATH}/result" "${TMP_PATH}/result_lca" ${LCA_PAR} \ - || fail "lca died" -fi - if notExists "${RESULTS}_lca.tsv"; then # shellcheck disable=SC2086 - "$MMSEQS" createtsv "${TMP_PATH}/query" "${TMP_PATH}/result_lca" "${RESULTS}_lca.tsv" ${CREATETSV_PAR} \ + "$MMSEQS" createtsv "${TMP_PATH}/query" "${TMP_PATH}/result" "${RESULTS}_lca.tsv" ${CREATETSV_PAR} \ || fail "createtsv died" fi # shellcheck disable=SC2086 -"$MMSEQS" taxonomyreport "${TARGET}" "${TMP_PATH}/result_lca" "${RESULTS}_report" ${THREADS_PAR} \ +"$MMSEQS" taxonomyreport "${TARGET}" "${TMP_PATH}/result" "${RESULTS}_report" ${TAXONOMYREPORT_PAR} \ || fail "taxonomyreport died" -if notExists "${TMP_PATH}/result_tophit1.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" filterdb "${TMP_PATH}/result" "${TMP_PATH}/result_top1" --extract-lines 1 ${THREADS_COMP_PAR} \ - || fail "filterdb died" -fi +#if notExists "${TMP_PATH}/result_aln.dbtype"; then +# # shellcheck disable=SC2086 +# "$MMSEQS" filterdb "${TMP_PATH}/result" "${TMP_PATH}/result_aln" --extract-lines 1 ${THREADS_COMP_PAR} \ +# || fail "filterdb died" +#fi -if notExists 
"${TMP_PATH}/result_top1_swapped.dbtype"; then +if notExists "${TMP_PATH}/result_aln_swapped.dbtype"; then # shellcheck disable=SC2086 - "$MMSEQS" swapresults "${TMP_PATH}/query" "${TARGET}" "${TMP_PATH}/result_top1" "${TMP_PATH}/result_top1_swapped" ${SWAPRESULT_PAR} \ + "$MMSEQS" swapresults "${TMP_PATH}/query" "${TARGET}" "${TMP_PATH}/result_aln" "${TMP_PATH}/result_aln_swapped" ${SWAPRESULT_PAR} \ || fail "filterdb died" fi -if notExists "${TMP_PATH}/result_top1_swapped_sum.dbtype"; then +if notExists "${TMP_PATH}/result_aln_swapped_sum.dbtype"; then # shellcheck disable=SC2086 - "$MMSEQS" summarizealis "${TMP_PATH}/result_top1_swapped" "${TMP_PATH}/result_top1_swapped_sum" ${THREADS_COMP_PAR} \ + "$MMSEQS" summarizealis "${TMP_PATH}/result_aln_swapped" "${TMP_PATH}/result_aln_swapped_sum" ${THREADS_COMP_PAR} \ || fail "filterdb died" fi -if notExists "${TMP_PATH}/result_top1_swapped_sum_tax.dbtype"; then +if notExists "${TMP_PATH}/result_aln_swapped_sum_tax.dbtype"; then # shellcheck disable=SC2086 - "$MMSEQS" addtaxonomy "${TARGET}" "${TMP_PATH}/result_top1_swapped_sum" "${TMP_PATH}/result_top1_swapped_sum_tax" ${THREADS_COMP_PAR} --pick-id-from 1 --tax-lineage 1 \ + "$MMSEQS" addtaxonomy "${TARGET}" "${TMP_PATH}/result_aln_swapped_sum" "${TMP_PATH}/result_aln_swapped_sum_tax" ${ADDTAXONOMY_PAR} \ || fail "filterdb died" fi # shellcheck disable=SC2086 -"$MMSEQS" createtsv "${TARGET}" "${TMP_PATH}/result_top1_swapped_sum_tax" "${RESULTS}_tophit_report" ${CREATETSV_PAR} \ +"$MMSEQS" createtsv "${TARGET}" "${TMP_PATH}/result_aln_swapped_sum_tax" "${RESULTS}_tophit_report" ${CREATETSV_PAR} \ || fail "filterdb died" # shellcheck disable=SC2086 -"$MMSEQS" convertalis "${TMP_PATH}/query" "${TARGET}" "${TMP_PATH}/result_top1" "${RESULTS}_tophit_aln" ${CONVERT_PAR} \ +"$MMSEQS" convertalis "${TMP_PATH}/query" "${TARGET}" "${TMP_PATH}/result_aln" "${RESULTS}_tophit_aln" ${CONVERT_PAR} \ || fail "convertalis died" if [ -n "${REMOVE_TMP}" ]; then # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/result" + "$MMSEQS" rmdb "${TMP_PATH}/result" ${VERBOSITY} if [ -z "${LEAVE_INPUT}" ]; then # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/query" ${VERBOSITY} @@ -78,13 +72,13 @@ if [ -n "${REMOVE_TMP}" ]; then "$MMSEQS" rmdb "${TMP_PATH}/query_h" ${VERBOSITY} fi # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/result_top1" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/result_aln" ${VERBOSITY} # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/result_top1_swapped" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/result_aln_swapped" ${VERBOSITY} # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/result_top1_swapped_sum" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/result_aln_swapped_sum" ${VERBOSITY} # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/result_top1_swapped_sum_tax" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/result_aln_swapped_sum_tax" ${VERBOSITY} rm -rf "${TMP_PATH}/taxonomy_tmp" rm -f "${TMP_PATH}/easytaxonomy.sh" diff --git a/lib/mmseqs/data/workflow/linclust.sh b/lib/mmseqs/data/workflow/linclust.sh index 4343bdb..a928293 100755 --- a/lib/mmseqs/data/workflow/linclust.sh +++ b/lib/mmseqs/data/workflow/linclust.sh @@ -89,6 +89,8 @@ if notExists "${TMP_PATH}/clu.dbtype"; then fi if [ -n "$REMOVE_TMP" ]; then + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/pref_filter1" ${VERBOSITY} # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/pref" ${VERBOSITY} # shellcheck disable=SC2086 @@ -97,22 +99,18 @@ if [ -n "$REMOVE_TMP" ]; then "$MMSEQS" 
rmdb "${TMP_PATH}/pre_clust" ${VERBOSITY} # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/input_step_redundancy" ${VERBOSITY} - rm -f "${TMP_PATH}/order_redundancy" - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/pref_filter1" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/input_step_redundancy_h" ${VERBOSITY} + rm -f "${TMP_PATH}/order_redundancy" # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/pref_filter2" ${VERBOSITY} - - if [ -n "${ALIGN_GAPPED}" ]; then - if [ -n "$FILTER" ]; then - # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/pref_rescore2" ${VERBOSITY} - fi + if [ -n "$FILTER" ]; then # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/pref_rescore2" ${VERBOSITY} fi # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} + # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/clust" ${VERBOSITY} rm -f "${TMP_PATH}/linclust.sh" fi diff --git a/lib/mmseqs/data/workflow/searchslicedtargetprofile.sh b/lib/mmseqs/data/workflow/searchslicedtargetprofile.sh index 06c8535..2865470 100755 --- a/lib/mmseqs/data/workflow/searchslicedtargetprofile.sh +++ b/lib/mmseqs/data/workflow/searchslicedtargetprofile.sh @@ -126,28 +126,31 @@ while [ "${FIRST_INDEX_LINE}" -le "${TOTAL_NUM_PROFILES}" ]; do # align current step chunk if notExists "${TMP_PATH}/aln.done"; then # shellcheck disable=SC2086 - ${RUNNER} "$MMSEQS" "${ALIGN_MODULE}" "${PROFILEDB}" "${INPUT}" "${TMP_PATH}/pref" "${TMP_PATH}/aln" ${ALIGNMENT_PAR} \ + ${RUNNER} "$MMSEQS" "${ALIGN_MODULE}" "${PROFILEDB}" "${INPUT}" "${TMP_PATH}/pref" "${TMP_PATH}/aln" ${ALIGNMENT_IT_PAR} \ || fail "align died" # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/pref" ${VERBOSITY} touch "${TMP_PATH}/aln.done" fi - # swap alignment of current step chunk - if notExists "${TMP_PATH}/aln_swap.done"; then - # note: the evalue has been corrected for inverted search by the workflow caller + + # no matter what, process at least one profile... + if [ "${FILTER_RESULT}" -eq 1 ]; then + # shellcheck disable=SC2086 + ${RUNNER} "$MMSEQS" filterresult "${PROFILEDB}" "${INPUT}" "${TMP_PATH}/aln" "${TMP_PATH}/aln_filt" ${FILTER_PAR} \ + || fail "align died" # shellcheck disable=SC2086 - "$MMSEQS" swapresults "${TARGET}" "${INPUT}" "${TMP_PATH}/aln" "${TMP_PATH}/aln_swap" ${SWAP_PAR} \ - || fail "swapresults died" + "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} || fail "rmdb aln died" # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} - touch "${TMP_PATH}/aln_swap.done" + "$MMSEQS" mvdb "${TMP_PATH}/aln_filt" "${TMP_PATH}/aln" ${VERBOSITY} || fail "mv aln_filt aln died" + touch "${TMP_PATH}/aln.done" fi + # merge swapped alignment of current chunk to previous steps if [ -f "${TMP_PATH}/aln_merged.dbtype" ]; then # shellcheck disable=SC2086 - "$MMSEQS" mergedbs "${INPUT}" "${TMP_PATH}/aln_merged_new" "${TMP_PATH}/aln_merged" "${TMP_PATH}/aln_swap" ${VERBOSITY} \ + "$MMSEQS" mergedbs "${PROFILEDB}" "${TMP_PATH}/aln_merged_new" "${TMP_PATH}/aln_merged" "${TMP_PATH}/aln" ${VERBOSITY} \ || fail "mergedbs died" # rmdb of aln_merged to avoid conflict with unmerged dbs: aln_merged.0, .1... 
# shellcheck disable=SC2086 @@ -155,24 +158,36 @@ while [ "${FIRST_INDEX_LINE}" -le "${TOTAL_NUM_PROFILES}" ]; do # shellcheck disable=SC2086 "$MMSEQS" mvdb "${TMP_PATH}/aln_merged_new" "${TMP_PATH}/aln_merged" ${VERBOSITY} || fail "mv aln_merged_new aln_merged died" # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/aln_swap" ${VERBOSITY} || fail "rmdb aln_swap died" + "$MMSEQS" rmdb "${TMP_PATH}/aln" ${VERBOSITY} || fail "rmdb aln died" else # shellcheck disable=SC2086 - "$MMSEQS" mvdb "${TMP_PATH}/aln_swap" "${TMP_PATH}/aln_merged" ${VERBOSITY} \ + "$MMSEQS" mvdb "${TMP_PATH}/aln" "${TMP_PATH}/aln_merged" ${VERBOSITY} \ || fail "mvdb died" fi STEP="$((STEP+1))" # update for the next step - rm -f "${TMP_PATH}/pref.done" "${TMP_PATH}/aln.done" "${TMP_PATH}/aln_swap.done" + rm -f "${TMP_PATH}/pref.done" "${TMP_PATH}/aln.done" printf "%d\\t%s\\n" "${FIRST_INDEX_LINE}" "${NUM_PREF_RESULTS_IN_ALL_PREV_STEPS}" > "${TMP_PATH}/aln_${STEP}.checkpoint" done -# keep only the top max-seqs hits according to the default alignment sorting criteria + +# swap alignment of current step chunk +if notExists "${TMP_PATH}/aln.done"; then + # keep only the top max-seqs hits according to the default alignment sorting criteria + # shellcheck disable=SC2086 + "$MMSEQS" align "${PROFILEDB}" "${INPUT}" "${TMP_PATH}/aln_merged" "${TMP_PATH}/aln" ${ALIGNMENT_PAR} \ + || fail "sortresult died" + # rmdb of aln_merged to avoid conflict with unmerged dbs: aln_merged.0, .1... + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/aln_merged" ${VERBOSITY} || fail "rmdb aln_merged died" +fi + +# note: the evalue has been corrected for inverted search by the workflow caller # shellcheck disable=SC2086 -"$MMSEQS" sortresult "${TMP_PATH}/aln_merged" "${RESULT}" ${SORTRESULT_PAR} \ - || fail "sortresult died" +"$MMSEQS" swapresults "${TARGET}" "${INPUT}" "${TMP_PATH}/aln" "${RESULT}" ${SWAPRES_PAR} \ + || fail "swapresults died" if [ -n "$REMOVE_TMP" ]; then diff --git a/lib/mmseqs/data/workflow/taxonomy.sh b/lib/mmseqs/data/workflow/taxonomy.sh index 40b8c57..d049337 100755 --- a/lib/mmseqs/data/workflow/taxonomy.sh +++ b/lib/mmseqs/data/workflow/taxonomy.sh @@ -58,11 +58,12 @@ else fi if [ -n "${REMOVE_TMP}" ]; then - rm -rf "${TMP_PATH}/tmp_hsp1" - + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/first" ${VERBOSITY} if [ -n "${TOPHIT_MODE}" ]; then # shellcheck disable=SC2086 - "$MMSEQS" rmdb "${TMP_PATH}/first" ${VERBOSITY} + "$MMSEQS" rmdb "${TMP_PATH}/top1" ${VERBOSITY} fi + rm -rf "${TMP_PATH}/tmp_hsp1" rm -f "${TMP_PATH}/taxonomy.sh" fi diff --git a/lib/mmseqs/data/workflow/taxpercontig.sh b/lib/mmseqs/data/workflow/taxpercontig.sh index 4d5a798..299ac36 100644 --- a/lib/mmseqs/data/workflow/taxpercontig.sh +++ b/lib/mmseqs/data/workflow/taxpercontig.sh @@ -75,15 +75,30 @@ if [ ! -e "${TMP_PATH}/orfs_h_swapped.dbtype" ]; then # shellcheck disable=SC2086 "$MMSEQS" swapdb "${ORFS_DB}_h" "${TMP_PATH}/orfs_h_swapped" ${SWAPDB_PAR} \ || fail "swapdb died" + awk 'BEGIN { printf("%c%c%c%c",5,0,0,0); exit; }' > "${TMP_PATH}/orfs_h_swapped.dbtype" fi -if [ ! -e "${RESULTS}.dbtype" ]; then +if [ "${TAX_OUTPUT}" = "0" ] || [ "${TAX_OUTPUT}" = "2" ]; then # shellcheck disable=SC2086 "$MMSEQS" aggregatetaxweights "${TAX_SEQ_DB}" "${TMP_PATH}/orfs_h_swapped" "${TMP_PATH}/orfs_tax" "${TMP_PATH}/orfs_tax_aln" "${RESULTS}" ${AGGREGATETAX_PAR} \ || fail "aggregatetaxweights died" fi +if [ "${TAX_OUTPUT}" = "1" ] || [ "${TAX_OUTPUT}" = "2" ]; then + if [ ! 
-e "${TMP_PATH}/orfs_tax_aln_first.dbtype" ]; then + # shellcheck disable=SC2086 + "$MMSEQS" filterdb "${TMP_PATH}/orfs_tax_aln" "${TMP_PATH}/orfs_tax_aln_first" --extract-lines 1 ${THREADS_COMP_PAR} \ + || fail "filterdb died" + fi + OUT_ALN="${RESULTS}_aln" + if [ "${TAX_OUTPUT}" = "1" ]; then + OUT_ALN="${RESULTS}" + fi + # shellcheck disable=SC2086 + "$MMSEQS" mergeresultsbyset "${TMP_PATH}/orfs_h_swapped" "${TMP_PATH}/orfs_tax_aln_first" "${OUT_ALN}" ${THREADS_COMP_PAR} \ + || fail "mvdb died" +fi if [ -n "${REMOVE_TMP}" ]; then # shellcheck disable=SC2086 @@ -106,6 +121,11 @@ if [ -n "${REMOVE_TMP}" ]; then # shellcheck disable=SC2086 "$MMSEQS" rmdb "${TMP_PATH}/orfs_h_swapped" ${VERBOSITY} + if [ "${TAX_OUTPUT}" = "1" ] || [ "${TAX_OUTPUT}" = "2" ]; then + # shellcheck disable=SC2086 + "$MMSEQS" rmdb "${TMP_PATH}/orfs_tax_aln_first" ${VERBOSITY} + fi + rm -rf "${TMP_PATH}/tmp_taxonomy" rm -f "${TMP_PATH}/taxpercontig.sh" fi diff --git a/lib/mmseqs/data/workflow/translated_search.sh b/lib/mmseqs/data/workflow/translated_search.sh index 9cf9f65..b2e4252 100755 --- a/lib/mmseqs/data/workflow/translated_search.sh +++ b/lib/mmseqs/data/workflow/translated_search.sh @@ -44,22 +44,22 @@ if [ -n "$NO_TARGET_INDEX" ]; then fi fi -if [ -n "$QUERY_NUCL" ] && [ -n "${ORF_FILTER}" ]; then - if notExists "${TMP_PATH}/q_orfs_aa_pref.dbtype"; then - # shellcheck disable=SC2086 - "$MMSEQS" prefilter "${QUERY}" "${TARGET}" "${TMP_PATH}/q_orfs_aa_pref" --min-ungapped-score 3 -s 3 -k 6 --diag-score 0 --spaced-kmer-mode 0 --max-seqs 1 ${THREAD_COMP_PAR} \ - || fail "Reference search died" - fi - - if notExists "${TMP_PATH}/q_orfs_aa_filter.dbtype"; then - awk '$3 > 1 { print $1 }' "${TMP_PATH}/q_orfs_aa_pref.index" > "${TMP_PATH}/q_orfs_aa_filter.list" - # shellcheck disable=SC2086 - "$MMSEQS" createsubdb "${TMP_PATH}/q_orfs_aa_filter.list" "${QUERY}" "${TMP_PATH}/q_orfs_aa_filter" ${CREATESUBDB_PAR} \ - || fail "createsubdb died" - fi - QUERY="${TMP_PATH}/q_orfs_aa_filter" - QUERY_ORF="${TMP_PATH}/q_orfs_aa_filter" -fi +#if [ -n "$QUERY_NUCL" ] && [ -n "${ORF_FILTER}" ]; then +# if notExists "${TMP_PATH}/q_orfs_aa_pref.dbtype"; then +# # shellcheck disable=SC2086 +# "$MMSEQS" prefilter "${QUERY}" "${TARGET}" "${TMP_PATH}/q_orfs_aa_pref" --min-ungapped-score 3 -s 3 -k 6 --diag-score 0 --spaced-kmer-mode 0 --max-seqs 1 ${THREAD_COMP_PAR} \ +# || fail "Reference search died" +# fi +# +# if notExists "${TMP_PATH}/q_orfs_aa_filter.dbtype"; then +# awk '$3 > 1 { print $1 }' "${TMP_PATH}/q_orfs_aa_pref.index" > "${TMP_PATH}/q_orfs_aa_filter.list" +# # shellcheck disable=SC2086 +# "$MMSEQS" createsubdb "${TMP_PATH}/q_orfs_aa_filter.list" "${QUERY}" "${TMP_PATH}/q_orfs_aa_filter" ${CREATESUBDB_PAR} \ +# || fail "createsubdb died" +# fi +# QUERY="${TMP_PATH}/q_orfs_aa_filter" +# QUERY_ORF="${TMP_PATH}/q_orfs_aa_filter" +#fi mkdir -p "${TMP_PATH}/search" if notExists "${TMP_PATH}/aln.dbtype"; then diff --git a/lib/mmseqs/lib/alp/LICENSE b/lib/mmseqs/lib/alp/LICENSE new file mode 100644 index 0000000..684a81f --- /dev/null +++ b/lib/mmseqs/lib/alp/LICENSE @@ -0,0 +1,20 @@ + PUBLIC DOMAIN NOTICE + National Center for Biotechnology Information + +This software/database is a "United States Government Work" under the +terms of the United States Copyright Act. It was written as part of +the author's offical duties as a United States Government employee and +thus cannot be copyrighted. This software/database is freely available +to the public for use. The National Library of Medicine and the U.S. 
+Government have not placed any restriction on its use or reproduction. + +Although all reasonable efforts have been taken to ensure the accuracy +and reliability of the software and data, the NLM and the U.S. +Government do not and cannot warrant the performance or results that +may be obtained by using this software or data. The NLM and the U.S. +Government disclaim all warranties, express or implied, including +warranties of performance, merchantability or fitness for any particular +purpose. + +Please cite the author in any work or product based on this material. + diff --git a/lib/mmseqs/lib/alp/njn_matrix.hpp b/lib/mmseqs/lib/alp/njn_matrix.hpp index e942dbd..9e8e4a6 100755 --- a/lib/mmseqs/lib/alp/njn_matrix.hpp +++ b/lib/mmseqs/lib/alp/njn_matrix.hpp @@ -597,7 +597,7 @@ namespace Njn { for (size_t i = 0; i < this->getM (); i++) { delete [] d_matrix_p [i]; d_matrix_p [i] = 0; } - if (this->getM () > 0) delete [] d_matrix_p; d_matrix_p = 0; + if (this->getM () > 0) { delete [] d_matrix_p; d_matrix_p = 0; } d_m = 0; d_n = 0; diff --git a/lib/mmseqs/lib/alp/njn_vector.hpp b/lib/mmseqs/lib/alp/njn_vector.hpp index 658de44..6ec51ee 100755 --- a/lib/mmseqs/lib/alp/njn_vector.hpp +++ b/lib/mmseqs/lib/alp/njn_vector.hpp @@ -330,7 +330,7 @@ namespace Njn { template void Vector ::free2 () { - if (getM () > 0) delete [] d_vector_p; d_vector_p = 0; + if (getM () > 0) { delete [] d_vector_p; d_vector_p = 0; } d_m = 0; } diff --git a/lib/mmseqs/lib/alp/readme.txt b/lib/mmseqs/lib/alp/readme.txt new file mode 100755 index 0000000..0d65da4 --- /dev/null +++ b/lib/mmseqs/lib/alp/readme.txt @@ -0,0 +1,10 @@ +This directory contains C++ library files related to calculation of the Gumbel parameters for pairwise sequence alignment. + +Usage with "make". + +One way to use this library is with the "make" command. The following assumes you have "make" and a C++ compiler suitably installed. If you use the command line to enter the "cpp" directory and type "make", it should create a library file called "libalp.a". How to use the library is shown in the example directory. If you enter this directory and type "make", it should compile the test program: this will work only if it can find the header and library files. In "example/Makefile", the -I flag to the C preprocessor adds a directory to search for headers ("sls_alignment_evaluer.hpp"), the -L flag to the linker adds a directory to search for libraries, and -lalp specifies linking to "libalp". + + +Please see the URL +http://www.ncbi.nlm.nih.gov/CBBresearch/Spouge/html_ncbi/html/index/software.html#6 +for further information. diff --git a/lib/mmseqs/lib/alp/sls_basic.hpp b/lib/mmseqs/lib/alp/sls_basic.hpp index 7049288..d56d481 100755 --- a/lib/mmseqs/lib/alp/sls_basic.hpp +++ b/lib/mmseqs/lib/alp/sls_basic.hpp @@ -176,7 +176,8 @@ namespace Sls { { if(!pointer_) { - std::cout << "Memory allocation error\n"; + std::cerr << "Memory allocation error" << std::endl; + abort(); }; } diff --git a/lib/mmseqs/lib/base64/LICENSE b/lib/mmseqs/lib/base64/LICENSE new file mode 100644 index 0000000..f2b2df7 --- /dev/null +++ b/lib/mmseqs/lib/base64/LICENSE @@ -0,0 +1,26 @@ +https://github.com/superwills/NibbleAndAHalf +base64.h -- Fast base64 encoding and decoding. +version 1.0.0, April 17, 2013 143a + +Copyright (C) 2013 William Sherif + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. 
+ +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. + +William Sherif +will.sherif@gmail.com + +YWxsIHlvdXIgYmFzZSBhcmUgYmVsb25nIHRvIHVz diff --git a/lib/mmseqs/lib/base64/README.md b/lib/mmseqs/lib/base64/README.md new file mode 100644 index 0000000..34efaa1 --- /dev/null +++ b/lib/mmseqs/lib/base64/README.md @@ -0,0 +1,8 @@ +NibbleAndAHalf +============== + +"Nibble And A Half" is an ANSI C library that provides fast base64 encoding and decoding, all in a single header file. + +Wed Apr 17 6:13p +- All test related functions moved to testbase64.h. To use, only need #include "base64.h": + https://github.com/superwills/NibbleAndAHalf/blob/master/NibbleAndAHalf/base64.h diff --git a/lib/mmseqs/lib/base64/base64.h b/lib/mmseqs/lib/base64/base64.h new file mode 100644 index 0000000..7b9dad0 --- /dev/null +++ b/lib/mmseqs/lib/base64/base64.h @@ -0,0 +1,148 @@ +/* + + https://github.com/superwills/NibbleAndAHalf + base64.h -- Fast base64 encoding and decoding. + version 1.0.0, April 17, 2013 143a + + Copyright (C) 2013 William Sherif + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + William Sherif + will.sherif@gmail.com + + YWxsIHlvdXIgYmFzZSBhcmUgYmVsb25nIHRvIHVz + +*/ +#ifndef BASE64_H +#define BASE64_H + +#include + +const static char *b64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +// maps A=>0,B=>1.. 
+const static unsigned char unb64[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //10 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //20 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //30 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //40 + 0, 0, 0, 62, 0, 0, 0, 63, 52, 53, //50 + 54, 55, 56, 57, 58, 59, 60, 61, 0, 0, //60 + 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, //70 + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, //80 + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, //90 + 25, 0, 0, 0, 0, 0, 0, 26, 27, 28, //100 + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, //110 + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, //120 + 49, 50, 51, 0, 0, 0, 0, 0, 0, 0, //130 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //140 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //150 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //160 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //170 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //180 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //190 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //200 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //210 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //220 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //230 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //240 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //250 + 0, 0, 0, 0, 0, 0, +}; // This array has 256 elements + +// Converts binary data of length=len to base64 characters. +std::string base64_encode(const void *data, int length) { + const unsigned char *bin = (const unsigned char *) data; + + int modLength = length % 3; + // 2 gives 1 and 1 gives 2, but 0 gives 0. + int padding = ((modLength & 1) << 1) + ((modLength & 2) >> 1); + + std::string res; + res.reserve(4 * (length + padding) / 3); + + int byteNo; + for (byteNo = 0; byteNo <= length - 3; byteNo += 3) { + unsigned char BYTE0 = bin[byteNo]; + unsigned char BYTE1 = bin[byteNo + 1]; + unsigned char BYTE2 = bin[byteNo + 2]; + res.append(1, b64[BYTE0 >> 2]); + res.append(1, b64[((0x3 & BYTE0) << 4) + (BYTE1 >> 4)]); + res.append(1, b64[((0x0f & BYTE1) << 2) + (BYTE2 >> 6)]); + res.append(1, b64[0x3f & BYTE2]); + } + + if (padding == 2) { + res.append(1, b64[bin[byteNo] >> 2]); + res.append(1, b64[(0x3 & bin[byteNo]) << 4]); + res.append(1, '='); + res.append(1, '='); + } else if (padding == 1) { + res.append(1, b64[bin[byteNo] >> 2]); + res.append(1, b64[((0x3 & bin[byteNo]) << 4) + (bin[byteNo + 1] >> 4)]); + res.append(1, b64[(0x0f & bin[byteNo + 1]) << 2]); + res.append(1, '='); + } + + return res; +} + +std::string base64_decode(const char *base64, int length) { + const unsigned char *data = (const unsigned char *) base64; + // 2 accesses below would be OOB. 
+ if (length < 2) { + return ""; + } + + int padding = 0; + if (data[length - 1] == '=') ++padding; + if (data[length - 2] == '=') ++padding; + + std::string res; + res.reserve(3 * length / 4 - padding); + + int charNo; + for (charNo = 0; charNo <= length - 4 - padding; charNo += 4) { + int A = unb64[data[charNo]]; + int B = unb64[data[charNo + 1]]; + int C = unb64[data[charNo + 2]]; + int D = unb64[data[charNo + 3]]; + + res.append(1, (A << 2) | (B >> 4)); + res.append(1, (B << 4) | (C >> 2)); + res.append(1, (C << 6) | (D)); + } + + if (padding == 1) { + int A = unb64[data[charNo]]; + int B = unb64[data[charNo + 1]]; + int C = unb64[data[charNo + 2]]; + + res.append(1, (A << 2) | (B >> 4)); + res.append(1, (B << 4) | (C >> 2)); + } else if (padding == 2) { + int A = unb64[data[charNo]]; + int B = unb64[data[charNo + 1]]; + + res.append(1, (A << 2) | (B >> 4)); + } + + return res; +} + +#endif diff --git a/lib/mmseqs/lib/cacode/LICENSE.LAST b/lib/mmseqs/lib/cacode/LICENSE.LAST new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/lib/mmseqs/lib/cacode/LICENSE.LAST @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/lib/mmseqs/lib/cacode/LICENSE.NCBI b/lib/mmseqs/lib/cacode/LICENSE.NCBI new file mode 100644 index 0000000..f2b2df7 --- /dev/null +++ b/lib/mmseqs/lib/cacode/LICENSE.NCBI @@ -0,0 +1,26 @@ +https://github.com/superwills/NibbleAndAHalf +base64.h -- Fast base64 encoding and decoding. +version 1.0.0, April 17, 2013 143a + +Copyright (C) 2013 William Sherif + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. +2.
Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. + +William Sherif +will.sherif@gmail.com + +YWxsIHlvdXIgYmFzZSBhcmUgYmVsb25nIHRvIHVz diff --git a/lib/mmseqs/lib/cacode/README b/lib/mmseqs/lib/cacode/README new file mode 100644 index 0000000..45d7b84 --- /dev/null +++ b/lib/mmseqs/lib/cacode/README @@ -0,0 +1,2 @@ +CA_code was extracted from LAST (http://last.cbrc.jp) which is licensed under GPLv3-or-later (see LICENSE.LAST). +CA_code itself is public domain developed by members of the NCBI (see LICENSE.NCBI). diff --git a/lib/mmseqs/lib/gzstream/LICENSE b/lib/mmseqs/lib/gzstream/LICENSE new file mode 100644 index 0000000..b1e3f5a --- /dev/null +++ b/lib/mmseqs/lib/gzstream/LICENSE @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. 
+ + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. 
Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. 
+ + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. 
+ + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
+ +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + <one line to give the library's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/lib/mmseqs/lib/gzstream/README b/lib/mmseqs/lib/gzstream/README new file mode 100644 index 0000000..61d8060 --- /dev/null +++ b/lib/mmseqs/lib/gzstream/README @@ -0,0 +1,7 @@ + + gzstream + C++ iostream classes wrapping the zlib compression library.
+=========================================================================== + + Header Only version of this library from: + https://gist.github.com/piti118/1508048 diff --git a/lib/mmseqs/lib/ksw2/LICENSE.txt b/lib/mmseqs/lib/ksw2/LICENSE.txt new file mode 100644 index 0000000..1a06f64 --- /dev/null +++ b/lib/mmseqs/lib/ksw2/LICENSE.txt @@ -0,0 +1,24 @@ +The MIT License + +Copyright (c) 2018- Dana-Farber Cancer Institute + 2017-2018 Broad Institute, Inc. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/lib/mmseqs/lib/ksw2/README.md b/lib/mmseqs/lib/ksw2/README.md new file mode 100644 index 0000000..1a3676f --- /dev/null +++ b/lib/mmseqs/lib/ksw2/README.md @@ -0,0 +1,163 @@ +## Introduction + +KSW2 is a library to align a pair of biological sequences based on dynamic +programming (DP). So far it comes with global alignment and alignment extension +(no local alignment yet) under an affine gap cost function: gapCost(*k*) = +*q*+*k*\**e*, or a two-piece affine gap cost: gapCost2(*k*) = min{*q*+*k*\**e*, +*q2*+*k*\**e2*}. For the latter cost function, if *q*+*e*<*q2*+*e2* and *e*>*e2*, +(*q*,*e*) is effectively applied to short gaps only, while (*q2*,*e2*) applied +to gaps no shorter than ceil((*q2*-*q*)/(*e*-*e2*)-1). It helps to retain long +gaps. The algorithm behind the two-piece cost is close to [Gotoh +(1990)][piece-affine]. + +KSW2 supports fixed banding and optionally produces alignment paths (i.e. +CIGARs) with gaps either left- or right-aligned. It provides implementations +using SSE2 and SSE4.1 intrinsics based on [Hajime Suzuki][hs]'s diagonal +[formulation][hs-eq] which enables 16-way SSE parallelization for the most part +of the inner loop, regardless of the maximum score of the alignment. + +KSW2 implements the Suzuki-Kasahara algorithm and is a component of +[minimap2][mm2]. If you use KSW2 in your work, please cite: + +> * Suzuki, H. and Kasahara, M. (2018). Introducing difference recurrence relations for faster semi-global alignment of long sequences. *BMC Bioinformatics*, **19**:45. +> * Li, H (2018) Minimap2: pairwise alignment for nucleotide sequences. *Bioinformatics*, **34**:3094-3100. + +## Usage + +Each `ksw2_*.c` file implements a single function and is independent of each +other. 
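+Two of the files listed below (`ksw2_extd.c` and `ksw2_extd2_sse.c`) use the
+dual, i.e. two-piece, gap cost described in the introduction. As a quick,
+self-contained illustration of that cost function only (this sketch is not
+part of the KSW2 API; the helper name and the penalties *q*=4, *e*=2, *q2*=24,
+*e2*=1 are made up for the example), the program below evaluates
+gapCost2(*k*) = min{*q*+*k*\**e*, *q2*+*k*\**e2*} and shows the long-gap
+component (*q2*,*e2*) taking over once *k* reaches (*q2*-*q*)/(*e*-*e2*) = 20:
+```c
+#include <stdio.h>
+
+/* hypothetical helper, not part of ksw2.h:
+ * two-piece gap cost gapCost2(k) = min(q + k*e, q2 + k*e2) */
+static int gap_cost2(int k, int q, int e, int q2, int e2)
+{
+	int c1 = q + k * e, c2 = q2 + k * e2;
+	return c1 < c2 ? c1 : c2;
+}
+
+int main(void)
+{
+	int q = 4, e = 2, q2 = 24, e2 = 1; /* assumed example penalties */
+	int k;
+	/* with these values, (q2,e2) wins for gaps of length >= (q2-q)/(e-e2) = 20 */
+	for (k = 5; k <= 40; k += 5)
+		printf("gap length %2d -> cost %3d\n", k, gap_cost2(k, q, e, q2, e2));
+	return 0;
+}
+```
+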
Here are brief descriptions of what each file implements:
+
+* [ksw2_gg.c](ksw2_gg.c): global alignment; Green's standard formulation
+* [ksw2_gg2.c](ksw2_gg2.c): global alignment; Suzuki's diagonal formulation
+* [ksw2_gg2_sse.c](ksw2_gg2_sse.c): global alignment with SSE intrinsics; Suzuki's
+* [ksw2_extz.c](ksw2_extz.c): global and extension alignment; Green's formulation
+* [ksw2_extz2_sse.c](ksw2_extz2_sse.c): global and extension with SSE intrinsics; Suzuki's
+* [ksw2_extd.c](ksw2_extd.c): global and extension alignment, dual gap cost; Green's formulation
+* [ksw2_extd2_sse.c](ksw2_extd2_sse.c): global and extension, dual gap cost, with SSE intrinsics; Suzuki's
+
+Users are encouraged to copy the header file `ksw2.h` and the relevant
+`ksw2_*.c` file into their own source code trees. On x86 CPUs with SSE2
+intrinsics, `ksw2_extz2_sse.c` is recommended in general. It supports global
+alignment, alignment extension with Z-drop, score-only alignment, global-only
+alignment and right-aligned CIGARs. `ksw2_gg*.c` are mostly for demonstration
+and comparison purposes. They are annotated with more comments and are easier
+to understand than `ksw2_ext*.c`. Header file [ksw2.h](ksw2.h) contains brief
+documentation. TeX file [ksw2.tex](tex/ksw2.tex) gives a brief derivation.
+
+To compile the test program `ksw-test`, just type `make`. It takes advantage
+of SSE4.1 when available. To compile with SSE2 only, use `make sse2=1`
+instead. If you have installed [parasail][para], use `make parasail=prefix`,
+where `prefix` points to the parasail install directory (e.g. `/usr/local`).
+
+The following shows a complete example of how to use the library.
+```c
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h> // for malloc() and free()
+#include "ksw2.h"
+
+void align(const char *tseq, const char *qseq, int sc_mch, int sc_mis, int gapo, int gape)
+{
+	int i, a = sc_mch, b = sc_mis < 0? sc_mis : -sc_mis; // a>0 and b<0
+	int8_t mat[25] = { a,b,b,b,0, b,a,b,b,0, b,b,a,b,0, b,b,b,a,0, 0,0,0,0,0 };
+	int tl = strlen(tseq), ql = strlen(qseq);
+	uint8_t *ts, *qs, c[256];
+	ksw_extz_t ez;
+
+	memset(&ez, 0, sizeof(ksw_extz_t));
+	memset(c, 4, 256);
+	c['A'] = c['a'] = 0; c['C'] = c['c'] = 1;
+	c['G'] = c['g'] = 2; c['T'] = c['t'] = 3; // build the encoding table
+	ts = (uint8_t*)malloc(tl);
+	qs = (uint8_t*)malloc(ql);
+	for (i = 0; i < tl; ++i) ts[i] = c[(uint8_t)tseq[i]]; // encode to 0/1/2/3
+	for (i = 0; i < ql; ++i) qs[i] = c[(uint8_t)qseq[i]];
+	ksw_extz(0, ql, qs, tl, ts, 5, mat, gapo, gape, -1, -1, 0, &ez);
+	for (i = 0; i < ez.n_cigar; ++i) // print CIGAR
+		printf("%d%c", ez.cigar[i]>>4, "MID"[ez.cigar[i]&0xf]);
+	putchar('\n');
+	free(ez.cigar); free(ts); free(qs);
+}
+
+int main(int argc, char *argv[])
+{
+	align("ATAGCTAGCTAGCAT", "AGCTAcCGCAT", 1, -2, 2, 1);
+	return 0;
+}
+```
+
+## Performance Analysis
+
+The following table shows timing on two pairs of long sequences (both in the
+"test" directory).
+ +|Data set|Command line options |Time (s)|CIGAR|Ext|SIMD|Source | +|:-------|:--------------------------------|:-------|:---:|:-:|:--:|:-------| +|50k |-t gg -s |7.3 |N |N |N |ksw2 | +| |-t gg2 -s |19.8 |N |N |N |ksw2 | +| |-t extz -s |9.2 |N |Y |N |ksw2 | +| |-t ps\_nw |9.8 |N |N |N |parasail| +| |-t ps\_nw\_striped\_sse2\_128\_32|2.9 |N |N |SSE2|parasail| +| |-t ps\_nw\_striped\_32 |2.2 |N |N |SSE4|parasail| +| |-t ps\_nw\_diag\_32 |3.0 |N |N |SSE4|parasail| +| |-t ps\_nw\_scan\_32 |3.0 |N |N |SSE4|parasail| +| |-t extz2\_sse -sg |0.96 |N |N |SSE2|ksw2 | +| |-t extz2\_sse -sg |0.84 |N |N |SSE4|ksw2 | +| |-t extz2\_sse -s |3.0 |N |Y |SSE2|ksw2 | +| |-t extz2\_sse -s |2.7 |N |Y |SSE4|ksw2 | +|16.5k |-t gg -s |0.84 |N |N |N |ksw2 | +| |-t gg |1.6 |Y |N |N |ksw2 | +| |-t gg2 |3.3 |Y |N |N |ksw2 | +| |-t extz |2.0 |Y |Y |N |ksw2 | +| |-t extz2\_sse |0.40 |Y |Y |SSE4|ksw2 | +| |-t extz2\_sse -g |0.18 |Y |N |SSE4|ksw2 | + +The standard DP formulation is about twice as fast as Suzuki's diagonal +formulation (`-tgg` vs `-tgg2`), but SSE-based diagonal formulation +is several times faster than the standard DP. If we only want to compute one +global alignment score, we can use 16-way parallelization in the entire inner +loop. For extension alignment, though, we need to keep an array of 32-bit +scores and have to use 4-way parallelization for part of the inner loop. This +significantly reduces performance (`-sg` vs `-s`). KSW2 is faster than +parasail partly because the former uses one score for all matches and another +score for all mismatches. For diagonal formulations, vectorization is more +complex given a generic scoring matrix. + +It is possible to further accelerate global alignment with dynamic banding as +is implemented in [edlib][edlib]. However, it is not as effective for extension +alignment. Another idea is [adaptive banding][adap-band], which might be worth +trying at some point. + +## Alternative Libraries + +|Library |CIGAR|Intra-seq|Affine-gap|Local |Global |Glocal |Extension| +|:---------------|:---:|:-------:|:--------:|:-------:|:-------:|:-------:|:-------:| +|[edlib][edlib] |Yes |Yes |No |Very fast|Very fast|Very fast|N/A | +|[KSW][klib] |Yes |Yes |Yes |Fast |Slow |N/A |Slow | +|KSW2 |Yes |Yes |Yes/dual |N/A |Fast |N/A |Fast | +|[libgaba][gaba] |Yes |Yes |Yes |N/A? |N/A? |N/A? |Fast | +|[libssa][ssa] |No |No? |Yes |Fast |Fast |N/A |N/A | +|[Opal][opal] |No |No |Yes |Fast |Fast |Fast |N/A | +|[Parasail][para]|No |Yes |Yes |Fast |Fast |Fast |N/A | +|[SeqAn][seqan] |Yes |Yes |Yes |Slow |Slow |Slow |N/A | +|[SSW][ssw] |Yes |Yes |Yes |Fast |N/A |N/A |N/A | +|[SWIPE][swipe] |Yes |No |Yes |Fast |N/A? |N/A? |N/A | +|[SWPS3][swps3] |No |Yes |Yes |Fast |N/A? 
|N/A |N/A | + + + +[hs]: https://github.com/ocxtal +[hs-eq]: https://github.com/ocxtal/diffbench +[edlib]: https://github.com/Martinsos/edlib +[klib]: https://github.com/attractivechaos/klib +[para]: https://github.com/jeffdaily/parasail +[opal]: https://github.com/Martinsos/opal +[ssw]: https://github.com/mengyao/Complete-Striped-Smith-Waterman-Library +[ssa]: https://github.com/RonnySoak/libssa +[gaba]: https://github.com/ocxtal/libgaba +[adap-band]: https://github.com/ocxtal/adaptivebandbench +[swipe]: https://github.com/torognes/swipe +[swps3]: http://lab.dessimoz.org/swps3/ +[seqan]: http://seqan.de +[piece-affine]: https://www.ncbi.nlm.nih.gov/pubmed/2165832 +[mm2]: https://github.com/lh3/minimap2 diff --git a/lib/mmseqs/lib/microtar/README.md b/lib/mmseqs/lib/microtar/README.md index 42acf49..8338c8f 100755 --- a/lib/mmseqs/lib/microtar/README.md +++ b/lib/mmseqs/lib/microtar/README.md @@ -1,98 +1,8 @@ # microtar A lightweight tar library written in ANSI C - -## Basic Usage -The library consists of `microtar.c` and `microtar.h`. These two files can be -dropped into an existing project and compiled along with it. - - -#### Reading -```c -mtar_t tar; -mtar_header_t h; -char *p; - -/* Open archive for reading */ -mtar_open(&tar, "test.tar", "r"); - -/* Print all file names and sizes */ -while ( (mtar_read_header(&tar, &h)) != MTAR_ENULLRECORD ) { - printf("%s (%d bytes)\n", h.name, h.size); - mtar_next(&tar); -} - -/* Load and print contents of file "test.txt" */ -mtar_find(&tar, "test.txt", &h); -p = calloc(1, h.size + 1); -mtar_read_data(&tar, p, h.size); -printf("%s", p); -free(p); - -/* Close archive */ -mtar_close(&tar); -``` - -#### Writing -```c -mtar_t tar; -const char *str1 = "Hello world"; -const char *str2 = "Goodbye world"; - -/* Open archive for writing */ -mtar_open(&tar, "test.tar", "w"); - -/* Write strings to files `test1.txt` and `test2.txt` */ -mtar_write_file_header(&tar, "test1.txt", strlen(str1)); -mtar_write_data(&tar, str1, strlen(str1)); -mtar_write_file_header(&tar, "test2.txt", strlen(str2)); -mtar_write_data(&tar, str2, strlen(str2)); - -/* Finalize -- this needs to be the last thing done before closing */ -mtar_finalize(&tar); - -/* Close archive */ -mtar_close(&tar); -``` - - -## Error handling -All functions which return an `int` will return `MTAR_ESUCCESS` if the operation -is successful. If an error occurs an error value less-than-zero will be -returned; this value can be passed to the function `mtar_strerror()` to get its -corresponding error string. - - -## Wrapping a stream -If you want to read or write from something other than a file, the `mtar_t` -struct can be manually initialized with your own callback functions and a -`stream` pointer. - -All callback functions are passed a pointer to the `mtar_t` struct as their -first argument. They should return `MTAR_ESUCCESS` if the operation succeeds -without an error, or an integer below zero if an error occurs. - -After the `stream` field has been set, all required callbacks have been set and -all unused fields have been zeroset the `mtar_t` struct can be safely used with -the microtar functions. `mtar_open` *should not* be called if the `mtar_t` -struct was initialized manually. 
- -#### Reading -The following callbacks should be set for reading an archive from a stream: - -Name | Arguments | Description ---------|------------------------------------------|--------------------------- -`read` | `mtar_t *tar, void *data, unsigned size` | Read data from the stream -`seek` | `mtar_t *tar, unsigned pos` | Set the position indicator -`close` | `mtar_t *tar` | Close the stream - -#### Writing -The following callbacks should be set for writing an archive to a stream: - -Name | Arguments | Description ---------|------------------------------------------------|--------------------- -`write` | `mtar_t *tar, const void *data, unsigned size` | Write data to the stream - +This library was adapted from the original microtar (https://github.com/rxi/microtar) +to be read-only and support fast seeking. ## License This library is free software; you can redistribute it and/or modify it under diff --git a/lib/mmseqs/lib/simd/simd.h b/lib/mmseqs/lib/simd/simd.h index d0d83c4..425e5d5 100644 --- a/lib/mmseqs/lib/simd/simd.h +++ b/lib/mmseqs/lib/simd/simd.h @@ -219,6 +219,8 @@ typedef __m256i simd_int; #define simdi32_mul(x,y) _mm256_mullo_epi32(x,y) #define simdi32_max(x,y) _mm256_max_epi32(x,y) #define simdi16_max(x,y) _mm256_max_epi16(x,y) +#define simdi32_insert(x,y,z) _mm256_insert_epi32(x,y,z) +#define simdi32_extract(x,y) _mm256_extract_epi32(x,y) #define simdi16_hmax(x) simd_hmax16_avx(x) #define simdui8_max(x,y) _mm256_max_epu8(x,y) #define simdi8_hmax(x) simd_hmax8_avx(x) @@ -234,6 +236,7 @@ typedef __m256i simd_int; #define simdi16_shuffle(x,y) _mm256_shuffle_epi16(x,y) #define simdi8_shuffle(x,y) _mm256_shuffle_epi8(x,y) #define simdi_setzero() _mm256_setzero_si256() +#define simdi8_blend(x,y,z) _mm256_blendv_epi8(x,y,z) #define simdi32_gt(x,y) _mm256_cmpgt_epi32(x,y) #define simdi8_gt(x,y) _mm256_cmpgt_epi8(x,y) #define simdi16_gt(x,y) _mm256_cmpgt_epi16(x,y) @@ -253,6 +256,8 @@ typedef __m256i simd_int; #define SIMD_MOVEMASK_MAX 0xffffffff #define simdi8_movemask(x) _mm256_movemask_epi8(x) #define simdi16_extract(x,y) extract_epi16(x,y) +#define simdi32_pack(x,y) _mm256_packs_epi32(x,y) +#define simdi16_pack(x,y) _mm256_packs_epi16(x,y) #define simdi16_slli(x,y) _mm256_slli_epi16(x,y) // shift integers in a left by y #define simdi16_srli(x,y) _mm256_srli_epi16(x,y) // shift integers in a right by y #define simdi32_slli(x,y) _mm256_slli_epi32(x,y) // shift integers in a left by y @@ -407,6 +412,8 @@ typedef __m128i simd_int; #define simdi32_mul(x,y) _mm_mullo_epi32(x,y) // SSE4.1 #define simdi32_max(x,y) _mm_max_epi32(x,y) // SSE4.1 #define simdi16_max(x,y) _mm_max_epi16(x,y) +#define simdi32_insert(x,y,z) _mm_insert_epi32(x,y,z) +#define simdi32_extract(x,y) _mm_extract_epi32(x,y) #define simdi16_hmax(x) simd_hmax16_sse(x) #define simdui8_max(x,y) _mm_max_epu8(x,y) #define simdi8_hmax(x) simd_hmax8_sse(x) @@ -422,6 +429,7 @@ typedef __m128i simd_int; #define simdi16_shuffle(x,y) _mm_shuffle_epi16(x,y) #define simdi8_shuffle(x,y) _mm_shuffle_epi8(x,y) #define simdi_setzero() _mm_setzero_si128() +#define simdi8_blend(x,y,z) _mm_blendv_epi8(x,y,z) #define simdi32_gt(x,y) _mm_cmpgt_epi32(x,y) #define simdi8_gt(x,y) _mm_cmpgt_epi8(x,y) #define simdi32_eq(x,y) _mm_cmpeq_epi32(x,y) @@ -440,6 +448,8 @@ typedef __m128i simd_int; #define SIMD_MOVEMASK_MAX 0xffff #define simdi8_movemask(x) _mm_movemask_epi8(x) #define simdi16_extract(x,y) extract_epi16(x,y) +#define simdi32_pack(x,y) _mm_packs_epi32(x,y) +#define simdi16_pack(x,y) _mm_packs_epi16(x,y) #define simdi16_slli(x,y) 
_mm_slli_epi16(x,y) // shift integers in a left by y #define simdi16_srli(x,y) _mm_srli_epi16(x,y) // shift integers in a right by y #define simdi32_slli(x,y) _mm_slli_epi32(x,y) // shift integers in a left by y diff --git a/lib/mmseqs/lib/simde/simde/arm/neon.h b/lib/mmseqs/lib/simde/simde/arm/neon.h index 86506b8..6a09adf 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon.h @@ -29,13 +29,19 @@ #include "neon/types.h" +#include "neon/aba.h" #include "neon/abd.h" +#include "neon/abdl.h" #include "neon/abs.h" #include "neon/add.h" #include "neon/addl.h" +#include "neon/addlv.h" #include "neon/addl_high.h" +#include "neon/addv.h" #include "neon/addw.h" +#include "neon/addw_high.h" #include "neon/and.h" +#include "neon/bic.h" #include "neon/bsl.h" #include "neon/cagt.h" #include "neon/ceq.h" @@ -46,43 +52,63 @@ #include "neon/cgtz.h" #include "neon/cle.h" #include "neon/clez.h" +#include "neon/cls.h" #include "neon/clt.h" #include "neon/cltz.h" +#include "neon/clz.h" +#include "neon/cnt.h" #include "neon/cvt.h" #include "neon/combine.h" #include "neon/create.h" #include "neon/dot.h" #include "neon/dot_lane.h" +#include "neon/dup_lane.h" #include "neon/dup_n.h" #include "neon/eor.h" #include "neon/ext.h" #include "neon/get_high.h" #include "neon/get_lane.h" #include "neon/get_low.h" +#include "neon/hadd.h" +#include "neon/hsub.h" #include "neon/ld1.h" #include "neon/ld3.h" #include "neon/ld4.h" #include "neon/max.h" +#include "neon/maxnm.h" +#include "neon/maxv.h" #include "neon/min.h" +#include "neon/minnm.h" +#include "neon/minv.h" #include "neon/mla.h" #include "neon/mla_n.h" #include "neon/mlal.h" #include "neon/mlal_high.h" +#include "neon/mlal_n.h" +#include "neon/mls.h" +#include "neon/mlsl.h" +#include "neon/mlsl_high.h" +#include "neon/mlsl_n.h" #include "neon/movl.h" #include "neon/movl_high.h" #include "neon/movn.h" +#include "neon/movn_high.h" #include "neon/mul.h" +#include "neon/mul_lane.h" #include "neon/mul_n.h" #include "neon/mull.h" +#include "neon/mull_high.h" #include "neon/mull_n.h" #include "neon/mvn.h" #include "neon/neg.h" +#include "neon/orn.h" #include "neon/orr.h" #include "neon/padal.h" #include "neon/padd.h" #include "neon/paddl.h" #include "neon/pmax.h" #include "neon/pmin.h" +#include "neon/qabs.h" #include "neon/qadd.h" #include "neon/qdmulh.h" #include "neon/qdmull.h" @@ -91,6 +117,9 @@ #include "neon/qmovn.h" #include "neon/qmovun.h" #include "neon/qmovn_high.h" +#include "neon/qneg.h" +#include "neon/qsub.h" +#include "neon/qshl.h" #include "neon/qtbl.h" #include "neon/qtbx.h" #include "neon/rbit.h" @@ -99,6 +128,7 @@ #include "neon/rev32.h" #include "neon/rev64.h" #include "neon/rhadd.h" +#include "neon/rnd.h" #include "neon/rshl.h" #include "neon/rshr_n.h" #include "neon/rsra_n.h" @@ -113,12 +143,15 @@ #include "neon/st4.h" #include "neon/sub.h" #include "neon/subl.h" +#include "neon/subw.h" +#include "neon/subw_high.h" #include "neon/tbl.h" #include "neon/tbx.h" #include "neon/trn.h" #include "neon/trn1.h" #include "neon/trn2.h" #include "neon/tst.h" +#include "neon/uqadd.h" #include "neon/uzp.h" #include "neon/uzp1.h" #include "neon/uzp2.h" diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/aba.h b/lib/mmseqs/lib/simde/simde/arm/neon/aba.h new file mode 100644 index 0000000..99fb9a0 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/aba.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated 
documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ABA_H) +#define SIMDE_ARM_NEON_ABA_H + +#include "abd.h" +#include "add.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vaba_s8(simde_int8x8_t a, simde_int8x8_t b, simde_int8x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaba_s8(a, b, c); + #else + return simde_vadd_s8(simde_vabd_s8(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaba_s8 + #define vaba_s8(a, b, c) simde_vaba_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vaba_s16(simde_int16x4_t a, simde_int16x4_t b, simde_int16x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaba_s16(a, b, c); + #else + return simde_vadd_s16(simde_vabd_s16(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaba_s16 + #define vaba_s16(a, b, c) simde_vaba_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vaba_s32(simde_int32x2_t a, simde_int32x2_t b, simde_int32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaba_s32(a, b, c); + #else + return simde_vadd_s32(simde_vabd_s32(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaba_s32 + #define vaba_s32(a, b, c) simde_vaba_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vaba_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaba_u8(a, b, c); + #else + return simde_vadd_u8(simde_vabd_u8(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaba_u8 + #define vaba_u8(a, b, c) simde_vaba_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vaba_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaba_u16(a, b, c); + #else + return simde_vadd_u16(simde_vabd_u16(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaba_u16 + #define vaba_u16(a, b, c) simde_vaba_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vaba_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaba_u32(a, b, c); + #else + return simde_vadd_u32(simde_vabd_u32(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef 
vaba_u32 + #define vaba_u32(a, b, c) simde_vaba_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vabaq_s8(simde_int8x16_t a, simde_int8x16_t b, simde_int8x16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabaq_s8(a, b, c); + #else + return simde_vaddq_s8(simde_vabdq_s8(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabaq_s8 + #define vabaq_s8(a, b, c) simde_vabaq_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vabaq_s16(simde_int16x8_t a, simde_int16x8_t b, simde_int16x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabaq_s16(a, b, c); + #else + return simde_vaddq_s16(simde_vabdq_s16(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabaq_s16 + #define vabaq_s16(a, b, c) simde_vabaq_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vabaq_s32(simde_int32x4_t a, simde_int32x4_t b, simde_int32x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabaq_s32(a, b, c); + #else + return simde_vaddq_s32(simde_vabdq_s32(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabaq_s32 + #define vabaq_s32(a, b, c) simde_vabaq_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vabaq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabaq_u8(a, b, c); + #else + return simde_vaddq_u8(simde_vabdq_u8(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabaq_u8 + #define vabaq_u8(a, b, c) simde_vabaq_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vabaq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabaq_u16(a, b, c); + #else + return simde_vaddq_u16(simde_vabdq_u16(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabaq_u16 + #define vabaq_u16(a, b, c) simde_vabaq_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vabaq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabaq_u32(a, b, c); + #else + return simde_vaddq_u32(simde_vabdq_u32(b, c), a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabaq_u32 + #define vabaq_u32(a, b, c) simde_vabaq_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ABA_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/abdl.h b/lib/mmseqs/lib/simde/simde/arm/neon/abdl.h new file mode 100644 index 0000000..0957344 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/abdl.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ABDL_H) +#define SIMDE_ARM_NEON_ABDL_H + +#include "abs.h" +#include "subl.h" +#include "movl.h" +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vabdl_s8(simde_int8x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabdl_s8(a, b); + #else + return simde_vabsq_s16(simde_vsubl_s8(a, b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabdl_s8 + #define vabdl_s8(a, b) simde_vabdl_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vabdl_s16(simde_int16x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabdl_s16(a, b); + #else + return simde_vabsq_s32(simde_vsubl_s16(a, b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabdl_s16 + #define vabdl_s16(a, b) simde_vabdl_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vabdl_s32(simde_int32x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabdl_s32(a, b); + #else + return simde_vabsq_s64(simde_vsubl_s32(a, b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabdl_s32 + #define vabdl_s32(a, b) simde_vabdl_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vabdl_u8(simde_uint8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabdl_u8(a, b); + #else + return simde_vreinterpretq_u16_s16( + simde_vabsq_s16( + simde_vsubq_s16( + simde_vreinterpretq_s16_u16(simde_vmovl_u8(a)), + simde_vreinterpretq_s16_u16(simde_vmovl_u8(b)) + ) + ) + ); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabdl_u8 + #define vabdl_u8(a, b) simde_vabdl_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vabdl_u16(simde_uint16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabdl_u16(a, b); + #else + return simde_vreinterpretq_u32_s32( + simde_vabsq_s32( + simde_vsubq_s32( + simde_vreinterpretq_s32_u32(simde_vmovl_u16(a)), + simde_vreinterpretq_s32_u32(simde_vmovl_u16(b)) + ) + ) + ); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabdl_u16 + #define vabdl_u16(a, b) simde_vabdl_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vabdl_u32(simde_uint32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vabdl_u32(a, b); + #else + return simde_vreinterpretq_u64_s64( + simde_vabsq_s64( + simde_vsubq_s64( + simde_vreinterpretq_s64_u64(simde_vmovl_u32(a)), + simde_vreinterpretq_s64_u64(simde_vmovl_u32(b)) + ) + ) + ); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vabdl_u32 + #define vabdl_u32(a, b) simde_vabdl_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ABDL_H) */ diff 
--git a/lib/mmseqs/lib/simde/simde/arm/neon/abs.h b/lib/mmseqs/lib/simde/simde/arm/neon/abs.h index c4e45f3..922a7f8 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/abs.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/abs.h @@ -361,7 +361,9 @@ simde_int64x2_t simde_vabsq_s64(simde_int64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vabsq_s64(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbslq_s64(vreinterpretq_u64_s64(vshrq_n_s64(a, 63)), vsubq_s64(vdupq_n_s64(0), a), a); + #elif defined(SIMDE_POWER_ALTIVEC_P64_NATIVE) && !defined(HEDLEY_IBM_VERSION) return vec_abs(a); #else simde_int64x2_private diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/addlv.h b/lib/mmseqs/lib/simde/simde/arm/neon/addlv.h new file mode 100644 index 0000000..79d9451 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/addlv.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ADDLV_H) +#define SIMDE_ARM_NEON_ADDLV_H + +#include "types.h" +#include "movl.h" +#include "addv.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vaddlv_s8(simde_int8x8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlv_s8(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddvq_s16(simde_vmovl_s8(a)); + #else + simde_int8x8_private a_ = simde_int8x8_to_private(a); + int16_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlv_s8 + #define vaddlv_s8(a) simde_vaddlv_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vaddlv_s16(simde_int16x4_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlv_s16(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddvq_s32(simde_vmovl_s16(a)); + #else + simde_int16x4_private a_ = simde_int16x4_to_private(a); + int32_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlv_s16 + #define vaddlv_s16(a) simde_vaddlv_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vaddlv_s32(simde_int32x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlv_s32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddvq_s64(simde_vmovl_s32(a)); + #else + simde_int32x2_private a_ = simde_int32x2_to_private(a); + int64_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlv_s32 + #define vaddlv_s32(a) simde_vaddlv_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vaddlv_u8(simde_uint8x8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlv_u8(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddvq_u16(simde_vmovl_u8(a)); + #else + simde_uint8x8_private a_ = simde_uint8x8_to_private(a); + uint16_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlv_u8 + #define vaddlv_u8(a) simde_vaddlv_u8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vaddlv_u16(simde_uint16x4_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlv_u16(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddvq_u32(simde_vmovl_u16(a)); + #else + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + uint32_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlv_u16 + #define vaddlv_u16(a) simde_vaddlv_u16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vaddlv_u32(simde_uint32x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlv_u32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddvq_u64(simde_vmovl_u32(a)); + #else + 
simde_uint32x2_private a_ = simde_uint32x2_to_private(a); + uint64_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlv_u32 + #define vaddlv_u32(a) simde_vaddlv_u32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vaddlvq_s8(simde_int8x16_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlvq_s8(a); + #else + simde_int8x16_private a_ = simde_int8x16_to_private(a); + int16_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlvq_s8 + #define vaddlvq_s8(a) simde_vaddlvq_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vaddlvq_s16(simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlvq_s16(a); + #else + simde_int16x8_private a_ = simde_int16x8_to_private(a); + int32_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlvq_s16 + #define vaddlvq_s16(a) simde_vaddlvq_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vaddlvq_s32(simde_int32x4_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlvq_s32(a); + #else + simde_int32x4_private a_ = simde_int32x4_to_private(a); + int64_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlvq_s32 + #define vaddlvq_s32(a) simde_vaddlvq_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vaddlvq_u8(simde_uint8x16_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlvq_u8(a); + #else + simde_uint8x16_private a_ = simde_uint8x16_to_private(a); + uint16_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlvq_u8 + #define vaddlvq_u8(a) simde_vaddlvq_u8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vaddlvq_u16(simde_uint16x8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlvq_u16(a); + #else + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + uint32_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlvq_u16 + #define vaddlvq_u16(a) simde_vaddlvq_u16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vaddlvq_u32(simde_uint32x4_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddlvq_u32(a); + #else + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + uint64_t r = 0; + + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddlvq_u32 + #define vaddlvq_u32(a) simde_vaddlvq_u32(a) +#endif + +SIMDE_END_DECLS_ 
+HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ADDLV_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/addv.h b/lib/mmseqs/lib/simde/simde/arm/neon/addv.h new file mode 100644 index 0000000..bcc082b --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/addv.h @@ -0,0 +1,447 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ADDV_H) +#define SIMDE_ARM_NEON_ADDV_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vaddv_f32(simde_float32x2_t a) { + simde_float32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_f32 + #define vaddv_f32(v) simde_vaddv_f32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vaddv_s8(simde_int8x8_t a) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_s8(a); + #else + simde_int8x8_private a_ = simde_int8x8_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_s8 + #define vaddv_s8(v) simde_vaddv_s8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vaddv_s16(simde_int16x4_t a) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_s16(a); + #else + simde_int16x4_private a_ = simde_int16x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_s16 + #define vaddv_s16(v) simde_vaddv_s16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vaddv_s32(simde_int32x2_t a) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_s32(a); + #else + simde_int32x2_private a_ = simde_int32x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) 
; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_s32 + #define vaddv_s32(v) simde_vaddv_s32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vaddv_u8(simde_uint8x8_t a) { + uint8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_u8(a); + #else + simde_uint8x8_private a_ = simde_uint8x8_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_u8 + #define vaddv_u8(v) simde_vaddv_u8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vaddv_u16(simde_uint16x4_t a) { + uint16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_u16(a); + #else + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_u16 + #define vaddv_u16(v) simde_vaddv_u16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vaddv_u32(simde_uint32x2_t a) { + uint32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddv_u32(a); + #else + simde_uint32x2_private a_ = simde_uint32x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddv_u32 + #define vaddv_u32(v) simde_vaddv_u32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vaddvq_f32(simde_float32x4_t a) { + simde_float32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_f32(a); + #else + simde_float32x4_private a_ = simde_float32x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_f32 + #define vaddvq_f32(v) simde_vaddvq_f32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vaddvq_f64(simde_float64x2_t a) { + simde_float64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_f64 + #define vaddvq_f64(v) simde_vaddvq_f64(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vaddvq_s8(simde_int8x16_t a) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_s8(a); + #else + simde_int8x16_private a_ = simde_int8x16_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_s8 + #define vaddvq_s8(v) simde_vaddvq_s8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vaddvq_s16(simde_int16x8_t a) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_s16(a); + #else + simde_int16x8_private a_ = 
simde_int16x8_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_s16 + #define vaddvq_s16(v) simde_vaddvq_s16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vaddvq_s32(simde_int32x4_t a) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_s32(a); + #else + simde_int32x4_private a_ = simde_int32x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_s32 + #define vaddvq_s32(v) simde_vaddvq_s32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vaddvq_s64(simde_int64x2_t a) { + int64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_s64(a); + #else + simde_int64x2_private a_ = simde_int64x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_s64 + #define vaddvq_s64(v) simde_vaddvq_s64(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vaddvq_u8(simde_uint8x16_t a) { + uint8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u8(a); + #else + simde_uint8x16_private a_ = simde_uint8x16_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_u8 + #define vaddvq_u8(v) simde_vaddvq_u8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vaddvq_u16(simde_uint16x8_t a) { + uint16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u16(a); + #else + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_u16 + #define vaddvq_u16(v) simde_vaddvq_u16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vaddvq_u32(simde_uint32x4_t a) { + uint32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u32(a); + #else + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_u32 + #define vaddvq_u32(v) simde_vaddvq_u32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vaddvq_u64(simde_uint64x2_t a) { + uint64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u64(a); + #else + simde_uint64x2_private a_ = simde_uint64x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(+:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r += a_.values[i]; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddvq_u64 + #define vaddvq_u64(v) simde_vaddvq_u64(v) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* 
!defined(SIMDE_ARM_NEON_ADDV_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/addw_high.h b/lib/mmseqs/lib/simde/simde/arm/neon/addw_high.h new file mode 100644 index 0000000..620120c --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/addw_high.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ADDW_HIGH_H) +#define SIMDE_ARM_NEON_ADDW_HIGH_H + +#include "types.h" +#include "movl.h" +#include "add.h" +#include "get_high.h" +#include "get_low.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vaddw_high_s8(simde_int16x8_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddw_high_s8(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddq_s16(a, simde_vmovl_s8(simde_vget_high_s8(b))); + #else + simde_int16x8_private r_; + simde_int16x8_private a_ = simde_int16x8_to_private(a); + simde_int8x16_private b_ = simde_int8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddw_high_s8 + #define vaddw_high_s8(a, b) simde_vaddw_high_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vaddw_high_s16(simde_int32x4_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddw_high_s16(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddq_s32(a, simde_vmovl_s16(simde_vget_high_s16(b))); + #else + simde_int32x4_private r_; + simde_int32x4_private a_ = simde_int32x4_to_private(a); + simde_int16x8_private b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddw_high_s16 + #define vaddw_high_s16(a, b) simde_vaddw_high_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vaddw_high_s32(simde_int64x2_t a, simde_int32x4_t b) { + #if 
defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddw_high_s32(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddq_s64(a, simde_vmovl_s32(simde_vget_high_s32(b))); + #else + simde_int64x2_private r_; + simde_int64x2_private a_ = simde_int64x2_to_private(a); + simde_int32x4_private b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddw_high_s32 + #define vaddw_high_s32(a, b) simde_vaddw_high_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vaddw_high_u8(simde_uint16x8_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddw_high_u8(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddq_u16(a, simde_vmovl_u8(simde_vget_high_u8(b))); + #else + simde_uint16x8_private r_; + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + simde_uint8x16_private b_ = simde_uint8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddw_high_u8 + #define vaddw_high_u8(a, b) simde_vaddw_high_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vaddw_high_u16(simde_uint32x4_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddw_high_u16(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddq_u32(a, simde_vmovl_u16(simde_vget_high_u16(b))); + #else + simde_uint32x4_private r_; + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + simde_uint16x8_private b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddw_high_u16 + #define vaddw_high_u16(a, b) simde_vaddw_high_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vaddw_high_u32(simde_uint64x2_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddw_high_u32(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vaddq_u64(a, simde_vmovl_u32(simde_vget_high_u32(b))); + #else + simde_uint64x2_private r_; + simde_uint64x2_private a_ = simde_uint64x2_to_private(a); + simde_uint32x4_private b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vaddw_high_u32 + #define vaddw_high_u32(a, b) simde_vaddw_high_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ADDW_HIGH_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/bic.h b/lib/mmseqs/lib/simde/simde/arm/neon/bic.h new file mode 100644 index 0000000..4ceba1b --- 
/dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/bic.h @@ -0,0 +1,472 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_BIC_H) +#define SIMDE_ARM_NEON_BIC_H + +#include "dup_n.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vbic_s8(simde_int8x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_s8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_int8x8_private + a_ = simde_int8x8_to_private(a), + b_ = simde_int8x8_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_s8 + #define vbic_s8(a, b) simde_vbic_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vbic_s16(simde_int16x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_s16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_int16x4_private + a_ = simde_int16x4_to_private(a), + b_ = simde_int16x4_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_s16 + #define vbic_s16(a, b) simde_vbic_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vbic_s32(simde_int32x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_s32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_int32x2_private + a_ = simde_int32x2_to_private(a), + b_ = simde_int32x2_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_s32 + #define vbic_s32(a, b) simde_vbic_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vbic_s64(simde_int64x1_t a, simde_int64x1_t b) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_s64(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_int64x1_private + a_ = simde_int64x1_to_private(a), + b_ = simde_int64x1_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_s64 + #define vbic_s64(a, b) simde_vbic_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vbic_u8(simde_uint8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_u8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_uint8x8_private + a_ = simde_uint8x8_to_private(a), + b_ = simde_uint8x8_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_u8 + #define vbic_u8(a, b) simde_vbic_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vbic_u16(simde_uint16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_u16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_uint16x4_private + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_u16 + #define vbic_u16(a, b) simde_vbic_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vbic_u32(simde_uint32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_u32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_uint32x2_private + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_u32 + #define vbic_u32(a, b) simde_vbic_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vbic_u64(simde_uint64x1_t a, simde_uint64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbic_u64(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_andnot_si64(b, a); + #else + simde_uint64x1_private + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbic_u64 + #define vbic_u64(a, b) simde_vbic_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vbicq_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_s8(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return 
wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_andc(a, b); + #else + simde_int8x16_private + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_s8 + #define vbicq_s8(a, b) simde_vbicq_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vbicq_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_s16(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_andc(a, b); + #else + simde_int16x8_private + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_s16 + #define vbicq_s16(a, b) simde_vbicq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vbicq_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_s32(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_andc(a, b); + #else + simde_int32x4_private + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_s32 + #define vbicq_s32(a, b) simde_vbicq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vbicq_s64(simde_int64x2_t a, simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_s64(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_andc(a, b); + #else + simde_int64x2_private + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_s64 + #define vbicq_s64(a, b) simde_vbicq_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vbicq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_u8(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_andc(a, b); + #else + simde_uint8x16_private + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / 
sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_u8 + #define vbicq_u8(a, b) simde_vbicq_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vbicq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_u16(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_andc(a, b); + #else + simde_uint16x8_private + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_u16 + #define vbicq_u16(a, b) simde_vbicq_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vbicq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_u32(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_andc(a, b); + #else + simde_uint32x4_private + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_u32 + #define vbicq_u32(a, b) simde_vbicq_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vbicq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vbicq_u64(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_andnot_si128(b, a); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_andnot(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_andc(a, b); + #else + simde_uint64x2_private + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(b), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbicq_u64 + #define vbicq_u64(a, b) simde_vbicq_u64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_BIC_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/bsl.h b/lib/mmseqs/lib/simde/simde/arm/neon/bsl.h index 2f8af8a..dd519e3 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/bsl.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/bsl.h @@ -228,6 +228,8 @@ simde_vbslq_f32(simde_uint32x4_t a, simde_float32x4_t b, simde_float32x4_t c) { return vbslq_f32(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_castsi128_ps(_mm_ternarylogic_epi32(a, _mm_castps_si128(b), _mm_castps_si128(c), 0xca)); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u32(a), @@ -249,6 +251,8 @@ simde_vbslq_f64(simde_uint64x2_t a, 
simde_float64x2_t b, simde_float64x2_t c) { return vbslq_f64(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_castsi128_pd(_mm_ternarylogic_epi32(a, _mm_castpd_si128(b), _mm_castpd_si128(c), 0xca)); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u64(a), @@ -269,6 +273,8 @@ simde_vbslq_s8(simde_uint8x16_t a, simde_int8x16_t b, simde_int8x16_t c) { return vbslq_s8(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = (a), @@ -291,6 +297,8 @@ simde_vbslq_s16(simde_uint16x8_t a, simde_int16x8_t b, simde_int16x8_t c) { return wasm_v128_bitselect(b, c, a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_sel(c, b, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u16(a), @@ -311,6 +319,8 @@ simde_vbslq_s32(simde_uint32x4_t a, simde_int32x4_t b, simde_int32x4_t c) { return vbslq_s32(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u32(a), @@ -331,6 +341,8 @@ simde_vbslq_s64(simde_uint64x2_t a, simde_int64x2_t b, simde_int64x2_t c) { return vbslq_s64(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u64(a), @@ -353,6 +365,8 @@ simde_vbslq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) { return wasm_v128_bitselect(b, c, a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_sel(c, b, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else return simde_veorq_u8(c, simde_vandq_u8(simde_veorq_u8(c, b), a)); #endif @@ -369,6 +383,8 @@ simde_vbslq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) { return vbslq_u16(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u16(a), @@ -389,6 +405,8 @@ simde_vbslq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) { return vbslq_u32(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u32(a), @@ -409,6 +427,8 @@ simde_vbslq_u64(simde_uint64x2_t a, simde_uint64x2_t b, simde_uint64x2_t c) { return vbslq_u64(a, b, c); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(b, c, a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, c, 0xca); #else simde_uint8x16_t a_ = simde_vreinterpretq_u8_u64(a), diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/cge.h b/lib/mmseqs/lib/simde/simde/arm/neon/cge.h index 7b35820..7373801 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/cge.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/cge.h @@ -209,6 +209,8 @@ simde_uint64x2_t simde_vcgeq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcgeq_s64(a, b); + 
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vreinterpretq_u64_s32(vmvnq_s32(vreinterpretq_s32_s64(vshrq_n_s64(vqsubq_s64(a, b), 63)))); #elif defined(SIMDE_X86_SSE4_2_NATIVE) return _mm_or_si128(_mm_cmpgt_epi64(a, b), _mm_cmpeq_epi64(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/cgt.h b/lib/mmseqs/lib/simde/simde/arm/neon/cgt.h index 353da78..d9717d3 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/cgt.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/cgt.h @@ -211,8 +211,15 @@ simde_uint64x2_t simde_vcgtq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcgtq_s64(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vreinterpretq_u64_s64(vshrq_n_s64(vqsubq_s64(b, a), 63)); #elif defined(SIMDE_X86_SSE4_2_NATIVE) return _mm_cmpgt_epi64(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/65175746/501126 */ + __m128i r = _mm_and_si128(_mm_cmpeq_epi32(a, b), _mm_sub_epi64(b, a)); + r = _mm_or_si128(r, _mm_cmpgt_epi32(a, b)); + return _mm_shuffle_epi32(r, _MM_SHUFFLE(3,3,1,1)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpgt(a, b)); #else diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/cle.h b/lib/mmseqs/lib/simde/simde/arm/neon/cle.h index 0ffe8c7..a11d288 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/cle.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/cle.h @@ -209,6 +209,8 @@ simde_uint64x2_t simde_vcleq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcleq_s64(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vreinterpretq_u64_s32(vmvnq_s32(vreinterpretq_s32_s64(vshrq_n_s64(vqsubq_s64(b, a), 63)))); #elif defined(SIMDE_X86_SSE4_2_NATIVE) return _mm_or_si128(_mm_cmpgt_epi64(b, a), _mm_cmpeq_epi64(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/cls.h b/lib/mmseqs/lib/simde/simde/arm/neon/cls.h new file mode 100644 index 0000000..d71f855 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/cls.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_CLS_H) +#define SIMDE_ARM_NEON_CLS_H + +#include "types.h" +#include "bsl.h" +#include "clz.h" +#include "cltz.h" +#include "dup_n.h" +#include "mvn.h" +#include "sub.h" +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vcls_s8(simde_int8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcls_s8(a); + #else + return simde_vsub_s8(simde_vclz_s8(simde_vbsl_s8(simde_vcltz_s8(a), simde_vmvn_s8(a), a)), simde_vdup_n_s8(INT8_C(1))); + #endif +} +#define simde_vcls_u8(a) simde_vcls_s8(simde_vreinterpret_s8_u8(a)) +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcls_s8 + #define vcls_s8(a) simde_vcls_s8(a) + #undef vcls_u8 + #define vcls_u8(a) simde_vcls_u8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vcls_s16(simde_int16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcls_s16(a); + #else + return simde_vsub_s16(simde_vclz_s16(simde_vbsl_s16(simde_vcltz_s16(a), simde_vmvn_s16(a), a)), simde_vdup_n_s16(INT16_C(1))); + #endif +} +#define simde_vcls_u16(a) simde_vcls_s16(simde_vreinterpret_s16_u16(a)) +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcls_s16 + #define vcls_s16(a) simde_vcls_s16(a) + #undef vcls_u16 + #define vcls_u16(a) simde_vcls_u16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vcls_s32(simde_int32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcls_s32(a); + #else + return simde_vsub_s32(simde_vclz_s32(simde_vbsl_s32(simde_vcltz_s32(a), simde_vmvn_s32(a), a)), simde_vdup_n_s32(INT32_C(1))); + #endif +} +#define simde_vcls_u32(a) simde_vcls_s32(simde_vreinterpret_s32_u32(a)) +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcls_s32 + #define vcls_s32(a) simde_vcls_s32(a) + #undef vcls_u32 + #define vcls_u32(a) simde_vcls_u32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vclsq_s8(simde_int8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclsq_s8(a); + #else + return simde_vsubq_s8(simde_vclzq_s8(simde_vbslq_s8(simde_vcltzq_s8(a), simde_vmvnq_s8(a), a)), simde_vdupq_n_s8(INT8_C(1))); + #endif +} +#define simde_vclsq_u8(a) simde_vclsq_s8(simde_vreinterpretq_s8_u8(a)) +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclsq_s8 + #define vclsq_s8(a) simde_vclsq_s8(a) + #undef vclsq_u8 + #define vclsq_u8(a) simde_vclsq_u8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vclsq_s16(simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclsq_s16(a); + #else + return simde_vsubq_s16(simde_vclzq_s16(simde_vbslq_s16(simde_vcltzq_s16(a), simde_vmvnq_s16(a), a)), simde_vdupq_n_s16(INT16_C(1))); + #endif +} +#define simde_vclsq_u16(a) simde_vclsq_s16(simde_vreinterpretq_s16_u16(a)) +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclsq_s16 + #define vclsq_s16(a) simde_vclsq_s16(a) + #undef vclsq_u16 + #define vclsq_u16(a) simde_vclsq_u16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vclsq_s32(simde_int32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclsq_s32(a); + #else + return simde_vsubq_s32(simde_vclzq_s32(simde_vbslq_s32(simde_vcltzq_s32(a), simde_vmvnq_s32(a), a)), simde_vdupq_n_s32(INT32_C(1))); + #endif +} +#define simde_vclsq_u32(a) simde_vclsq_s32(simde_vreinterpretq_s32_u32(a)) +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef 
vclsq_s32 + #define vclsq_s32(a) simde_vclsq_s32(a) + #undef vclsq_u32 + #define vclsq_u32(a) simde_vclsq_u32(a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CLS_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/clt.h b/lib/mmseqs/lib/simde/simde/arm/neon/clt.h index 0387ae1..b9d3866 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/clt.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/clt.h @@ -211,6 +211,8 @@ simde_uint64x2_t simde_vcltq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltq_s64(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vreinterpretq_u64_s64(vshrq_n_s64(vqsubq_s64(a, b), 63)); #elif defined(SIMDE_X86_SSE4_2_NATIVE) return _mm_cmpgt_epi64(b, a); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/cltz.h b/lib/mmseqs/lib/simde/simde/arm/neon/cltz.h index 96ea6e2..5c53183 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/cltz.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/cltz.h @@ -184,7 +184,7 @@ simde_vcltzq_f64(simde_float64x2_t a) { simde_float64x2_private a_ = simde_float64x2_to_private(a); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && 0 + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < SIMDE_FLOAT64_C(0.0)); #else SIMDE_VECTORIZE diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/clz.h b/lib/mmseqs/lib/simde/simde/arm/neon/clz.h new file mode 100644 index 0000000..72770c0 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/clz.h @@ -0,0 +1,423 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_CLZ_H) +#define SIMDE_ARM_NEON_CLZ_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_x_vclzb_u8(uint8_t a) { + #if \ + defined(SIMDE_BUILTIN_SUFFIX_8_) && \ + ( \ + SIMDE_BUILTIN_HAS_8_(clz) || \ + HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + HEDLEY_IBM_VERSION_CHECK(13,1,0) \ + ) + if (HEDLEY_UNLIKELY(a == 0)) + return 8 * sizeof(a); + + return HEDLEY_STATIC_CAST(uint8_t, SIMDE_BUILTIN_8_(clz)(HEDLEY_STATIC_CAST(unsigned SIMDE_BUILTIN_TYPE_8_, a))); + #else + uint8_t r; + uint8_t shift; + + if (HEDLEY_UNLIKELY(a == 0)) + return 8 * sizeof(r); + + r = HEDLEY_STATIC_CAST(uint8_t, (a > UINT8_C(0x0F)) << 2); a >>= r; + shift = HEDLEY_STATIC_CAST(uint8_t, (a > UINT8_C(0x03)) << 1); a >>= shift; r |= shift; + r |= (a >> 1); + + return ((8 * sizeof(r)) - 1) - r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_x_vclzh_u16(uint16_t a) { + #if \ + defined(SIMDE_BUILTIN_SUFFIX_16_) && \ + ( \ + SIMDE_BUILTIN_HAS_16_(clz) || \ + HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + HEDLEY_IBM_VERSION_CHECK(13,1,0) \ + ) + if (HEDLEY_UNLIKELY(a == 0)) + return 8 * sizeof(a); + + return HEDLEY_STATIC_CAST(uint16_t, SIMDE_BUILTIN_16_(clz)(HEDLEY_STATIC_CAST(unsigned SIMDE_BUILTIN_TYPE_16_, a))); + #else + uint16_t r; + uint16_t shift; + + if (HEDLEY_UNLIKELY(a == 0)) + return 8 * sizeof(r); + + r = HEDLEY_STATIC_CAST(uint16_t, (a > UINT16_C(0x00FF)) << 3); a >>= r; + shift = HEDLEY_STATIC_CAST(uint16_t, (a > UINT16_C(0x000F)) << 2); a >>= shift; r |= shift; + shift = HEDLEY_STATIC_CAST(uint16_t, (a > UINT16_C(0x0003)) << 1); a >>= shift; r |= shift; + r |= (a >> 1); + + return ((8 * sizeof(r)) - 1) - r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_x_vclzs_u32(uint32_t a) { + #if \ + defined(SIMDE_BUILTIN_SUFFIX_32_) && \ + ( \ + SIMDE_BUILTIN_HAS_32_(clz) || \ + HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + HEDLEY_IBM_VERSION_CHECK(13,1,0) \ + ) + if (HEDLEY_UNLIKELY(a == 0)) + return 8 * sizeof(a); + + return HEDLEY_STATIC_CAST(uint32_t, SIMDE_BUILTIN_32_(clz)(HEDLEY_STATIC_CAST(unsigned SIMDE_BUILTIN_TYPE_32_, a))); + #else + uint32_t r; + uint32_t shift; + + if (HEDLEY_UNLIKELY(a == 0)) + return 8 * sizeof(a); + + r = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0xFFFF)) << 4); a >>= r; + shift = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0x00FF)) << 3); a >>= shift; r |= shift; + shift = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0x000F)) << 2); a >>= shift; r |= shift; + shift = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0x0003)) << 1); a >>= shift; r |= shift; + r |= (a >> 1); + + return ((8 * sizeof(r)) - 1) - r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_x_vclzb_s8(int8_t a) { + return HEDLEY_STATIC_CAST(int8_t, simde_x_vclzb_u8(HEDLEY_STATIC_CAST(uint8_t, a))); +} + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_x_vclzh_s16(int16_t a) { + return HEDLEY_STATIC_CAST(int16_t, simde_x_vclzh_u16(HEDLEY_STATIC_CAST(uint16_t, a))); +} + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_x_vclzs_s32(int32_t a) { + return HEDLEY_STATIC_CAST(int32_t, simde_x_vclzs_u32(HEDLEY_STATIC_CAST(uint32_t, a))); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vclz_s8(simde_int8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclz_s8(a); + #else + simde_int8x8_private + a_ = simde_int8x8_to_private(a), + r_; + 
+ for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzb_s8(a_.values[i]); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclz_s8 + #define vclz_s8(a) simde_vclz_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vclz_s16(simde_int16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclz_s16(a); + #else + simde_int16x4_private + a_ = simde_int16x4_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzh_s16(a_.values[i]); + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclz_s16 + #define vclz_s16(a) simde_vclz_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vclz_s32(simde_int32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclz_s32(a); + #else + simde_int32x2_private + a_ = simde_int32x2_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzs_s32(a_.values[i]); + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclz_s32 + #define vclz_s32(a) simde_vclz_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vclz_u8(simde_uint8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclz_u8(a); + #else + simde_uint8x8_private + a_ = simde_uint8x8_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzb_u8(a_.values[i]); + } + + return simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclz_u8 + #define vclz_u8(a) simde_vclz_u8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vclz_u16(simde_uint16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclz_u16(a); + #else + simde_uint16x4_private + a_ = simde_uint16x4_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzh_u16(a_.values[i]); + } + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclz_u16 + #define vclz_u16(a) simde_vclz_u16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vclz_u32(simde_uint32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclz_u32(a); + #else + simde_uint32x2_private + a_ = simde_uint32x2_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzs_u32(a_.values[i]); + } + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclz_u32 + #define vclz_u32(a) simde_vclz_u32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vclzq_s8(simde_int8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclzq_s8(a); + #elif defined(SIMDE_X86_GFNI_NATIVE) + /* https://gist.github.com/animetosho/6cb732ccb5ecd86675ca0a442b3c0622 */ + a = _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0); + a = _mm_andnot_si128(_mm_add_epi8(a, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a); 
+ return _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8); + #else + simde_int8x16_private + a_ = simde_int8x16_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzb_s8(a_.values[i]); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclzq_s8 + #define vclzq_s8(a) simde_vclzq_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vclzq_s16(simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclzq_s16(a); + #else + simde_int16x8_private + a_ = simde_int16x8_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzh_s16(a_.values[i]); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclzq_s16 + #define vclzq_s16(a) simde_vclzq_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vclzq_s32(simde_int32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclzq_s32(a); + #else + simde_int32x4_private + a_ = simde_int32x4_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzs_s32(a_.values[i]); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclzq_s32 + #define vclzq_s32(a) simde_vclzq_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vclzq_u8(simde_uint8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclzq_u8(a); + #elif defined(SIMDE_X86_GFNI_NATIVE) + a = _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0); + a = _mm_andnot_si128(_mm_add_epi8(a, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a); + return _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8); + #else + simde_uint8x16_private + a_ = simde_uint8x16_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzb_u8(a_.values[i]); + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclzq_u8 + #define vclzq_u8(a) simde_vclzq_u8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vclzq_u16(simde_uint16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclzq_u16(a); + #else + simde_uint16x8_private + a_ = simde_uint16x8_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzh_u16(a_.values[i]); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclzq_u16 + #define vclzq_u16(a) simde_vclzq_u16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vclzq_u32(simde_uint32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vclzq_u32(a); + #else + simde_uint32x4_private + a_ = simde_uint32x4_to_private(a), + r_; + + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzs_u32(a_.values[i]); + } + + 
return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vclzq_u32 + #define vclzq_u32(a) simde_vclzq_u32(a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CLZ_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/cnt.h b/lib/mmseqs/lib/simde/simde/arm/neon/cnt.h new file mode 100644 index 0000000..aef7349 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/cnt.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_CNT_H) +#define SIMDE_ARM_NEON_CNT_H + +#include "types.h" +#include <limits.h> + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_x_arm_neon_cntb(uint8_t v) { + v = v - ((v >> 1) & (85)); + v = (v & (51)) + ((v >> (2)) & (51)); + v = (v + (v >> (4))) & (15); + return HEDLEY_STATIC_CAST(uint8_t, v) >> (sizeof(uint8_t) - 1) * CHAR_BIT; +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vcnt_s8(simde_int8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcnt_s8(a); + #else + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, simde_x_arm_neon_cntb(HEDLEY_STATIC_CAST(uint8_t, a_.values[i]))); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcnt_s8 + #define vcnt_s8(a) simde_vcnt_s8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vcnt_u8(simde_uint8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcnt_u8(a); + #else + simde_uint8x8_private + r_, + a_ = simde_uint8x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_arm_neon_cntb(a_.values[i]); + } + + return simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcnt_u8 + #define vcnt_u8(a) simde_vcnt_u8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vcntq_s8(simde_int8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcntq_s8(a); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), 
vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), a))); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, simde_x_arm_neon_cntb(HEDLEY_STATIC_CAST(uint8_t, a_.values[i]))); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcntq_s8 + #define vcntq_s8(a) simde_vcntq_s8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vcntq_u8(simde_uint8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcntq_u8(a); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_popcnt(a); + #else + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_arm_neon_cntb(a_.values[i]); + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vcntq_u8 + #define vcntq_u8(a) simde_vcntq_u8((a)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CNT_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/dot.h b/lib/mmseqs/lib/simde/simde/arm/neon/dot.h index 19c1fe5..fa7febe 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/dot.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/dot.h @@ -67,7 +67,7 @@ simde_vdot_s32(simde_int32x2_t r, simde_int8x8_t a, simde_int8x8_t b) { return simde_vadd_s32(r, simde_int32x2_from_private(r_)); #endif } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdot_s32 #define vdot_s32(r, a, b) simde_vdot_s32((r), (a), (b)) #endif @@ -97,7 +97,7 @@ simde_vdot_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x8_t b) { return simde_vadd_u32(r, simde_uint32x2_from_private(r_)); #endif } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdot_u32 #define vdot_u32(r, a, b) simde_vdot_u32((r), (a), (b)) #endif @@ -128,7 +128,7 @@ simde_vdotq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b) { return simde_vaddq_s32(r, simde_int32x4_from_private(r_)); #endif } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdotq_s32 #define vdotq_s32(r, a, b) simde_vdotq_s32((r), (a), (b)) #endif @@ -159,7 +159,7 @@ simde_vdotq_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x16_t b) { return simde_vaddq_u32(r, simde_uint32x4_from_private(r_)); #endif } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdotq_u32 #define vdotq_u32(r, a, b) simde_vdotq_u32((r), (a), (b)) #endif diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/dot_lane.h b/lib/mmseqs/lib/simde/simde/arm/neon/dot_lane.h index 8413369..9910ba6 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/dot_lane.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/dot_lane.h @@ -74,7 +74,7 @@ simde_vdot_lane_s32(simde_int32x2_t r, simde_int8x8_t a, 
simde_int8x8_t b, const #endif return result; } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdot_lane_s32 #define vdot_lane_s32(r, a, b, lane) simde_vdot_lane_s32((r), (a), (b), (lane)) #endif @@ -110,7 +110,7 @@ simde_vdot_lane_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x8_t b, co #endif return result; } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdot_lane_u32 #define vdot_lane_u32(r, a, b, lane) simde_vdot_lane_u32((r), (a), (b), (lane)) #endif @@ -147,7 +147,7 @@ simde_vdot_laneq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b, co #endif return result; } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdot_laneq_s32 #define vdot_laneq_s32(r, a, b, lane) simde_vdot_laneq_s32((r), (a), (b), (lane)) #endif @@ -184,7 +184,7 @@ simde_vdot_laneq_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x16_t b, #endif return result; } -#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) #undef vdot_laneq_u32 #define vdot_laneq_u32(r, a, b, lane) simde_vdot_laneq_u32((r), (a), (b), (lane)) #endif diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/dup_lane.h b/lib/mmseqs/lib/simde/simde/arm/neon/dup_lane.h new file mode 100644 index 0000000..66e1ab6 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/dup_lane.h @@ -0,0 +1,702 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_DUP_LANE_H) +#define SIMDE_ARM_NEON_DUP_LANE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vdup_lane_f32(simde_float32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float32x2_private + vec_ = simde_float32x2_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_float32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_f32(vec, lane) vdup_lane_f32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_f32 + #define vdup_lane_f32(vec, lane) simde_vdup_lane_f32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vdup_lane_f64(simde_float64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + (void) lane; + return vec; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_f64 + #define vdup_lane_f64(vec, lane) simde_vdup_lane_f64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vdup_lane_s8(simde_int8x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int8x8_private + vec_ = simde_int8x8_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int8x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_s8(vec, lane) vdup_lane_s8(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_s8 + #define vdup_lane_s8(vec, lane) simde_vdup_lane_s8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vdup_lane_s16(simde_int16x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int16x4_private + vec_ = simde_int16x4_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_s16(vec, lane) vdup_lane_s16(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_s16 + #define vdup_lane_s16(vec, lane) simde_vdup_lane_s16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vdup_lane_s32(simde_int32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int32x2_private + vec_ = simde_int32x2_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_s32(vec, lane) vdup_lane_s32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_s32 + #define vdup_lane_s32(vec, lane) simde_vdup_lane_s32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vdup_lane_s64(simde_int64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_int64x1_private + vec_ = simde_int64x1_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_s64(vec, lane) vdup_lane_s64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_s64 + #define vdup_lane_s64(vec, lane) simde_vdup_lane_s64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vdup_lane_u8(simde_uint8x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint8x8_private + vec_ = simde_uint8x8_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint8x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_u8(vec, lane) vdup_lane_u8(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_u8 + #define vdup_lane_u8(vec, lane) simde_vdup_lane_u8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vdup_lane_u16(simde_uint16x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint16x4_private + vec_ = simde_uint16x4_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_u16(vec, lane) vdup_lane_u16(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_u16 + #define vdup_lane_u16(vec, lane) simde_vdup_lane_u16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vdup_lane_u32(simde_uint32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint32x2_private + vec_ = simde_uint32x2_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_u32(vec, lane) vdup_lane_u32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_u32 + #define vdup_lane_u32(vec, lane) simde_vdup_lane_u32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vdup_lane_u64(simde_uint64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_uint64x1_private + vec_ = simde_uint64x1_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdup_lane_u64(vec, lane) vdup_lane_u64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_lane_u64 + #define vdup_lane_u64(vec, lane) simde_vdup_lane_u64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vdup_laneq_f32(simde_float32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_float32x4_private vec_ = simde_float32x4_to_private(vec); + simde_float32x2_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return 
simde_float32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_f32(vec, lane) vdup_laneq_f32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_f32 + #define vdup_laneq_f32(vec, lane) simde_vdup_laneq_f32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vdup_laneq_f64(simde_float64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float64x2_private vec_ = simde_float64x2_to_private(vec); + simde_float64x1_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_float64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_f64 + #define vdup_laneq_f64(vec, lane) simde_vdup_laneq_f64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vdup_laneq_s8(simde_int8x16_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_int8x16_private vec_ = simde_int8x16_to_private(vec); + simde_int8x8_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int8x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_s8(vec, lane) vdup_laneq_s8(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_s8 + #define vdup_laneq_s8(vec, lane) simde_vdup_laneq_s8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vdup_laneq_s16(simde_int16x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int16x8_private vec_ = simde_int16x8_to_private(vec); + simde_int16x4_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_s16(vec, lane) vdup_laneq_s16(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_s16 + #define vdup_laneq_s16(vec, lane) simde_vdup_laneq_s16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vdup_laneq_s32(simde_int32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x4_private vec_ = simde_int32x4_to_private(vec); + simde_int32x2_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_s32(vec, lane) vdup_laneq_s32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_s32 + #define vdup_laneq_s32(vec, lane) simde_vdup_laneq_s32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vdup_laneq_s64(simde_int64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int64x2_private vec_ = simde_int64x2_to_private(vec); + simde_int64x1_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_s64(vec, lane) vdup_laneq_s64(vec, lane) 
+#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_s64 + #define vdup_laneq_s64(vec, lane) simde_vdup_laneq_s64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vdup_laneq_u8(simde_uint8x16_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_uint8x16_private vec_ = simde_uint8x16_to_private(vec); + simde_uint8x8_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint8x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_u8(vec, lane) vdup_laneq_u8(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_u8 + #define vdup_laneq_u8(vec, lane) simde_vdup_laneq_u8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vdup_laneq_u16(simde_uint16x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint16x8_private vec_ = simde_uint16x8_to_private(vec); + simde_uint16x4_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_u16(vec, lane) vdup_laneq_u16(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_u16 + #define vdup_laneq_u16(vec, lane) simde_vdup_laneq_u16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vdup_laneq_u32(simde_uint32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x4_private vec_ = simde_uint32x4_to_private(vec); + simde_uint32x2_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_u32(vec, lane) vdup_laneq_u32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_u32 + #define vdup_laneq_u32(vec, lane) simde_vdup_laneq_u32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vdup_laneq_u64(simde_uint64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint64x2_private vec_ = simde_uint64x2_to_private(vec); + simde_uint64x1_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_u64(vec, lane) vdup_laneq_u64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdup_laneq_u64 + #define vdup_laneq_u64(vec, lane) simde_vdup_laneq_u64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vdupq_laneq_f32(simde_float32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_float32x4_private + vec_ = simde_float32x4_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_float32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_f32(vec, lane) vdupq_laneq_f32(vec, lane) +#endif +#if 
defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_f32 + #define vdupq_laneq_f32(vec, lane) simde_vdupq_laneq_f32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vdupq_laneq_f64(simde_float64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float64x2_private + vec_ = simde_float64x2_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_float64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_f64 + #define vdupq_laneq_f64(vec, lane) simde_vdupq_laneq_f64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vdupq_laneq_s8(simde_int8x16_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_int8x16_private + vec_ = simde_int8x16_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int8x16_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_s8(vec, lane) vdupq_laneq_s8(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_s8 + #define vdupq_laneq_s8(vec, lane) simde_vdupq_laneq_s8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vdupq_laneq_s16(simde_int16x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int16x8_private + vec_ = simde_int16x8_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_s16(vec, lane) vdupq_laneq_s16(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_s16 + #define vdupq_laneq_s16(vec, lane) simde_vdupq_laneq_s16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vdupq_laneq_s32(simde_int32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x4_private + vec_ = simde_int32x4_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_s32(vec, lane) vdupq_laneq_s32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_s32 + #define vdupq_laneq_s32(vec, lane) simde_vdupq_laneq_s32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vdupq_laneq_s64(simde_int64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int64x2_private + vec_ = simde_int64x2_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_int64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_s64(vec, lane) vdupq_laneq_s64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_s64 + #define vdupq_laneq_s64(vec, lane) simde_vdupq_laneq_s64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t 
+simde_vdupq_laneq_u8(simde_uint8x16_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_uint8x16_private + vec_ = simde_uint8x16_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint8x16_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_u8(vec, lane) vdupq_laneq_u8(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_u8 + #define vdupq_laneq_u8(vec, lane) simde_vdupq_laneq_u8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vdupq_laneq_u16(simde_uint16x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint16x8_private + vec_ = simde_uint16x8_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_u16(vec, lane) vdupq_laneq_u16(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_u16 + #define vdupq_laneq_u16(vec, lane) simde_vdupq_laneq_u16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vdupq_laneq_u32(simde_uint32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x4_private + vec_ = simde_uint32x4_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_u32(vec, lane) vdupq_laneq_u32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_u32 + #define vdupq_laneq_u32(vec, lane) simde_vdupq_laneq_u32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vdupq_laneq_u64(simde_uint64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint64x2_private + vec_ = simde_uint64x2_to_private(vec), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = vec_.values[lane]; + } + + return simde_uint64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_u64(vec, lane) vdupq_laneq_u64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupq_laneq_u64 + #define vdupq_laneq_u64(vec, lane) simde_vdupq_laneq_u64((vec), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_DUP_LANE_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/hadd.h b/lib/mmseqs/lib/simde/simde/arm/neon/hadd.h new file mode 100644 index 0000000..be05a1a --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/hadd.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The 
above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +/* TODO: the 128-bit versions only require AVX-512 because of the final + * conversions from larger types down to smaller ones. We could get + * the same results from AVX/AVX2 instructions with some shuffling + * to extract the low half of each input element to the low half + * of a 256-bit vector, then cast that to a 128-bit vector. */ + +#if !defined(SIMDE_ARM_NEON_HADD_H) +#define SIMDE_ARM_NEON_HADD_H + +#include "addl.h" +#include "shr_n.h" +#include "movn.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vhadd_s8(simde_int8x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhadd_s8(a, b); + #else + return simde_vmovn_s16(simde_vshrq_n_s16(simde_vaddl_s8(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhadd_s8 + #define vhadd_s8(a, b) simde_vhadd_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vhadd_s16(simde_int16x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhadd_s16(a, b); + #else + return simde_vmovn_s32(simde_vshrq_n_s32(simde_vaddl_s16(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhadd_s16 + #define vhadd_s16(a, b) simde_vhadd_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vhadd_s32(simde_int32x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhadd_s32(a, b); + #else + return simde_vmovn_s64(simde_vshrq_n_s64(simde_vaddl_s32(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhadd_s32 + #define vhadd_s32(a, b) simde_vhadd_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vhadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhadd_u8(a, b); + #else + return simde_vmovn_u16(simde_vshrq_n_u16(simde_vaddl_u8(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhadd_u8 + #define vhadd_u8(a, b) simde_vhadd_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vhadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhadd_u16(a, b); + #else + return simde_vmovn_u32(simde_vshrq_n_u32(simde_vaddl_u16(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhadd_u16 + #define vhadd_u16(a, b) simde_vhadd_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vhadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhadd_u32(a, b); + #else + return simde_vmovn_u64(simde_vshrq_n_u64(simde_vaddl_u32(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhadd_u32 + 
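The TODO comment at the top of hadd.h notes that the 128-bit q-forms below only need AVX-512 for the final narrowing step, and that AVX/AVX2 plus some shuffling would suffice. As a hedged illustration of that idea (editor's sketch, not part of the patch; the helper name is made up and <immintrin.h> plus AVX2 are assumed), the unsigned 8-bit case could look roughly like this:

#if defined(SIMDE_X86_AVX2_NATIVE)
/* Widen both inputs to 16 bits, add, halve, then pack the per-lane results back
 * into the low 128 bits instead of using the AVX-512 _mm256_cvtepi16_epi8. */
static __m128i
example_vhaddq_u8_avx2(__m128i a, __m128i b) {
  __m256i sum = _mm256_srli_epi16(
      _mm256_add_epi16(_mm256_cvtepu8_epi16(a), _mm256_cvtepu8_epi16(b)), 1);
  /* Every element is <= 255, so unsigned saturation in the pack is a no-op; the
   * pack works per 128-bit lane, so reorder the 64-bit chunks before casting down. */
  __m256i packed = _mm256_packus_epi16(sum, _mm256_setzero_si256());
  return _mm256_castsi256_si128(
      _mm256_permute4x64_epi64(packed, _MM_SHUFFLE(3, 1, 2, 0)));
}
#endif

The signed variants would follow the same pattern with _mm256_cvtepi8_epi16, an arithmetic shift, and a signed pack.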
#define vhadd_u32(a, b) simde_vhadd_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vhaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhaddq_s8(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_add_epi16(_mm256_cvtepi8_epi16(a), _mm256_cvtepi8_epi16(b)), 1)); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) + HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhaddq_s8 + #define vhaddq_s8(a, b) simde_vhaddq_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vhaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhaddq_s16(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_add_epi32(_mm256_cvtepi16_epi32(a), _mm256_cvtepi16_epi32(b)), 1)); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) + HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhaddq_s16 + #define vhaddq_s16(a, b) simde_vhaddq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vhaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhaddq_s32(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_add_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b)), 1)); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) + HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhaddq_s32 + #define vhaddq_s32(a, b) simde_vhaddq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vhaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhaddq_u8(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_add_epi16(_mm256_cvtepu8_epi16(a), _mm256_cvtepu8_epi16(b)), 1)); + #else + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1); + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if 
defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhaddq_u8 + #define vhaddq_u8(a, b) simde_vhaddq_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vhaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhaddq_u16(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_add_epi32(_mm256_cvtepu16_epi32(a), _mm256_cvtepu16_epi32(b)), 1)); + #else + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhaddq_u16 + #define vhaddq_u16(a, b) simde_vhaddq_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vhaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhaddq_u32(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_add_epi64(_mm256_cvtepu32_epi64(a), _mm256_cvtepu32_epi64(b)), 1)); + #else + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1); + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhaddq_u32 + #define vhaddq_u32(a, b) simde_vhaddq_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_HADD_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/hsub.h b/lib/mmseqs/lib/simde/simde/arm/neon/hsub.h new file mode 100644 index 0000000..7357d6d --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/hsub.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +/* TODO: the 128-bit versions only require AVX-512 because of the final + * conversions from larger types down to smaller ones. 
We could get + * the same results from AVX/AVX2 instructions with some shuffling + * to extract the low half of each input element to the low half + * of a 256-bit vector, then cast that to a 128-bit vector. */ + +#if !defined(SIMDE_ARM_NEON_HSUB_H) +#define SIMDE_ARM_NEON_HSUB_H + +#include "subl.h" +#include "shr_n.h" +#include "movn.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vhsub_s8(simde_int8x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsub_s8(a, b); + #else + return simde_vmovn_s16(simde_vshrq_n_s16(simde_vsubl_s8(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsub_s8 + #define vhsub_s8(a, b) simde_vhsub_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vhsub_s16(simde_int16x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsub_s16(a, b); + #else + return simde_vmovn_s32(simde_vshrq_n_s32(simde_vsubl_s16(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsub_s16 + #define vhsub_s16(a, b) simde_vhsub_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vhsub_s32(simde_int32x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsub_s32(a, b); + #else + return simde_vmovn_s64(simde_vshrq_n_s64(simde_vsubl_s32(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsub_s32 + #define vhsub_s32(a, b) simde_vhsub_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vhsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsub_u8(a, b); + #else + return simde_vmovn_u16(simde_vshrq_n_u16(simde_vsubl_u8(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsub_u8 + #define vhsub_u8(a, b) simde_vhsub_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vhsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsub_u16(a, b); + #else + return simde_vmovn_u32(simde_vshrq_n_u32(simde_vsubl_u16(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsub_u16 + #define vhsub_u16(a, b) simde_vhsub_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vhsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsub_u32(a, b); + #else + return simde_vmovn_u64(simde_vshrq_n_u64(simde_vsubl_u32(a, b), 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsub_u32 + #define vhsub_u32(a, b) simde_vhsub_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vhsubq_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsubq_s8(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_sub_epi16(_mm256_cvtepi8_epi16(a), _mm256_cvtepi8_epi16(b)), 1)); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1); + } + + return 
simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsubq_s8 + #define vhsubq_s8(a, b) simde_vhsubq_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vhsubq_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsubq_s16(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_sub_epi32(_mm256_cvtepi16_epi32(a), _mm256_cvtepi16_epi32(b)), 1)); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsubq_s16 + #define vhsubq_s16(a, b) simde_vhsubq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vhsubq_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsubq_s32(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_sub_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b)), 1)); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsubq_s32 + #define vhsubq_s32(a, b) simde_vhsubq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vhsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsubq_u8(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_sub_epi16(_mm256_cvtepu8_epi16(a), _mm256_cvtepu8_epi16(b)), 1)); + #else + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1); + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsubq_u8 + #define vhsubq_u8(a, b) simde_vhsubq_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vhsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsubq_u16(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_sub_epi32(_mm256_cvtepu16_epi32(a), _mm256_cvtepu16_epi32(b)), 1)); + #else + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) - 
HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsubq_u16 + #define vhsubq_u16(a, b) simde_vhsubq_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vhsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vhsubq_u32(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_sub_epi64(_mm256_cvtepu32_epi64(a), _mm256_cvtepu32_epi64(b)), 1)); + #else + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1); + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vhsubq_u32 + #define vhsubq_u32(a, b) simde_vhsubq_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_HSUB_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/ld1.h b/lib/mmseqs/lib/simde/simde/arm/neon/ld1.h index f226cf9..8c2a406 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/ld1.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/ld1.h @@ -200,9 +200,6 @@ simde_vld1q_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(4)]) { return vld1q_f32(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_ps(ptr); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, HEDLEY_REINTERPRET_CAST(const float*, ptr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else @@ -243,9 +240,6 @@ simde_vld1q_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { return vld1q_s8(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, ptr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else @@ -266,9 +260,6 @@ simde_vld1q_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { return vld1q_s16(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, ptr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else @@ -289,9 +280,6 @@ simde_vld1q_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { return vld1q_s32(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, ptr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else @@ -332,9 +320,6 @@ simde_vld1q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { return vld1q_u8(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, ptr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else @@ -355,9 +340,6 @@ simde_vld1q_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { return vld1q_u16(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif 
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, ptr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else @@ -378,9 +360,6 @@ simde_vld1q_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { return vld1q_u32(ptr); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - (void) ptr; - return vec_ld(0, ptr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_load(ptr); #else diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/ld3.h b/lib/mmseqs/lib/simde/simde/arm/neon/ld3.h index 8f171e8..36e014a 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/ld3.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/ld3.h @@ -29,9 +29,13 @@ #define SIMDE_ARM_NEON_LD3_H #include "types.h" +#include "ld1.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +#if defined(HEDLEY_GCC_VERSION) + SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ +#endif SIMDE_BEGIN_DECLS_ #if !defined(SIMDE_BUG_INTEL_857088) @@ -42,12 +46,21 @@ simde_vld3_f32(simde_float32 const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_f32(ptr); #else - simde_float32x2_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_float32x2_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_float32x2_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_float32x2x3_t s_ = { { simde_float32x2_from_private(a_[0]), simde_float32x2_from_private(a_[1]), simde_float32x2_from_private(a_[2]) } }; - return (s_); + + simde_float32x2x3_t r = { { + simde_float32x2_from_private(r_[0]), + simde_float32x2_from_private(r_[1]), + simde_float32x2_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -61,12 +74,21 @@ simde_vld3_f64(simde_float64 const *ptr) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld3_f64(ptr); #else - simde_float64x1_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_float64x1_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_float64x1_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_float64x1x3_t s_ = { { simde_float64x1_from_private(a_[0]), simde_float64x1_from_private(a_[1]), simde_float64x1_from_private(a_[2]) } }; - return s_; + + simde_float64x1x3_t r = { { + simde_float64x1_from_private(r_[0]), + simde_float64x1_from_private(r_[1]), + simde_float64x1_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -80,12 +102,21 @@ simde_vld3_s8(int8_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_s8(ptr); #else - simde_int8x8_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int8x8_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int8x8_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int8x8x3_t s_ = { { simde_int8x8_from_private(a_[0]), simde_int8x8_from_private(a_[1]), simde_int8x8_from_private(a_[2]) } }; - 
return s_; + + simde_int8x8x3_t r = { { + simde_int8x8_from_private(r_[0]), + simde_int8x8_from_private(r_[1]), + simde_int8x8_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -99,12 +130,21 @@ simde_vld3_s16(int16_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_s16(ptr); #else - simde_int16x4_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int16x4_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int16x4_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int16x4x3_t s_ = { { simde_int16x4_from_private(a_[0]), simde_int16x4_from_private(a_[1]), simde_int16x4_from_private(a_[2]) } }; - return s_; + + simde_int16x4x3_t r = { { + simde_int16x4_from_private(r_[0]), + simde_int16x4_from_private(r_[1]), + simde_int16x4_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -118,12 +158,21 @@ simde_vld3_s32(int32_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_s32(ptr); #else - simde_int32x2_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int32x2_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int32x2_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int32x2x3_t s_ = { { simde_int32x2_from_private(a_[0]), simde_int32x2_from_private(a_[1]), simde_int32x2_from_private(a_[2]) } }; - return s_; + + simde_int32x2x3_t r = { { + simde_int32x2_from_private(r_[0]), + simde_int32x2_from_private(r_[1]), + simde_int32x2_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -137,12 +186,21 @@ simde_vld3_s64(int64_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_s64(ptr); #else - simde_int64x1_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int64x1_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int64x1_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int64x1x3_t s_ = { { simde_int64x1_from_private(a_[0]), simde_int64x1_from_private(a_[1]), simde_int64x1_from_private(a_[2]) } }; - return s_; + + simde_int64x1x3_t r = { { + simde_int64x1_from_private(r_[0]), + simde_int64x1_from_private(r_[1]), + simde_int64x1_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -156,12 +214,21 @@ simde_vld3_u8(uint8_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_u8(ptr); #else - simde_uint8x8_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint8x8_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint8x8_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint8x8x3_t s_ = { { simde_uint8x8_from_private(a_[0]), 
simde_uint8x8_from_private(a_[1]), simde_uint8x8_from_private(a_[2]) } }; - return s_; + + simde_uint8x8x3_t r = { { + simde_uint8x8_from_private(r_[0]), + simde_uint8x8_from_private(r_[1]), + simde_uint8x8_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -175,12 +242,21 @@ simde_vld3_u16(uint16_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_u16(ptr); #else - simde_uint16x4_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint16x4_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint16x4_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint16x4x3_t s_ = { { simde_uint16x4_from_private(a_[0]), simde_uint16x4_from_private(a_[1]), simde_uint16x4_from_private(a_[2]) } }; - return s_; + + simde_uint16x4x3_t r = { { + simde_uint16x4_from_private(r_[0]), + simde_uint16x4_from_private(r_[1]), + simde_uint16x4_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -194,12 +270,21 @@ simde_vld3_u32(uint32_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_u32(ptr); #else - simde_uint32x2_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint32x2_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint32x2_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint32x2x3_t s_ = { { simde_uint32x2_from_private(a_[0]), simde_uint32x2_from_private(a_[1]), simde_uint32x2_from_private(a_[2]) } }; - return s_; + + simde_uint32x2x3_t r = { { + simde_uint32x2_from_private(r_[0]), + simde_uint32x2_from_private(r_[1]), + simde_uint32x2_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -213,12 +298,21 @@ simde_vld3_u64(uint64_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3_u64(ptr); #else - simde_uint64x1_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint64x1_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint64x1_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint64x1x3_t s_ = { { simde_uint64x1_from_private(a_[0]), simde_uint64x1_from_private(a_[1]), simde_uint64x1_from_private(a_[2]) } }; - return s_; + + simde_uint64x1x3_t r = { { + simde_uint64x1_from_private(r_[0]), + simde_uint64x1_from_private(r_[1]), + simde_uint64x1_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -232,12 +326,21 @@ simde_vld3q_f32(simde_float32 const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_f32(ptr); #else - simde_float32x4_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_float32x4_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_float32x4_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * 
(sizeof(r_) / sizeof(r_[0])))]; + } } - simde_float32x4x3_t s_ = { { simde_float32x4_from_private(a_[0]), simde_float32x4_from_private(a_[1]), simde_float32x4_from_private(a_[2]) } }; - return s_; + + simde_float32x4x3_t r = { { + simde_float32x4_from_private(r_[0]), + simde_float32x4_from_private(r_[1]), + simde_float32x4_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -251,12 +354,21 @@ simde_vld3q_f64(simde_float64 const *ptr) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld3q_f64(ptr); #else - simde_float64x2_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_float64x2_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_float64x2_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_float64x2x3_t s_ = { { simde_float64x2_from_private(a_[0]), simde_float64x2_from_private(a_[1]), simde_float64x2_from_private(a_[2]) } }; - return s_; + + simde_float64x2x3_t r = { { + simde_float64x2_from_private(r_[0]), + simde_float64x2_from_private(r_[1]), + simde_float64x2_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -270,12 +382,21 @@ simde_vld3q_s8(int8_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_s8(ptr); #else - simde_int8x16_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int8x16_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int8x16_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int8x16x3_t s_ = { { simde_int8x16_from_private(a_[0]), simde_int8x16_from_private(a_[1]), simde_int8x16_from_private(a_[2]) } }; - return s_; + + simde_int8x16x3_t r = { { + simde_int8x16_from_private(r_[0]), + simde_int8x16_from_private(r_[1]), + simde_int8x16_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -289,12 +410,21 @@ simde_vld3q_s16(int16_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_s16(ptr); #else - simde_int16x8_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int16x8_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int16x8_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int16x8x3_t s_ = { { simde_int16x8_from_private(a_[0]), simde_int16x8_from_private(a_[1]), simde_int16x8_from_private(a_[2]) } }; - return s_; + + simde_int16x8x3_t r = { { + simde_int16x8_from_private(r_[0]), + simde_int16x8_from_private(r_[1]), + simde_int16x8_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -308,12 +438,21 @@ simde_vld3q_s32(int32_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_s32(ptr); #else - simde_int32x4_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int32x4_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int32x4_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 
0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int32x4x3_t s_ = { { simde_int32x4_from_private(a_[0]), simde_int32x4_from_private(a_[1]), simde_int32x4_from_private(a_[2]) } }; - return s_; + + simde_int32x4x3_t r = { { + simde_int32x4_from_private(r_[0]), + simde_int32x4_from_private(r_[1]), + simde_int32x4_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -327,12 +466,21 @@ simde_vld3q_s64(int64_t const *ptr) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld3q_s64(ptr); #else - simde_int64x2_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_int64x2_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_int64x2_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_int64x2x3_t s_ = { { simde_int64x2_from_private(a_[0]), simde_int64x2_from_private(a_[1]), simde_int64x2_from_private(a_[2]) } }; - return s_; + + simde_int64x2x3_t r = { { + simde_int64x2_from_private(r_[0]), + simde_int64x2_from_private(r_[1]), + simde_int64x2_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -347,12 +495,21 @@ simde_vld3q_u8(uint8_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_u8(ptr); #else - simde_uint8x16_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint8x16_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint8x16_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint8x16x3_t s_ = { { simde_uint8x16_from_private(a_[0]), simde_uint8x16_from_private(a_[1]), simde_uint8x16_from_private(a_[2]) } }; - return s_; + + simde_uint8x16x3_t r = { { + simde_uint8x16_from_private(r_[0]), + simde_uint8x16_from_private(r_[1]), + simde_uint8x16_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -366,12 +523,21 @@ simde_vld3q_u16(uint16_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_u16(ptr); #else - simde_uint16x8_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint16x8_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint16x8_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint16x8x3_t s_ = { { simde_uint16x8_from_private(a_[0]), simde_uint16x8_from_private(a_[1]), simde_uint16x8_from_private(a_[2]) } }; - return s_; + + simde_uint16x8x3_t r = { { + simde_uint16x8_from_private(r_[0]), + simde_uint16x8_from_private(r_[1]), + simde_uint16x8_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -385,12 +551,21 @@ simde_vld3q_u32(uint32_t const *ptr) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld3q_u32(ptr); #else - simde_uint32x4_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint32x4_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint32x4_private r_[3]; 
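/* Editorial note on the rewritten vld3/vld3q fallbacks in this file (not from the
 * patch): vld3 de-interleaves memory laid out as A0 B0 C0 A1 B1 C1 ..., so element
 * j of output vector i is read from ptr[i + j * 3]. The nested loops below, and the
 * matching ones in the other variants, implement exactly that indexing; the old code
 * computed the same mapping with i % 3 and i / 3 over a single flat loop. */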
+ + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint32x4x3_t s_ = { { simde_uint32x4_from_private(a_[0]), simde_uint32x4_from_private(a_[1]), simde_uint32x4_from_private(a_[2]) } }; - return s_; + + simde_uint32x4x3_t r = { { + simde_uint32x4_from_private(r_[0]), + simde_uint32x4_from_private(r_[1]), + simde_uint32x4_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -404,12 +579,21 @@ simde_vld3q_u64(uint64_t const *ptr) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld3q_u64(ptr); #else - simde_uint64x2_private a_[3]; - for (size_t i = 0; i < (sizeof(simde_uint64x2_t) / sizeof(*ptr)) * 3 ; i++) { - a_[i % 3].values[i / 3] = ptr[i]; + simde_uint64x2_private r_[3]; + + for (size_t i = 0; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } } - simde_uint64x2x3_t s_ = { { simde_uint64x2_from_private(a_[0]), simde_uint64x2_from_private(a_[1]), simde_uint64x2_from_private(a_[2]) } }; - return s_; + + simde_uint64x2x3_t r = { { + simde_uint64x2_from_private(r_[0]), + simde_uint64x2_from_private(r_[1]), + simde_uint64x2_from_private(r_[2]) + } }; + + return r; #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/ld4.h b/lib/mmseqs/lib/simde/simde/arm/neon/ld4.h index 6e2a046..871b926 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/ld4.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/ld4.h @@ -32,6 +32,9 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +#if defined(HEDLEY_GCC_VERSION) + SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ +#endif SIMDE_BEGIN_DECLS_ #if !defined(SIMDE_BUG_INTEL_857088) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/max.h b/lib/mmseqs/lib/simde/simde/arm/neon/max.h index 4de9e71..dadf46a 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/max.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/max.h @@ -41,8 +41,6 @@ simde_float32x2_t simde_vmax_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmax_f32(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbsl_f32(simde_vcgt_f32(a, b), a, b); #else simde_float32x2_private r_, @@ -51,7 +49,11 @@ simde_vmax_f32(simde_float32x2_t a, simde_float32x2_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #if !defined(SIMDE_FAST_NANS) + r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NANF); + #else + r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #endif } return simde_float32x2_from_private(r_); @@ -67,8 +69,6 @@ simde_float64x1_t simde_vmax_f64(simde_float64x1_t a, simde_float64x1_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmax_f64(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbsl_f64(simde_vcgt_f64(a, b), a, b); #else simde_float64x1_private r_, @@ -77,7 +77,11 @@ simde_vmax_f64(simde_float64x1_t a, simde_float64x1_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
a_.values[i] : b_.values[i]; + #if !defined(SIMDE_FAST_NANS) + r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NAN); + #else + r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #endif } return simde_float64x1_from_private(r_); @@ -290,13 +294,19 @@ simde_vmaxq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_f32(a, b); #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_max_ps(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128 nan_mask = _mm_cmpunord_ps(a, b); + __m128 res = _mm_max_ps(a, b); + res = _mm_andnot_ps(nan_mask, res); + res = _mm_or_ps(res, _mm_and_ps(_mm_set1_ps(SIMDE_MATH_NANF), nan_mask)); + return res; + #else + return _mm_max_ps(a, b); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS) return vec_max(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_f32x4_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_f32(simde_vcgtq_f32(a, b), a, b); #else simde_float32x4_private r_, @@ -305,7 +315,11 @@ simde_vmaxq_f32(simde_float32x4_t a, simde_float32x4_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #if !defined(SIMDE_FAST_NANS) + r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NANF); + #else + r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #endif } return simde_float32x4_from_private(r_); @@ -322,13 +336,19 @@ simde_vmaxq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmaxq_f64(a, b); #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_max_pd(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128d nan_mask = _mm_cmpunord_pd(a, b); + __m128d res = _mm_max_pd(a, b); + res = _mm_andnot_pd(nan_mask, res); + res = _mm_or_pd(res, _mm_and_pd(_mm_set1_pd(SIMDE_MATH_NAN), nan_mask)); + return res; + #else + return _mm_max_pd(a, b); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(SIMDE_FAST_NANS) return vec_max(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_f64x2_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_f64(simde_vcgtq_f64(a, b), a, b); #else simde_float64x2_private r_, @@ -337,7 +357,11 @@ simde_vmaxq_f64(simde_float64x2_t a, simde_float64x2_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #if !defined(SIMDE_FAST_NANS) + r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NAN); + #else + r_.values[i] = (a_.values[i] > b_.values[i]) ? 
a_.values[i] : b_.values[i]; + #endif } return simde_float64x2_from_private(r_); diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/maxnm.h b/lib/mmseqs/lib/simde/simde/arm/neon/maxnm.h new file mode 100644 index 0000000..5dc0d9f --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/maxnm.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MAXNM_H) +#define SIMDE_ARM_NEON_MAXNM_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vmaxnm_f32(simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6) + return vmaxnm_f32(a, b); + #else + simde_float32x2_private + r_, + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmaxf) + r_.values[i] = fmaxf(a_.values[i], b_.values[i]); + #else + if (a_.values[i] > b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] < b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmaxnm_f32 + #define vmaxnm_f32(a, b) simde_vmaxnm_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vmaxnm_f64(simde_float64x1_t a, simde_float64x1_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmaxnm_f64(a, b); + #else + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a), + b_ = simde_float64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmax) + r_.values[i] = fmax(a_.values[i], b_.values[i]); + #else + if (a_.values[i] > b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] < b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxnm_f64 + 
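For reference, the changes in max.h above make vmax*_f32/_f64 propagate NaN whenever SIMDE_FAST_NANS is not defined, while the vmaxnm* family added in this header keeps IEEE 754 maxNum behaviour, in which a NaN operand is ignored. A minimal scalar sketch of the two semantics (editor's illustration, not from the patch; only <math.h> is assumed):

#include <math.h>

/* vmax-style: any unordered comparison (at least one NaN input) yields NaN. */
static float example_vmax_like(float a, float b) {
  return (a >= b) ? a : ((a < b) ? b : NAN);
}

/* vmaxnm-style (IEEE 754 maxNum): a quiet NaN in one operand is ignored and the
 * other operand is returned; fmaxf already has exactly this behaviour. */
static float example_vmaxnm_like(float a, float b) {
  return fmaxf(a, b);
}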
#define vmaxnm_f64(a, b) simde_vmaxnm_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vmaxnmq_f32(simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6) + return vmaxnmq_f32(a, b); + #elif defined(SIMDE_X86_SSE_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128 r = _mm_max_ps(a, b); + __m128 bnan = _mm_cmpunord_ps(b, b); + r = _mm_andnot_ps(bnan, r); + r = _mm_or_ps(r, _mm_and_ps(a, bnan)); + return r; + #else + return _mm_max_ps(a, b); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_max(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + return wasm_f32x4_max(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) + return simde_vbslq_f32(simde_vcgeq_f32(a, b), a, b); + #else + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmaxf) + r_.values[i] = fmaxf(a_.values[i], b_.values[i]); + #else + if (a_.values[i] > b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] < b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmaxnmq_f32 + #define vmaxnmq_f32(a, b) simde_vmaxnmq_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vmaxnmq_f64(simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmaxnmq_f64(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128d r = _mm_max_pd(a, b); + __m128d bnan = _mm_cmpunord_pd(b, b); + r = _mm_andnot_pd(bnan, r); + r = _mm_or_pd(r, _mm_and_pd(a, bnan)); + return r; + #else + return _mm_max_pd(a, b); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_max(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + return wasm_f64x2_max(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) + return simde_vbslq_f64(simde_vcgeq_f64(a, b), a, b); + #else + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmax) + r_.values[i] = fmax(a_.values[i], b_.values[i]); + #else + if (a_.values[i] > b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] < b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxnmq_f64 + #define vmaxnmq_f64(a, b) simde_vmaxnmq_f64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MAXNM_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/maxv.h b/lib/mmseqs/lib/simde/simde/arm/neon/maxv.h new file mode 100644 index 0000000..37437b0 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/maxv.h @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to 
any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MAXV_H) +#define SIMDE_ARM_NEON_MAXV_H + +#include "types.h" +#include + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vmaxv_f32(simde_float32x2_t a) { + simde_float32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + + r = -SIMDE_MATH_INFINITYF; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_f32 + #define vmaxv_f32(v) simde_vmaxv_f32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vmaxv_s8(simde_int8x8_t a) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_s8(a); + #else + simde_int8x8_private a_ = simde_int8x8_to_private(a); + + r = INT8_MIN; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_s8 + #define vmaxv_s8(v) simde_vmaxv_s8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vmaxv_s16(simde_int16x4_t a) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_s16(a); + #else + simde_int16x4_private a_ = simde_int16x4_to_private(a); + + r = INT16_MIN; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_s16 + #define vmaxv_s16(v) simde_vmaxv_s16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vmaxv_s32(simde_int32x2_t a) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_s32(a); + #else + simde_int32x2_private a_ = simde_int32x2_to_private(a); + + r = INT32_MIN; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? 
a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_s32 + #define vmaxv_s32(v) simde_vmaxv_s32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vmaxv_u8(simde_uint8x8_t a) { + uint8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_u8(a); + #else + simde_uint8x8_private a_ = simde_uint8x8_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_u8 + #define vmaxv_u8(v) simde_vmaxv_u8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vmaxv_u16(simde_uint16x4_t a) { + uint16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_u16(a); + #else + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_u16 + #define vmaxv_u16(v) simde_vmaxv_u16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vmaxv_u32(simde_uint32x2_t a) { + uint32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxv_u32(a); + #else + simde_uint32x2_private a_ = simde_uint32x2_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxv_u32 + #define vmaxv_u32(v) simde_vmaxv_u32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vmaxvq_f32(simde_float32x4_t a) { + simde_float32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_f32(a); + #else + simde_float32x4_private a_ = simde_float32x4_to_private(a); + + r = -SIMDE_MATH_INFINITYF; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_f32 + #define vmaxvq_f32(v) simde_vmaxvq_f32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vmaxvq_f64(simde_float64x2_t a) { + simde_float64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + + r = -SIMDE_MATH_INFINITY; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_f64 + #define vmaxvq_f64(v) simde_vmaxvq_f64(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vmaxvq_s8(simde_int8x16_t a) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_s8(a); + #else + simde_int8x16_private a_ = simde_int8x16_to_private(a); + + r = INT8_MIN; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? 
a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_s8 + #define vmaxvq_s8(v) simde_vmaxvq_s8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vmaxvq_s16(simde_int16x8_t a) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_s16(a); + #else + simde_int16x8_private a_ = simde_int16x8_to_private(a); + + r = INT16_MIN; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_s16 + #define vmaxvq_s16(v) simde_vmaxvq_s16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vmaxvq_s32(simde_int32x4_t a) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_s32(a); + #else + simde_int32x4_private a_ = simde_int32x4_to_private(a); + + r = INT32_MIN; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_s32 + #define vmaxvq_s32(v) simde_vmaxvq_s32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vmaxvq_u8(simde_uint8x16_t a) { + uint8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_u8(a); + #else + simde_uint8x16_private a_ = simde_uint8x16_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_u8 + #define vmaxvq_u8(v) simde_vmaxvq_u8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vmaxvq_u16(simde_uint16x8_t a) { + uint16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_u16(a); + #else + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_u16 + #define vmaxvq_u16(v) simde_vmaxvq_u16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vmaxvq_u32(simde_uint32x4_t a) { + uint32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vmaxvq_u32(a); + #else + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + + r = 0; + SIMDE_VECTORIZE_REDUCTION(max:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] > r ? 
a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmaxvq_u32 + #define vmaxvq_u32(v) simde_vmaxvq_u32(v) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MAXV_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/minnm.h b/lib/mmseqs/lib/simde/simde/arm/neon/minnm.h new file mode 100644 index 0000000..11e7bd2 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/minnm.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MINNM_H) +#define SIMDE_ARM_NEON_MINNM_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vminnm_f32(simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6) + return vminnm_f32(a, b); + #else + simde_float32x2_private + r_, + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fminf) + r_.values[i] = fminf(a_.values[i], b_.values[i]); + #else + if (a_.values[i] < b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] > b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vminnm_f32 + #define vminnm_f32(a, b) simde_vminnm_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vminnm_f64(simde_float64x1_t a, simde_float64x1_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vminnm_f64(a, b); + #else + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a), + b_ = simde_float64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmin) + r_.values[i] = fmin(a_.values[i], b_.values[i]); + #else + if (a_.values[i] < b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] > b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + 
r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminnm_f64 + #define vminnm_f64(a, b) simde_vminnm_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vminnmq_f32(simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6) + return vminnmq_f32(a, b); + #elif defined(SIMDE_X86_SSE_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128 r = _mm_min_ps(a, b); + __m128 bnan = _mm_cmpunord_ps(b, b); + r = _mm_andnot_ps(bnan, r); + r = _mm_or_ps(r, _mm_and_ps(a, bnan)); + return r; + #else + return _mm_min_ps(a, b); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_min(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + return wasm_f32x4_min(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) + return simde_vbslq_f32(simde_vcgeq_f32(a, b), a, b); + #else + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fminf) + r_.values[i] = fminf(a_.values[i], b_.values[i]); + #else + if (a_.values[i] < b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] > b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vminnmq_f32 + #define vminnmq_f32(a, b) simde_vminnmq_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vminnmq_f64(simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vminnmq_f64(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128d r = _mm_min_pd(a, b); + __m128d bnan = _mm_cmpunord_pd(b, b); + r = _mm_andnot_pd(bnan, r); + r = _mm_or_pd(r, _mm_and_pd(a, bnan)); + return r; + #else + return _mm_min_pd(a, b); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_min(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + return wasm_f64x2_min(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) + return simde_vbslq_f64(simde_vcgeq_f64(a, b), a, b); + #else + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmin) + r_.values[i] = fmin(a_.values[i], b_.values[i]); + #else + if (a_.values[i] < b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] > b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminnmq_f64 + #define vminnmq_f64(a, b) simde_vminnmq_f64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MINNM_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/minv.h b/lib/mmseqs/lib/simde/simde/arm/neon/minv.h new file mode 100644 index 
0000000..93028d7 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/minv.h @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MINV_H) +#define SIMDE_ARM_NEON_MINV_H + +#include "types.h" +#include + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vminv_f32(simde_float32x2_t a) { + simde_float32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + + r = SIMDE_MATH_INFINITYF; + #if defined(SIMDE_FAST_NANS) + SIMDE_VECTORIZE_REDUCTION(min:r) + #else + SIMDE_VECTORIZE + #endif + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + #if defined(SIMDE_FAST_NANS) + r = a_.values[i] < r ? a_.values[i] : r; + #else + r = (a_.values[i] < r) ? a_.values[i] : ((a_.values[i] >= r) ? r : ((a_.values[i] == a_.values[i]) ? r : a_.values[i])); + #endif + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_f32 + #define vminv_f32(v) simde_vminv_f32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vminv_s8(simde_int8x8_t a) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_s8(a); + #else + simde_int8x8_private a_ = simde_int8x8_to_private(a); + + r = INT8_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_s8 + #define vminv_s8(v) simde_vminv_s8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vminv_s16(simde_int16x4_t a) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_s16(a); + #else + simde_int16x4_private a_ = simde_int16x4_to_private(a); + + r = INT16_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? 
a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_s16 + #define vminv_s16(v) simde_vminv_s16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vminv_s32(simde_int32x2_t a) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_s32(a); + #else + simde_int32x2_private a_ = simde_int32x2_to_private(a); + + r = INT32_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_s32 + #define vminv_s32(v) simde_vminv_s32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vminv_u8(simde_uint8x8_t a) { + uint8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_u8(a); + #else + simde_uint8x8_private a_ = simde_uint8x8_to_private(a); + + r = UINT8_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_u8 + #define vminv_u8(v) simde_vminv_u8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vminv_u16(simde_uint16x4_t a) { + uint16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_u16(a); + #else + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + + r = UINT16_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_u16 + #define vminv_u16(v) simde_vminv_u16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vminv_u32(simde_uint32x2_t a) { + uint32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminv_u32(a); + #else + simde_uint32x2_private a_ = simde_uint32x2_to_private(a); + + r = UINT32_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminv_u32 + #define vminv_u32(v) simde_vminv_u32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vminvq_f32(simde_float32x4_t a) { + simde_float32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_f32(a); + #else + simde_float32x4_private a_ = simde_float32x4_to_private(a); + + r = SIMDE_MATH_INFINITYF; + #if defined(SIMDE_FAST_NANS) + SIMDE_VECTORIZE_REDUCTION(min:r) + #else + SIMDE_VECTORIZE + #endif + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + #if defined(SIMDE_FAST_NANS) + r = a_.values[i] < r ? a_.values[i] : r; + #else + r = (a_.values[i] < r) ? a_.values[i] : ((a_.values[i] >= r) ? r : ((a_.values[i] == a_.values[i]) ? 
r : a_.values[i])); + #endif + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_f32 + #define vminvq_f32(v) simde_vminvq_f32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vminvq_f64(simde_float64x2_t a) { + simde_float64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + + r = SIMDE_MATH_INFINITY; + #if defined(SIMDE_FAST_NANS) + SIMDE_VECTORIZE_REDUCTION(min:r) + #else + SIMDE_VECTORIZE + #endif + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + #if defined(SIMDE_FAST_NANS) + r = a_.values[i] < r ? a_.values[i] : r; + #else + r = (a_.values[i] < r) ? a_.values[i] : ((a_.values[i] >= r) ? r : ((a_.values[i] == a_.values[i]) ? r : a_.values[i])); + #endif + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_f64 + #define vminvq_f64(v) simde_vminvq_f64(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vminvq_s8(simde_int8x16_t a) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_s8(a); + #else + simde_int8x16_private a_ = simde_int8x16_to_private(a); + + r = INT8_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_s8 + #define vminvq_s8(v) simde_vminvq_s8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vminvq_s16(simde_int16x8_t a) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_s16(a); + #else + simde_int16x8_private a_ = simde_int16x8_to_private(a); + + r = INT16_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_s16 + #define vminvq_s16(v) simde_vminvq_s16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vminvq_s32(simde_int32x4_t a) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_s32(a); + #else + simde_int32x4_private a_ = simde_int32x4_to_private(a); + + r = INT32_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_s32 + #define vminvq_s32(v) simde_vminvq_s32(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vminvq_u8(simde_uint8x16_t a) { + uint8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_u8(a); + #else + simde_uint8x16_private a_ = simde_uint8x16_to_private(a); + + r = UINT8_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? 
a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_u8 + #define vminvq_u8(v) simde_vminvq_u8(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vminvq_u16(simde_uint16x8_t a) { + uint16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_u16(a); + #else + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + + r = UINT16_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_u16 + #define vminvq_u16(v) simde_vminvq_u16(v) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vminvq_u32(simde_uint32x4_t a) { + uint32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vminvq_u32(a); + #else + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + + r = UINT32_MAX; + SIMDE_VECTORIZE_REDUCTION(min:r) + for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) { + r = a_.values[i] < r ? a_.values[i] : r; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vminvq_u32 + #define vminvq_u32(v) simde_vminvq_u32(v) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MINV_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mlal_n.h b/lib/mmseqs/lib/simde/simde/arm/neon/mlal_n.h new file mode 100644 index 0000000..6025492 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mlal_n.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLAL_N_H) +#define SIMDE_ARM_NEON_MLAL_N_H + +#include "movl.h" +#include "dup_n.h" +#include "mla.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlal_n_s16(simde_int32x4_t a, simde_int16x4_t b, int16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlal_n_s16(a, b, c); + #else + return simde_vmlaq_s32(a, simde_vmovl_s16(b), simde_vdupq_n_s32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_n_s16 + #define vmlal_n_s16(a, b, c) simde_vmlal_n_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmlal_n_s32(simde_int64x2_t a, simde_int32x2_t b, int32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlal_n_s32(a, b, c); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(simde_vmovl_s32(b)), + c_ = simde_int64x2_to_private(simde_vdupq_n_s64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = (b_.values * c_.values) + a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_n_s32 + #define vmlal_n_s32(a, b, c) simde_vmlal_n_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlal_n_u16(simde_uint32x4_t a, simde_uint16x4_t b, uint16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlal_n_u16(a, b, c); + #else + return simde_vmlaq_u32(a, simde_vmovl_u16(b), simde_vdupq_n_u32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_n_u16 + #define vmlal_n_u16(a, b, c) simde_vmlal_n_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmlal_n_u32(simde_uint64x2_t a, simde_uint32x2_t b, uint32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlal_n_u32(a, b, c); + #else + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(simde_vmovl_u32(b)), + c_ = simde_uint64x2_to_private(simde_vdupq_n_u64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = (b_.values * c_.values) + a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_n_u32 + #define vmlal_n_u32(a, b, c) simde_vmlal_n_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLAL_N_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mls.h b/lib/mmseqs/lib/simde/simde/arm/neon/mls.h new file mode 100644 index 0000000..b10749d --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mls.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of 
the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLS_H) +#define SIMDE_ARM_NEON_MLS_H + +#include "mul.h" +#include "sub.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vmls_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_f32(a, b, c); + #else + return simde_vsub_f32(a, simde_vmul_f32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_f32 + #define vmls_f32(a, b, c) simde_vmls_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vmls_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64x1_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmls_f64(a, b, c); + #else + return simde_vsub_f64(a, simde_vmul_f64(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_f64 + #define vmls_f64(a, b, c) simde_vmls_f64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vmls_s8(simde_int8x8_t a, simde_int8x8_t b, simde_int8x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_s8(a, b, c); + #else + return simde_vsub_s8(a, simde_vmul_s8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_s8 + #define vmls_s8(a, b, c) simde_vmls_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vmls_s16(simde_int16x4_t a, simde_int16x4_t b, simde_int16x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_s16(a, b, c); + #else + return simde_vsub_s16(a, simde_vmul_s16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_s16 + #define vmls_s16(a, b, c) simde_vmls_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vmls_s32(simde_int32x2_t a, simde_int32x2_t b, simde_int32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_s32(a, b, c); + #else + return simde_vsub_s32(a, simde_vmul_s32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_s32 + #define vmls_s32(a, b, c) simde_vmls_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vmls_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_u8(a, b, c); + #else + return simde_vsub_u8(a, simde_vmul_u8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_u8 + #define vmls_u8(a, b, c) simde_vmls_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vmls_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_u16(a, b, c); + #else + return simde_vsub_u16(a, simde_vmul_u16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_u16 + #define vmls_u16(a, b, c) simde_vmls_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vmls_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_u32(a, b, c); + #else + return simde_vsub_u32(a, simde_vmul_u32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_u32 + #define vmls_u32(a, b, c) simde_vmls_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vmlsq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_f32(a, b, c); + #else + return simde_vsubq_f32(a, simde_vmulq_f32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_f32 + #define vmlsq_f32(a, b, c) simde_vmlsq_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vmlsq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsq_f64(a, b, c); + #else + return simde_vsubq_f64(a, simde_vmulq_f64(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_f64 + #define vmlsq_f64(a, b, c) simde_vmlsq_f64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vmlsq_s8(simde_int8x16_t a, simde_int8x16_t b, simde_int8x16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_s8(a, b, c); + #else + return simde_vsubq_s8(a, simde_vmulq_s8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_s8 + #define vmlsq_s8(a, b, c) simde_vmlsq_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmlsq_s16(simde_int16x8_t a, simde_int16x8_t b, simde_int16x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_s16(a, b, c); + #else + return simde_vsubq_s16(a, simde_vmulq_s16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_s16 + #define vmlsq_s16(a, b, c) simde_vmlsq_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlsq_s32(simde_int32x4_t a, simde_int32x4_t b, simde_int32x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_s32(a, b, c); + #else + return simde_vsubq_s32(a, simde_vmulq_s32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_s32 + #define vmlsq_s32(a, b, c) simde_vmlsq_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vmlsq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_u8(a, b, c); + #else + return simde_vsubq_u8(a, simde_vmulq_u8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_u8 + #define vmlsq_u8(a, b, c) simde_vmlsq_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmlsq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_u16(a, b, c); + #else + return simde_vsubq_u16(a, simde_vmulq_u16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_u16 + #define vmlsq_u16(a, b, c) simde_vmlsq_u16((a), (b), (c)) 
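/* Illustrative usage sketch, not part of the vendored simde sources: the vmls
 * family above computes a lane-wise multiply-subtract, a - (b * c), by
 * composing simde_vmul* and simde_vsub*. The include path below assumes the
 * vendored simde/ directory is on the compiler's include path. */
#include <stdio.h>
#include <simde/arm/neon.h>

int main(void) {
  simde_float32x4_t acc = simde_vdupq_n_f32(10.0f);
  simde_float32x4_t b = simde_vdupq_n_f32(2.0f);
  simde_float32x4_t c = simde_vdupq_n_f32(3.0f);
  simde_float32x4_t r = simde_vmlsq_f32(acc, b, c); /* each lane: 10 - 2*3 = 4 */
  printf("%g\n", simde_vgetq_lane_f32(r, 0));
  return 0;
}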
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlsq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_u32(a, b, c); + #else + return simde_vsubq_u32(a, simde_vmulq_u32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_u32 + #define vmlsq_u32(a, b, c) simde_vmlsq_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLS_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mlsl.h b/lib/mmseqs/lib/simde/simde/arm/neon/mlsl.h new file mode 100644 index 0000000..e79cea1 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mlsl.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLSL_H) +#define SIMDE_ARM_NEON_MLSL_H + +#include "mull.h" +#include "sub.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmlsl_s8(simde_int16x8_t a, simde_int8x8_t b, simde_int8x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_s8(a, b, c); + #else + return simde_vsubq_s16(a, simde_vmull_s8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_s8 + #define vmlsl_s8(a, b, c) simde_vmlsl_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlsl_s16(simde_int32x4_t a, simde_int16x4_t b, simde_int16x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_s16(a, b, c); + #else + return simde_vsubq_s32(a, simde_vmull_s16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_s16 + #define vmlsl_s16(a, b, c) simde_vmlsl_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmlsl_s32(simde_int64x2_t a, simde_int32x2_t b, simde_int32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_s32(a, b, c); + #else + return simde_vsubq_s64(a, simde_vmull_s32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_s32 + #define vmlsl_s32(a, b, c) simde_vmlsl_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmlsl_u8(simde_uint16x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_u8(a, b, c); + #else + return simde_vsubq_u16(a, simde_vmull_u8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_u8 + #define vmlsl_u8(a, b, c) simde_vmlsl_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlsl_u16(simde_uint32x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_u16(a, b, c); + #else + return simde_vsubq_u32(a, simde_vmull_u16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_u16 + #define vmlsl_u16(a, b, c) simde_vmlsl_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmlsl_u32(simde_uint64x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_u32(a, b, c); + #else + return simde_vsubq_u64(a, simde_vmull_u32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_u32 + #define vmlsl_u32(a, b, c) simde_vmlsl_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLSL_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mlsl_high.h b/lib/mmseqs/lib/simde/simde/arm/neon/mlsl_high.h new file mode 100644 index 0000000..d70ca93 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mlsl_high.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright 
notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLSL_HIGH_H) +#define SIMDE_ARM_NEON_MLSL_HIGH_H + +#include "mull_high.h" +#include "sub.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmlsl_high_s8(simde_int16x8_t a, simde_int8x16_t b, simde_int8x16_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_s8(a, b, c); + #else + return simde_vsubq_s16(a, simde_vmull_high_s8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_s8 + #define vmlsl_high_s8(a, b, c) simde_vmlsl_high_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlsl_high_s16(simde_int32x4_t a, simde_int16x8_t b, simde_int16x8_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_s16(a, b, c); + #else + return simde_vsubq_s32(a, simde_vmull_high_s16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_s16 + #define vmlsl_high_s16(a, b, c) simde_vmlsl_high_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmlsl_high_s32(simde_int64x2_t a, simde_int32x4_t b, simde_int32x4_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_s32(a, b, c); + #else + return simde_vsubq_s64(a, simde_vmull_high_s32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_s32 + #define vmlsl_high_s32(a, b, c) simde_vmlsl_high_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmlsl_high_u8(simde_uint16x8_t a, simde_uint8x16_t b, simde_uint8x16_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_u8(a, b, c); + #else + return simde_vsubq_u16(a, simde_vmull_high_u8(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_u8 + #define vmlsl_high_u8(a, b, c) simde_vmlsl_high_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlsl_high_u16(simde_uint32x4_t a, simde_uint16x8_t b, simde_uint16x8_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_u16(a, b, c); + #else + return simde_vsubq_u32(a, simde_vmull_high_u16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_u16 + #define vmlsl_high_u16(a, b, c) simde_vmlsl_high_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmlsl_high_u32(simde_uint64x2_t a, simde_uint32x4_t b, simde_uint32x4_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_u32(a, b, c); + #else + return simde_vsubq_u64(a, simde_vmull_high_u32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_u32 + #define vmlsl_high_u32(a, b, c) simde_vmlsl_high_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif 
/* !defined(SIMDE_ARM_NEON_MLSL_HIGH_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mlsl_n.h b/lib/mmseqs/lib/simde/simde/arm/neon/mlsl_n.h new file mode 100644 index 0000000..68ee44b --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mlsl_n.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLSL_N_H) +#define SIMDE_ARM_NEON_MLSL_N_H + +#include "mull_n.h" +#include "sub.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlsl_n_s16(simde_int32x4_t a, simde_int16x4_t b, int16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_n_s16(a, b, c); + #else + return simde_vsubq_s32(a, simde_vmull_n_s16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_n_s16 + #define vmlsl_n_s16(a, b, c) simde_vmlsl_n_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmlsl_n_s32(simde_int64x2_t a, simde_int32x2_t b, int32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_n_s32(a, b, c); + #else + return simde_vsubq_s64(a, simde_vmull_n_s32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_n_s32 + #define vmlsl_n_s32(a, b, c) simde_vmlsl_n_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlsl_n_u16(simde_uint32x4_t a, simde_uint16x4_t b, uint16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_n_u16(a, b, c); + #else + return simde_vsubq_u32(a, simde_vmull_n_u16(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_n_u16 + #define vmlsl_n_u16(a, b, c) simde_vmlsl_n_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmlsl_n_u32(simde_uint64x2_t a, simde_uint32x2_t b, uint32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsl_n_u32(a, b, c); + #else + return simde_vsubq_u64(a, simde_vmull_n_u32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_n_u32 + #define vmlsl_n_u32(a, b, c) simde_vmlsl_n_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLSL_N_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/movn_high.h b/lib/mmseqs/lib/simde/simde/arm/neon/movn_high.h new file mode 100644 index 
0000000..8e41caf --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/movn_high.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MOVN_HIGH_H) +#define SIMDE_ARM_NEON_MOVN_HIGH_H + +#include "types.h" +#include "movn.h" +#include "combine.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vmovn_high_s16(simde_int8x8_t r, simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmovn_high_s16(r, a); + #else + return simde_vcombine_s8(r, simde_vmovn_s16(a)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmovn_high_s16 + #define vmovn_high_s16(r, a) simde_vmovn_high_s16((r), (a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmovn_high_s32(simde_int16x4_t r, simde_int32x4_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmovn_high_s32(r, a); + #else + return simde_vcombine_s16(r, simde_vmovn_s32(a)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmovn_high_s32 + #define vmovn_high_s32(r, a) simde_vmovn_high_s32((r), (a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmovn_high_s64(simde_int32x2_t r, simde_int64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmovn_high_s64(r, a); + #else + return simde_vcombine_s32(r, simde_vmovn_s64(a)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmovn_high_s64 + #define vmovn_high_s64(r, a) simde_vmovn_high_s64((r), (a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vmovn_high_u16(simde_uint8x8_t r, simde_uint16x8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmovn_high_u16(r, a); + #else + return simde_vcombine_u8(r, simde_vmovn_u16(a)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmovn_high_u16 + #define vmovn_high_u16(r, a) simde_vmovn_high_u16((r), (a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmovn_high_u32(simde_uint16x4_t r, simde_uint32x4_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmovn_high_u32(r, a); + #else + return simde_vcombine_u16(r, simde_vmovn_u32(a)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmovn_high_u32 + #define vmovn_high_u32(r, a) simde_vmovn_high_u32((r), (a)) +#endif + 
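/* Illustrative usage sketch, not part of the vendored simde sources: the
 * vmovn_high functions above narrow each lane of the second operand by plain
 * truncation (no saturation) and pack the result into the upper half of the
 * output, keeping the first operand as the lower half, i.e.
 * vcombine(r, vmovn(a)). The include path below assumes the vendored simde/
 * directory is on the compiler's include path. */
#include <stdio.h>
#include <simde/arm/neon.h>

int main(void) {
  simde_int8x8_t low = simde_vdup_n_s8(7);             /* output lanes 0..7  */
  simde_int16x8_t wide = simde_vdupq_n_s16(0x1234);    /* truncates to 0x34  */
  simde_int8x16_t r = simde_vmovn_high_s16(low, wide); /* output lanes 8..15 */
  printf("%d %d\n", simde_vgetq_lane_s8(r, 0), simde_vgetq_lane_s8(r, 15));
  return 0;
}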
+SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmovn_high_u64(simde_uint32x2_t r, simde_uint64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmovn_high_u64(r, a); + #else + return simde_vcombine_u32(r, simde_vmovn_u64(a)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmovn_high_u64 + #define vmovn_high_u64(r, a) simde_vmovn_high_u64((r), (a)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MOVN_HIGH_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mul.h b/lib/mmseqs/lib/simde/simde/arm/neon/mul.h index 5e07e66..a5701b2 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/mul.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mul.h @@ -176,6 +176,26 @@ simde_vmul_s32(simde_int32x2_t a, simde_int32x2_t b) { #define vmul_s32(a, b) simde_vmul_s32((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_x_vmul_s64(simde_int64x1_t a, simde_int64x1_t b) { + simde_int64x1_private + r_, + a_ = simde_int64x1_to_private(a), + b_ = simde_int64x1_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values * b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[i]; + } + #endif + + return simde_int64x1_from_private(r_); +} + SIMDE_FUNCTION_ATTRIBUTES simde_uint8x8_t simde_vmul_u8(simde_uint8x8_t a, simde_uint8x8_t b) { @@ -260,6 +280,26 @@ simde_vmul_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #define vmul_u32(a, b) simde_vmul_u32((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_x_vmul_u64(simde_uint64x1_t a, simde_uint64x1_t b) { + simde_uint64x1_private + r_, + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values * b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[i]; + } + #endif + + return simde_uint64x1_from_private(r_); +} + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vmulq_f32(simde_float32x4_t a, simde_float32x4_t b) { @@ -412,6 +452,29 @@ simde_vmulq_s32(simde_int32x4_t a, simde_int32x4_t b) { #define vmulq_s32(a, b) simde_vmulq_s32((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_x_vmulq_s64(simde_int64x2_t a, simde_int64x2_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_mul(a, b); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values * b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[i]; + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t @@ -501,6 +564,30 @@ simde_vmulq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #define vmulq_u32(a, b) simde_vmulq_u32((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_x_vmulq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_mul(a, b); + #else + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values * b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) 
; i++) { + r_.values[i] = a_.values[i] * b_.values[i]; + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mul_lane.h b/lib/mmseqs/lib/simde/simde/arm/neon/mul_lane.h new file mode 100644 index 0000000..1691988 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mul_lane.h @@ -0,0 +1,472 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MUL_LANE_H) +#define SIMDE_ARM_NEON_MUL_LANE_H + +#include "types.h" +#include "mul.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vmul_lane_f32(simde_float32x2_t a, simde_float32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float32x2_private + r_, + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmul_lane_f32(a, b, lane) vmul_lane_f32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmul_lane_f32 + #define vmul_lane_f32(a, b, lane) simde_vmul_lane_f32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vmul_lane_f64(simde_float64x1_t a, simde_float64x1_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a), + b_ = simde_float64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_lane_f64(a, b, lane) vmul_lane_f64((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_lane_f64 + #define vmul_lane_f64(a, b, lane) simde_vmul_lane_f64((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vmul_lane_s16(simde_int16x4_t a, simde_int16x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a), + b_ = 
simde_int16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmul_lane_s16(a, b, lane) vmul_lane_s16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmul_lane_s16 + #define vmul_lane_s16(a, b, lane) simde_vmul_lane_s16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vmul_lane_s32(simde_int32x2_t a, simde_int32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a), + b_ = simde_int32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmul_lane_s32(a, b, lane) vmul_lane_s32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmul_lane_s32 + #define vmul_lane_s32(a, b, lane) simde_vmul_lane_s32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vmul_lane_u16(simde_uint16x4_t a, simde_uint16x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmul_lane_u16(a, b, lane) vmul_lane_u16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmul_lane_u16 + #define vmul_lane_u16(a, b, lane) simde_vmul_lane_u16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vmul_lane_u32(simde_uint32x2_t a, simde_uint32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmul_lane_u32(a, b, lane) vmul_lane_u32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmul_lane_u32 + #define vmul_lane_u32(a, b, lane) simde_vmul_lane_u32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vmulq_lane_f32(simde_float32x4_t a, simde_float32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a); + simde_float32x2_private b_ = simde_float32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmulq_lane_f32(a, b, lane) vmulq_lane_f32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmulq_lane_f32 + #define vmulq_lane_f32(a, b, lane) simde_vmulq_lane_f32((a), 
(b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vmulq_lane_f64(simde_float64x2_t a, simde_float64x1_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a); + simde_float64x1_private b_ = simde_float64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_lane_f64(a, b, lane) vmulq_lane_f64((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_lane_f64 + #define vmulq_lane_f64(a, b, lane) simde_vmulq_lane_f64((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmulq_lane_s16(simde_int16x8_t a, simde_int16x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a); + simde_int16x4_private b_ = simde_int16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmulq_lane_s16(a, b, lane) vmulq_lane_s16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmulq_lane_s16 + #define vmulq_lane_s16(a, b, lane) simde_vmulq_lane_s16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmulq_lane_s32(simde_int32x4_t a, simde_int32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a); + simde_int32x2_private b_ = simde_int32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmulq_lane_s32(a, b, lane) vmulq_lane_s32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmulq_lane_s32 + #define vmulq_lane_s32(a, b, lane) simde_vmulq_lane_s32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmulq_lane_u16(simde_uint16x8_t a, simde_uint16x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a); + simde_uint16x4_private b_ = simde_uint16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmulq_lane_u16(a, b, lane) vmulq_lane_u16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmulq_lane_u16 + #define vmulq_lane_u16(a, b, lane) simde_vmulq_lane_u16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmulq_lane_u32(simde_uint32x4_t a, simde_uint32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a); + simde_uint32x2_private b_ = simde_uint32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) 
{ + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmulq_lane_u32(a, b, lane) vmulq_lane_u32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmulq_lane_u32 + #define vmulq_lane_u32(a, b, lane) simde_vmulq_lane_u32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vmulq_laneq_f32(simde_float32x4_t a, simde_float32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_laneq_f32(a, b, lane) vmulq_laneq_f32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_laneq_f32 + #define vmulq_laneq_f32(a, b, lane) simde_vmulq_laneq_f32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vmulq_laneq_f64(simde_float64x2_t a, simde_float64x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_laneq_f64(a, b, lane) vmulq_laneq_f64((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_laneq_f64 + #define vmulq_laneq_f64(a, b, lane) simde_vmulq_laneq_f64((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmulq_laneq_s16(simde_int16x8_t a, simde_int16x8_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_laneq_s16(a, b, lane) vmulq_laneq_s16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_laneq_s16 + #define vmulq_laneq_s16(a, b, lane) simde_vmulq_laneq_s16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmulq_laneq_s32(simde_int32x4_t a, simde_int32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_laneq_s32(a, b, lane) vmulq_laneq_s32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_laneq_s32 + #define vmulq_laneq_s32(a, b, lane) simde_vmulq_laneq_s32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t 
+simde_vmulq_laneq_u16(simde_uint16x8_t a, simde_uint16x8_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_laneq_u16(a, b, lane) vmulq_laneq_u16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_laneq_u16 + #define vmulq_laneq_u16(a, b, lane) simde_vmulq_laneq_u16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmulq_laneq_u32(simde_uint32x4_t a, simde_uint32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmulq_laneq_u32(a, b, lane) vmulq_laneq_u32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmulq_laneq_u32 + #define vmulq_laneq_u32(a, b, lane) simde_vmulq_laneq_u32((a), (b), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MUL_LANE_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mull_high.h b/lib/mmseqs/lib/simde/simde/arm/neon/mull_high.h new file mode 100644 index 0000000..658d151 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mull_high.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MULL_HIGH_H) +#define SIMDE_ARM_NEON_MULL_HIGH_H + +#include "types.h" +#include "mul.h" +#include "movl_high.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmull_high_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmull_high_s8(a, b); + #else + return simde_vmulq_s16(simde_vmovl_high_s8(a), simde_vmovl_high_s8(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_high_s8 + #define vmull_high_s8(a, b) simde_vmull_high_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmull_high_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmull_high_s16(a, b); + #else + return simde_vmulq_s32(simde_vmovl_high_s16(a), simde_vmovl_high_s16(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_high_s16 + #define vmull_high_s16(a, b) simde_vmull_high_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmull_high_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmull_high_s32(a, b); + #else + return simde_x_vmulq_s64(simde_vmovl_high_s32(a), simde_vmovl_high_s32(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_high_s32 + #define vmull_high_s32(a, b) simde_vmull_high_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmull_high_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmull_high_u8(a, b); + #else + return simde_vmulq_u16(simde_vmovl_high_u8(a), simde_vmovl_high_u8(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_high_u8 + #define vmull_high_u8(a, b) simde_vmull_high_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmull_high_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmull_high_u16(a, b); + #else + return simde_vmulq_u32(simde_vmovl_high_u16(a), simde_vmovl_high_u16(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_high_u16 + #define vmull_high_u16(a, b) simde_vmull_high_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmull_high_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmull_high_u32(a, b); + #else + return simde_x_vmulq_u64(simde_vmovl_high_u32(a), simde_vmovl_high_u32(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_high_u32 + #define vmull_high_u32(a, b) simde_vmull_high_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MULL_HIGH_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/mvn.h b/lib/mmseqs/lib/simde/simde/arm/neon/mvn.h index be6b701..0372119 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/mvn.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/mvn.h @@ -41,6 +41,8 @@ simde_int8x16_t simde_vmvnq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_s8(a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(a, _mm_cmpeq_epi8(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) @@ -74,6 +76,8 @@ simde_int16x8_t 
simde_vmvnq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_s16(a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(a, _mm_cmpeq_epi16(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) @@ -107,6 +111,8 @@ simde_int32x4_t simde_vmvnq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_s32(a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(a, _mm_cmpeq_epi32(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) @@ -140,6 +146,8 @@ simde_uint8x16_t simde_vmvnq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_u8(a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(a, _mm_cmpeq_epi8(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) @@ -173,6 +181,8 @@ simde_uint16x8_t simde_vmvnq_u16(simde_uint16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_u16(a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(a, _mm_cmpeq_epi16(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) @@ -206,6 +216,8 @@ simde_uint32x4_t simde_vmvnq_u32(simde_uint32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_u32(a); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(a, _mm_cmpeq_epi32(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/orn.h b/lib/mmseqs/lib/simde/simde/arm/neon/orn.h new file mode 100644 index 0000000..fe52b2f --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/orn.h @@ -0,0 +1,505 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ORN_H) +#define SIMDE_ARM_NEON_ORN_H + +#include "orr.h" +#include "mvn.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vorn_s8(simde_int8x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_s8(a, b); + #else + simde_int8x8_private + a_ = simde_int8x8_to_private(a), + b_ = simde_int8x8_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_s8 + #define vorn_s8(a, b) simde_vorn_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vorn_s16(simde_int16x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_s16(a, b); + #else + simde_int16x4_private + a_ = simde_int16x4_to_private(a), + b_ = simde_int16x4_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_s16 + #define vorn_s16(a, b) simde_vorn_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vorn_s32(simde_int32x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_s32(a, b); + #else + simde_int32x2_private + a_ = simde_int32x2_to_private(a), + b_ = simde_int32x2_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_s32 + #define vorn_s32(a, b) simde_vorn_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vorn_s64(simde_int64x1_t a, simde_int64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_s64(a, b); + #else + simde_int64x1_private + a_ = simde_int64x1_to_private(a), + b_ = simde_int64x1_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_s64 + #define vorn_s64(a, b) simde_vorn_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vorn_u8(simde_uint8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_u8(a, b); + #else + simde_uint8x8_private + a_ = simde_uint8x8_to_private(a), + b_ = simde_uint8x8_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return 
simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_u8 + #define vorn_u8(a, b) simde_vorn_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vorn_u16(simde_uint16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_u16(a, b); + #else + simde_uint16x4_private + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_u16 + #define vorn_u16(a, b) simde_vorn_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vorn_u32(simde_uint32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_u32(a, b); + #else + simde_uint32x2_private + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_u32 + #define vorn_u32(a, b) simde_vorn_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vorn_u64(simde_uint64x1_t a, simde_uint64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vorn_u64(a, b); + #else + simde_uint64x1_private + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vorn_u64 + #define vorn_u64(a, b) simde_vorn_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vornq_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_s8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, a, 0xf3); + #else + simde_int8x16_private + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_s8 + #define vornq_s8(a, b) simde_vornq_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vornq_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_s16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, a, 0xf3); + #else + simde_int16x8_private + a_ = simde_int16x8_to_private(a), + 
b_ = simde_int16x8_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_s16 + #define vornq_s16(a, b) simde_vornq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vornq_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_s32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, a, 0xf3); + #else + simde_int32x4_private + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_s32 + #define vornq_s32(a, b) simde_vornq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vornq_s64(simde_int64x2_t a, simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_s64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi64(a, b, a, 0xf3); + #else + simde_int64x2_private + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_s64 + #define vornq_s64(a, b) simde_vornq_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vornq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_u8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, a, 0xf3); + #else + simde_uint8x16_private + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_u8 + #define vornq_u8(a, b) simde_vornq_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vornq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_u16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, a, 0xf3); + #else + simde_uint16x8_private + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + 
r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_u16 + #define vornq_u16(a, b) simde_vornq_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vornq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_u32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, b, a, 0xf3); + #else + simde_uint32x4_private + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_u32 + #define vornq_u32(a, b) simde_vornq_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vornq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vornq_u64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_orc(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi64(a, b, a, 0xf3); + #else + simde_uint64x2_private + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values | ~(b_.values); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] | ~b_.values[i]; + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vornq_u64 + #define vornq_u64(a, b) simde_vornq_u64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ORN_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/paddl.h b/lib/mmseqs/lib/simde/simde/arm/neon/paddl.h index 28fc921..53c593d 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/paddl.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/paddl.h @@ -34,6 +34,9 @@ #include "movl.h" #include "movl_high.h" #include "padd.h" +#include "reinterpret.h" +#include "shl_n.h" +#include "shr_n.h" #include "types.h" HEDLEY_DIAGNOSTIC_PUSH @@ -136,9 +139,9 @@ simde_vpaddlq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_s8(a); #else - simde_int16x8_t lo = simde_vmovl_s8(simde_vget_low_s8(a)); - simde_int16x8_t hi = simde_vmovl_s8(simde_vget_high_s8(a)); - return simde_vpaddq_s16(lo, hi); + simde_int16x8_t lo = simde_vshrq_n_s16(simde_vshlq_n_s16(simde_vreinterpretq_s16_s8(a), 8), 8); + simde_int16x8_t hi = simde_vshrq_n_s16(simde_vreinterpretq_s16_s8(a), 8); + return simde_vaddq_s16(lo, hi); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -152,9 +155,9 @@ simde_vpaddlq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_s16(a); #else - simde_int32x4_t lo = simde_vmovl_s16(simde_vget_low_s16(a)); - simde_int32x4_t hi = simde_vmovl_s16(simde_vget_high_s16(a)); - return simde_vpaddq_s32(lo, hi); + simde_int32x4_t lo = 
simde_vshrq_n_s32(simde_vshlq_n_s32(simde_vreinterpretq_s32_s16(a), 16), 16); + simde_int32x4_t hi = simde_vshrq_n_s32(simde_vreinterpretq_s32_s16(a), 16); + return simde_vaddq_s32(lo, hi); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -167,10 +170,14 @@ simde_int64x2_t simde_vpaddlq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_s32(a); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + __m128i lo = _mm_cvtepi32_epi64(_mm_shuffle_epi32(a, 0xe8)); + __m128i hi = _mm_cvtepi32_epi64(_mm_shuffle_epi32(a, 0xed)); + return _mm_add_epi64(lo, hi); #else - simde_int64x2_t lo = simde_vmovl_s32(simde_vget_low_s32(a)); - simde_int64x2_t hi = simde_vmovl_s32(simde_vget_high_s32(a)); - return simde_vpaddq_s64(lo, hi); + simde_int64x2_t lo = simde_vshrq_n_s64(simde_vshlq_n_s64(simde_vreinterpretq_s64_s32(a), 32), 32); + simde_int64x2_t hi = simde_vshrq_n_s64(simde_vreinterpretq_s64_s32(a), 32); + return simde_vaddq_s64(lo, hi); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -184,9 +191,9 @@ simde_vpaddlq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_u8(a); #else - simde_uint16x8_t lo = simde_vmovl_u8(simde_vget_low_u8(a)); - simde_uint16x8_t hi = simde_vmovl_u8(simde_vget_high_u8(a)); - return simde_vpaddq_u16(lo, hi); + simde_uint16x8_t lo = simde_vshrq_n_u16(simde_vshlq_n_u16(simde_vreinterpretq_u16_u8(a), 8), 8); + simde_uint16x8_t hi = simde_vshrq_n_u16(simde_vreinterpretq_u16_u8(a), 8); + return simde_vaddq_u16(lo, hi); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -200,9 +207,9 @@ simde_vpaddlq_u16(simde_uint16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_u16(a); #else - simde_uint32x4_t lo = simde_vmovl_u16(simde_vget_low_u16(a)); - simde_uint32x4_t hi = simde_vmovl_u16(simde_vget_high_u16(a)); - return simde_vpaddq_u32(lo, hi); + simde_uint32x4_t lo = simde_vshrq_n_u32(simde_vshlq_n_u32(simde_vreinterpretq_u32_u16(a), 16), 16); + simde_uint32x4_t hi = simde_vshrq_n_u32(simde_vreinterpretq_u32_u16(a), 16); + return simde_vaddq_u32(lo, hi); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -216,9 +223,9 @@ simde_vpaddlq_u32(simde_uint32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_u32(a); #else - simde_uint64x2_t lo = simde_vmovl_u32(simde_vget_low_u32(a)); - simde_uint64x2_t hi = simde_vmovl_u32(simde_vget_high_u32(a)); - return simde_vpaddq_u64(lo, hi); + simde_uint64x2_t lo = simde_vshrq_n_u64(simde_vshlq_n_u64(simde_vreinterpretq_u64_u32(a), 32), 32); + simde_uint64x2_t hi = simde_vshrq_n_u64(simde_vreinterpretq_u64_u32(a), 32); + return simde_vaddq_u64(lo, hi); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/qabs.h b/lib/mmseqs/lib/simde/simde/arm/neon/qabs.h new file mode 100644 index 0000000..bc05ea0 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/qabs.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * 
included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_QABS_H) +#define SIMDE_ARM_NEON_QABS_H + +#include "types.h" + +#include "abs.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vqabsb_s8(int8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqabsb_s8(a); + #else + return a == INT8_MIN ? INT8_MAX : (a < 0 ? -a : a); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqabsb_s8 + #define vqabsb_s8(a) simde_vqabsb_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vqabsh_s16(int16_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqabsh_s16(a); + #else + return a == INT16_MIN ? INT16_MAX : (a < 0 ? -a : a); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqabsh_s16 + #define vqabsh_s16(a) simde_vqabsh_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vqabss_s32(int32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqabss_s32(a); + #else + return a == INT32_MIN ? INT32_MAX : (a < 0 ? -a : a); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqabss_s32 + #define vqabss_s32(a) simde_vqabss_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vqabsd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqabsd_s64(a); + #else + return a == INT64_MIN ? INT64_MAX : (a < 0 ? 
-a : a); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqabsd_s64 + #define vqabsd_s64(a) simde_vqabsd_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vqabs_s8(simde_int8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqabs_s8(a); + #else + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabsb_s8(a_.values[i]); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqabs_s8 + #define vqabs_s8(a) simde_vqabs_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vqabs_s16(simde_int16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqabs_s16(a); + #else + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabsh_s16(a_.values[i]); + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqabs_s16 + #define vqabs_s16(a) simde_vqabs_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vqabs_s32(simde_int32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqabs_s32(a); + #else + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabss_s32(a_.values[i]); + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqabs_s32 + #define vqabs_s32(a) simde_vqabs_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vqabs_s64(simde_int64x1_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqabs_s64(a); + #else + simde_int64x1_private + r_, + a_ = simde_int64x1_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabsd_s64(a_.values[i]); + } + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqabs_s64 + #define vqabs_s64(a) simde_vqabs_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vqabsq_s8(simde_int8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqabsq_s8(a); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabsb_s8(a_.values[i]); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqabsq_s8 + #define vqabsq_s8(a) simde_vqabsq_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vqabsq_s16(simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqabsq_s16(a); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabsh_s16(a_.values[i]); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqabsq_s16 + #define vqabsq_s16(a) simde_vqabsq_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t 
+simde_vqabsq_s32(simde_int32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqabsq_s32(a); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabss_s32(a_.values[i]); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqabsq_s32 + #define vqabsq_s32(a) simde_vqabsq_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vqabsq_s64(simde_int64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqabsq_s64(a); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqabsd_s64(a_.values[i]); + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqabsq_s64 + #define vqabsq_s64(a) simde_vqabsq_s64(a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QABS_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/qadd.h b/lib/mmseqs/lib/simde/simde/arm/neon/qadd.h index 0d5382c..89979df 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/qadd.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/qadd.h @@ -31,19 +31,97 @@ #include "types.h" #include "add.h" -#include "movl.h" -#include "qmovn.h" -#include "combine.h" -#include "get_low.h" -#include "get_high.h" #include "bsl.h" #include "cgt.h" +#include "dup_n.h" #include "sub.h" +#include + HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vqaddb_s8(int8_t a, int8_t b) { + return simde_math_adds_i8(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqaddb_s8 + #define vqaddb_s8(a, b) simde_vqaddb_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vqaddh_s16(int16_t a, int16_t b) { + return simde_math_adds_i16(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqaddh_s16 + #define vqaddh_s16(a, b) simde_vqaddh_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vqadds_s32(int32_t a, int32_t b) { + return simde_math_adds_i32(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqadds_s32 + #define vqadds_s32(a, b) simde_vqadds_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vqaddd_s64(int64_t a, int64_t b) { + return simde_math_adds_i64(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqaddd_s64 + #define vqaddd_s64(a, b) simde_vqaddd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vqaddb_u8(uint8_t a, uint8_t b) { + return simde_math_adds_u8(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqaddb_u8 + #define vqaddb_u8(a, b) simde_vqaddb_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vqaddh_u16(uint16_t a, uint16_t b) { + return simde_math_adds_u16(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqaddh_u16 + #define vqaddh_u16(a, b) simde_vqaddh_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vqadds_u32(uint32_t a, uint32_t b) { + return simde_math_adds_u32(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqadds_u32 + #define vqadds_u32(a, b) simde_vqadds_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t 
+simde_vqaddd_u64(uint64_t a, uint64_t b) { + return simde_math_adds_u64(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqaddd_u64 + #define vqaddd_u64(a, b) simde_vqaddd_u64((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vqadd_s8(simde_int8x8_t a, simde_int8x8_t b) { @@ -51,8 +129,6 @@ simde_vqadd_s8(simde_int8x8_t a, simde_int8x8_t b) { return vqadd_s8(a, b); #elif defined(SIMDE_X86_MMX_NATIVE) return _mm_adds_pi8(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vqmovn_s16(simde_vaddq_s16(simde_vmovl_s8(a), simde_vmovl_s8(b))); #else simde_int8x8_private r_, @@ -61,7 +137,7 @@ simde_vqadd_s8(simde_int8x8_t a, simde_int8x8_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqmovnh_s16(HEDLEY_STATIC_CAST(int16_t, a_.values[i]) + HEDLEY_STATIC_CAST(int16_t, b_.values[i])); + r_.values[i] = simde_vqaddb_s8(a_.values[i], b_.values[i]); } return simde_int8x8_from_private(r_); @@ -79,8 +155,6 @@ simde_vqadd_s16(simde_int16x4_t a, simde_int16x4_t b) { return vqadd_s16(a, b); #elif defined(SIMDE_X86_MMX_NATIVE) return _mm_adds_pi16(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vqmovn_s32(simde_vaddq_s32(simde_vmovl_s16(a), simde_vmovl_s16(b))); #else simde_int16x4_private r_, @@ -89,7 +163,7 @@ simde_vqadd_s16(simde_int16x4_t a, simde_int16x4_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqmovns_s32(HEDLEY_STATIC_CAST(int32_t, a_.values[i]) + HEDLEY_STATIC_CAST(int32_t, b_.values[i])); + r_.values[i] = simde_vqaddh_s16(a_.values[i], b_.values[i]); } return simde_int16x4_from_private(r_); @@ -105,8 +179,6 @@ simde_int32x2_t simde_vqadd_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_s32(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vqmovn_s64(simde_vaddq_s64(simde_vmovl_s32(a), simde_vmovl_s32(b))); #else simde_int32x2_private r_, @@ -115,7 +187,7 @@ simde_vqadd_s32(simde_int32x2_t a, simde_int32x2_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqmovnd_s64(HEDLEY_STATIC_CAST(int64_t, a_.values[i]) + HEDLEY_STATIC_CAST(int64_t, b_.values[i])); + r_.values[i] = simde_vqadds_s32(a_.values[i], b_.values[i]); } return simde_int32x2_from_private(r_); @@ -139,14 +211,9 @@ simde_vqadd_s64(simde_int64x1_t a, simde_int64x1_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - if (a_.values[i] > 0 && b_.values[i] > 0) { - r_.values[i] = (a_.values[i] > (INT64_MAX - b_.values[i])) ? INT64_MAX : a_.values[i] + b_.values[i]; - } else if (a_.values[i] < 0 && b_.values[i] < 0) { - r_.values[i] = (a_.values[i] < (INT64_MIN - b_.values[i])) ? 
INT64_MIN : a_.values[i] + b_.values[i]; - } else { - r_.values[i] = a_.values[i] + b_.values[i]; - } + r_.values[i] = simde_vqaddd_s64(a_.values[i], b_.values[i]); } + return simde_int64x1_from_private(r_); #endif } @@ -162,25 +229,16 @@ simde_vqadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) { return vqadd_u8(a, b); #elif defined(SIMDE_X86_MMX_NATIVE) return _mm_adds_pu8(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint8x8_t max = simde_vdup_n_u8(UINT8_MAX); - return simde_vbsl_u8(simde_vcgt_u8(a, simde_vsub_u8(max, b)), max, simde_vadd_u8(a, b)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT8_MAX - b_.values)); - r_.values = m & UINT8_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT8_MAX - b_.values[i]) ? UINT8_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddb_u8(a_.values[i], b_.values[i]); + } return simde_uint8x8_from_private(r_); #endif @@ -197,25 +255,16 @@ simde_vqadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) { return vqadd_u16(a, b); #elif defined(SIMDE_X86_MMX_NATIVE) return _mm_adds_pu16(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint16x4_t max = simde_vdup_n_u16(UINT16_MAX); - return simde_vbsl_u16(simde_vcgt_u16(a, simde_vsub_u16(max, b)), max, simde_vadd_u16(a, b)); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT16_MAX - b_.values)); - r_.values = m & UINT16_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT16_MAX - b_.values[i]) ? UINT16_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddh_u16(a_.values[i], b_.values[i]); + } return simde_uint16x4_from_private(r_); #endif @@ -230,25 +279,16 @@ simde_uint32x2_t simde_vqadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_u32(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint32x2_t max = simde_vdup_n_u32(UINT32_MAX); - return simde_vbsl_u32(simde_vcgt_u32(a, simde_vsub_u32(max, b)), max, simde_vadd_u32(a, b)); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT32_MAX - b_.values)); - r_.values = m & UINT32_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > (UINT32_MAX - b_.values[i])) ? 
UINT32_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqadds_u32(a_.values[i], b_.values[i]); + } return simde_uint32x2_from_private(r_); #endif @@ -263,25 +303,16 @@ simde_uint64x1_t simde_vqadd_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_u64(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint64x1_t max = simde_vdup_n_u64(UINT64_MAX); - return simde_vbsl_u64(simde_vcgt_u64(a, simde_vsub_u64(max, b)), max, simde_vadd_u64(a, b)); #else simde_uint64x1_private r_, a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT64_MAX - b_.values)); - r_.values = m & UINT64_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT64_MAX - b_.values[i]) ? UINT64_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddd_u64(a_.values[i], b_.values[i]); + } return simde_uint64x1_from_private(r_); #endif @@ -302,8 +333,6 @@ simde_vqaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { return _mm_adds_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vcombine_s8(simde_vqadd_s8(simde_vget_low_s8(a), simde_vget_low_s8(b)), simde_vqadd_s8(simde_vget_high_s8(a), simde_vget_high_s8(b))); #else simde_int8x16_private r_, @@ -312,7 +341,7 @@ simde_vqaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqmovnh_s16(HEDLEY_STATIC_CAST(int16_t, a_.values[i]) + HEDLEY_STATIC_CAST(int16_t, b_.values[i])); + r_.values[i] = simde_vqaddb_s8(a_.values[i], b_.values[i]); } return simde_int8x16_from_private(r_); @@ -334,8 +363,6 @@ simde_vqaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { return _mm_adds_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vcombine_s16(simde_vqadd_s16(simde_vget_low_s16(a), simde_vget_low_s16(b)), simde_vqadd_s16(simde_vget_high_s16(a), simde_vget_high_s16(b))); #else simde_int16x8_private r_, @@ -344,7 +371,7 @@ simde_vqaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqmovns_s32(HEDLEY_STATIC_CAST(int32_t, a_.values[i]) + HEDLEY_STATIC_CAST(int32_t, b_.values[i])); + r_.values[i] = simde_vqaddh_s16(a_.values[i], b_.values[i]); } return simde_int16x8_from_private(r_); @@ -364,8 +391,6 @@ simde_vqaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { return _mm256_cvtsepi64_epi32(_mm256_add_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b))); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vcombine_s32(simde_vqadd_s32(simde_vget_low_s32(a), simde_vget_low_s32(b)), simde_vqadd_s32(simde_vget_high_s32(a), simde_vget_high_s32(b))); #else simde_int32x4_private r_, @@ -374,7 +399,7 @@ simde_vqaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / 
sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqmovnd_s64(HEDLEY_STATIC_CAST(int64_t, a_.values[i]) + HEDLEY_STATIC_CAST(int64_t, b_.values[i])); + r_.values[i] = simde_vqadds_s32(a_.values[i], b_.values[i]); } return simde_int32x4_from_private(r_); @@ -398,14 +423,9 @@ simde_vqaddq_s64(simde_int64x2_t a, simde_int64x2_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - if (a_.values[i] > 0 && b_.values[i] > 0) { - r_.values[i] = (a_.values[i] > (INT64_MAX - b_.values[i])) ? INT64_MAX : a_.values[i] + b_.values[i]; - } else if (a_.values[i] < 0 && b_.values[i] < 0) { - r_.values[i] = (a_.values[i] < (INT64_MIN - b_.values[i])) ? INT64_MIN : a_.values[i] + b_.values[i]; - } else { - r_.values[i] = a_.values[i] + b_.values[i]; - } + r_.values[i] = simde_vqaddd_s64(a_.values[i], b_.values[i]); } + return simde_int64x2_from_private(r_); #endif } @@ -425,25 +445,16 @@ simde_vqaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { return _mm_adds_epu8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint8x16_t max = simde_vdupq_n_u8(UINT8_MAX); - return simde_vbslq_u8(simde_vcgtq_u8(a, simde_vsubq_u8(max, b)), max, simde_vaddq_u8(a, b)); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT8_MAX - b_.values)); - r_.values = m & UINT8_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT8_MAX - b_.values[i]) ? UINT8_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddb_u8(a_.values[i], b_.values[i]); + } return simde_uint8x16_from_private(r_); #endif @@ -464,25 +475,16 @@ simde_vqaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { return _mm_adds_epu16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint16x8_t max = simde_vdupq_n_u16(UINT16_MAX); - return simde_vbslq_u16(simde_vcgtq_u16(a, simde_vsubq_u16(max, b)), max, simde_vaddq_u16(a, b)); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT16_MAX - b_.values)); - r_.values = m & UINT16_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT16_MAX - b_.values[i]) ? 
UINT16_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddh_u16(a_.values[i], b_.values[i]); + } return simde_uint16x8_from_private(r_); #endif @@ -499,25 +501,16 @@ simde_vqaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { return vqaddq_u32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint32x4_t max = simde_vdupq_n_u32(UINT32_MAX); - return simde_vbslq_u32(simde_vcgtq_u32(a, simde_vsubq_u32(max, b)), max, simde_vaddq_u32(a, b)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT32_MAX - b_.values)); - r_.values = m & UINT32_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT32_MAX - b_.values[i]) ? UINT32_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqadds_u32(a_.values[i], b_.values[i]); + } return simde_uint32x4_from_private(r_); #endif @@ -541,16 +534,10 @@ simde_vqaddq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SCALAR) - __typeof__(r_.values) m = HEDLEY_STATIC_CAST(__typeof__(r_.values), a_.values > (UINT64_MAX - b_.values)); - r_.values = m & UINT64_MAX; - r_.values |= ~m & (a_.values + b_.values); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > UINT64_MAX - b_.values[i]) ? UINT64_MAX : a_.values[i] + b_.values[i]; - } - #endif + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddd_u64(a_.values[i], b_.values[i]); + } return simde_uint64x2_from_private(r_); #endif diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/qneg.h b/lib/mmseqs/lib/simde/simde/arm/neon/qneg.h new file mode 100644 index 0000000..cd5e4ec --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/qneg.h @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_QNEG_H) +#define SIMDE_ARM_NEON_QNEG_H + +#include "types.h" + +#if !defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE) || 1 + #include "dup_n.h" + #include "max.h" + #include "neg.h" +#endif + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vqnegb_s8(int8_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqnegb_s8(a); + #else + return a == INT8_MIN ? INT8_MAX : -a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqnegb_s8 + #define vqnegb_s8(a) simde_vqnegb_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vqnegh_s16(int16_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqnegh_s16(a); + #else + return a == INT16_MIN ? INT16_MAX : -a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqnegh_s16 + #define vqnegh_s16(a) simde_vqnegh_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vqnegs_s32(int32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqnegs_s32(a); + #else + return a == INT32_MIN ? INT32_MAX : -a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqnegs_s32 + #define vqnegs_s32(a) simde_vqnegs_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vqnegd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqnegd_s64(a); + #else + return a == INT64_MIN ? INT64_MAX : -a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqnegd_s64 + #define vqnegd_s64(a) simde_vqnegd_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vqneg_s8(simde_int8x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqneg_s8(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(64) + return simde_vneg_s8(simde_vmax_s8(a, simde_vdup_n_s8(INT8_MIN + 1))); + #else + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT8_MIN) ? INT8_MAX : -(a_.values[i]); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqneg_s8 + #define vqneg_s8(a) simde_vqneg_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vqneg_s16(simde_int16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqneg_s16(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(64) + return simde_vneg_s16(simde_vmax_s16(a, simde_vdup_n_s16(INT16_MIN + 1))); + #else + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT16_MIN) ? INT16_MAX : -(a_.values[i]); + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqneg_s16 + #define vqneg_s16(a) simde_vqneg_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vqneg_s32(simde_int32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqneg_s32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(64) + return simde_vneg_s32(simde_vmax_s32(a, simde_vdup_n_s32(INT32_MIN + 1))); + #else + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT32_MIN) ? 
INT32_MAX : -(a_.values[i]); + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqneg_s32 + #define vqneg_s32(a) simde_vqneg_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vqneg_s64(simde_int64x1_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqneg_s64(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vneg_s64(simde_x_vmax_s64(a, simde_vdup_n_s64(INT64_MIN + 1))); + #else + simde_int64x1_private + r_, + a_ = simde_int64x1_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT64_MIN) ? INT64_MAX : -(a_.values[i]); + } + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqneg_s64 + #define vqneg_s64(a) simde_vqneg_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vqnegq_s8(simde_int8x16_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqnegq_s8(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vnegq_s8(simde_vmaxq_s8(a, simde_vdupq_n_s8(INT8_MIN + 1))); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT8_MIN) ? INT8_MAX : -(a_.values[i]); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqnegq_s8 + #define vqnegq_s8(a) simde_vqnegq_s8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vqnegq_s16(simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqnegq_s16(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vnegq_s16(simde_vmaxq_s16(a, simde_vdupq_n_s16(INT16_MIN + 1))); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT16_MIN) ? INT16_MAX : -(a_.values[i]); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqnegq_s16 + #define vqnegq_s16(a) simde_vqnegq_s16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vqnegq_s32(simde_int32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqnegq_s32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vnegq_s32(simde_vmaxq_s32(a, simde_vdupq_n_s32(INT32_MIN + 1))); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT32_MIN) ? INT32_MAX : -(a_.values[i]); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqnegq_s32 + #define vqnegq_s32(a) simde_vqnegq_s32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vqnegq_s64(simde_int64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqnegq_s64(a); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vnegq_s64(simde_x_vmaxq_s64(a, simde_vdupq_n_s64(INT64_MIN + 1))); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] == INT64_MIN) ? 
INT64_MAX : -(a_.values[i]); + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqnegq_s64 + #define vqnegq_s64(a) simde_vqnegq_s64(a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QNEG_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/qshl.h b/lib/mmseqs/lib/simde/simde/arm/neon/qshl.h new file mode 100644 index 0000000..279afe7 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/qshl.h @@ -0,0 +1,732 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + * 2020 Christopher Moore + */ + +#if !defined(SIMDE_ARM_NEON_QSHL_H) +#define SIMDE_ARM_NEON_QSHL_H + +#include "types.h" +#include "cls.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vqshlb_s8(int8_t a, int8_t b) { + int8_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vqshlb_s8(a, b); + #else + if (b < -7) + b = -7; + + if (b <= 0) { + r = a >> -b; + } else if (b < 7) { + r = HEDLEY_STATIC_CAST(int8_t, a << b); + if ((r >> b) != a) { + r = (a < 0) ? INT8_MIN : INT8_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = (a < 0) ? INT8_MIN : INT8_MAX; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlb_s8 + #define vqshlb_s8(a, b) simde_vqshlb_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vqshlh_s16(int16_t a, int16_t b) { + int16_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vqshlh_s16(a, b); + #else + int8_t b8 = HEDLEY_STATIC_CAST(int8_t, b); + + if (b8 < -15) + b8 = -15; + + if (b8 <= 0) { + r = a >> -b8; + } else if (b8 < 15) { + r = HEDLEY_STATIC_CAST(int16_t, a << b8); + if ((r >> b8) != a) { + r = (a < 0) ? INT16_MIN : INT16_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = (a < 0) ? 
INT16_MIN : INT16_MAX; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlh_s16 + #define vqshlh_s16(a, b) simde_vqshlh_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vqshls_s32(int32_t a, int32_t b) { + int32_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vqshls_s32(a, b); + #else + int8_t b8 = HEDLEY_STATIC_CAST(int8_t, b); + + if (b8 < -31) + b8 = -31; + + if (b8 <= 0) { + r = a >> -b8; + } else if (b8 < 31) { + r = HEDLEY_STATIC_CAST(int32_t, a << b8); + if ((r >> b8) != a) { + r = (a < 0) ? INT32_MIN : INT32_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = (a < 0) ? INT32_MIN : INT32_MAX; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshls_s32 + #define vqshls_s32(a, b) simde_vqshls_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vqshld_s64(int64_t a, int64_t b) { + int64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vqshld_s64(a, b); + #else + int8_t b8 = HEDLEY_STATIC_CAST(int8_t, b); + + if (b8 < -63) + b8 = -63; + + if (b8 <= 0) { + r = a >> -b8; + } else if (b8 < 63) { + r = HEDLEY_STATIC_CAST(int64_t, a << b8); + if ((r >> b8) != a) { + r = (a < 0) ? INT64_MIN : INT64_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = (a < 0) ? INT64_MIN : INT64_MAX; + } + #endif + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshld_s64 + #define vqshld_s64(a, b) simde_vqshld_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vqshlb_u8(uint8_t a, int8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0) + return vqshlb_u8(a, HEDLEY_STATIC_CAST(uint8_t, b)); + #elif HEDLEY_HAS_WARNING("-Wsign-conversion") + /* https://github.com/llvm/llvm-project/commit/f0a78bdfdc6d56b25e0081884580b3960a3c2429 */ + HEDLEY_DIAGNOSTIC_PUSH + #pragma clang diagnostic ignored "-Wsign-conversion" + return vqshlb_u8(a, b); + HEDLEY_DIAGNOSTIC_POP + #else + return vqshlb_u8(a, b); + #endif + #else + uint8_t r; + + if (b < -7) + b = -7; + + if (b <= 0) { + r = a >> -b; + } else if (b < 7) { + r = HEDLEY_STATIC_CAST(uint8_t, a << b); + if ((r >> b) != a) { + r = UINT8_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = UINT8_MAX; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlb_u8 + #define vqshlb_u8(a, b) simde_vqshlb_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vqshlh_u16(uint16_t a, int16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0) + return vqshlh_u16(a, HEDLEY_STATIC_CAST(uint16_t, b)); + #elif HEDLEY_HAS_WARNING("-Wsign-conversion") + HEDLEY_DIAGNOSTIC_PUSH + #pragma clang diagnostic ignored "-Wsign-conversion" + return vqshlh_u16(a, b); + HEDLEY_DIAGNOSTIC_POP + #else + return vqshlh_u16(a, b); + #endif + #else + uint16_t r; + + if (b < -15) + b = -15; + + if (b <= 0) { + r = a >> -b; + } else if (b < 15) { + r = HEDLEY_STATIC_CAST(uint16_t, a << b); + if ((r >> b) != a) { + r = UINT16_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = UINT16_MAX; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlh_u16 + #define vqshlh_u16(a, b) simde_vqshlh_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vqshls_u32(uint32_t a, int32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if 
defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0) + return vqshls_u32(a, HEDLEY_STATIC_CAST(uint16_t, b)); + #elif HEDLEY_HAS_WARNING("-Wsign-conversion") + HEDLEY_DIAGNOSTIC_PUSH + #pragma clang diagnostic ignored "-Wsign-conversion" + return vqshls_u32(a, b); + HEDLEY_DIAGNOSTIC_POP + #else + return vqshls_u32(a, b); + #endif + #else + uint32_t r; + + if (b < -31) + b = -31; + + if (b <= 0) { + r = HEDLEY_STATIC_CAST(uint32_t, a >> -b); + } else if (b < 31) { + r = a << b; + if ((r >> b) != a) { + r = UINT32_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = UINT32_MAX; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshls_u32 + #define vqshls_u32(a, b) simde_vqshls_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vqshld_u64(uint64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0) + return vqshld_u64(a, HEDLEY_STATIC_CAST(uint16_t, b)); + #elif HEDLEY_HAS_WARNING("-Wsign-conversion") + HEDLEY_DIAGNOSTIC_PUSH + #pragma clang diagnostic ignored "-Wsign-conversion" + return vqshld_u64(a, b); + HEDLEY_DIAGNOSTIC_POP + #else + return vqshld_u64(a, b); + #endif + #else + uint64_t r; + + if (b < -63) + b = -63; + + if (b <= 0) { + r = a >> -b; + } else if (b < 63) { + r = HEDLEY_STATIC_CAST(uint64_t, a << b); + if ((r >> b) != a) { + r = UINT64_MAX; + } + } else if (a == 0) { + r = 0; + } else { + r = UINT64_MAX; + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshldb_u64 + #define vqshld_u64(a, b) simde_vqshld_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vqshl_s8 (const simde_int8x8_t a, const simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_s8(a, b); + #else + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a), + b_ = simde_int8x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlb_s8(a_.values[i], b_.values[i]); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_s8 + #define vqshl_s8(a, b) simde_vqshl_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vqshl_s16 (const simde_int16x4_t a, const simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_s16(a, b); + #else + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a), + b_ = simde_int16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlh_s16(a_.values[i], b_.values[i]); + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_s16 + #define vqshl_s16(a, b) simde_vqshl_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vqshl_s32 (const simde_int32x2_t a, const simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_s32(a, b); + #else + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a), + b_ = simde_int32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshls_s32(a_.values[i], b_.values[i]); + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_s32 + 
#define vqshl_s32(a, b) simde_vqshl_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vqshl_s64 (const simde_int64x1_t a, const simde_int64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_s64(a, b); + #else + simde_int64x1_private + r_, + a_ = simde_int64x1_to_private(a), + b_ = simde_int64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshld_s64(a_.values[i], b_.values[i]); + } + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_s64 + #define vqshl_s64(a, b) simde_vqshl_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vqshl_u8 (const simde_uint8x8_t a, const simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_u8(a, b); + #else + simde_uint8x8_private + r_, + a_ = simde_uint8x8_to_private(a); + simde_int8x8_private + b_ = simde_int8x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlb_u8(a_.values[i], b_.values[i]); + } + + return simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_u8 + #define vqshl_u8(a, b) simde_vqshl_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vqshl_u16 (const simde_uint16x4_t a, const simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_u16(a, b); + #else + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a); + simde_int16x4_private + b_ = simde_int16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlh_u16(a_.values[i], b_.values[i]); + } + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_u16 + #define vqshl_u16(a, b) simde_vqshl_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vqshl_u32 (const simde_uint32x2_t a, const simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_u32(a, b); + #else + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a); + simde_int32x2_private + b_ = simde_int32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshls_u32(a_.values[i], b_.values[i]); + } + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_u32 + #define vqshl_u32(a, b) simde_vqshl_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vqshl_u64 (const simde_uint64x1_t a, const simde_int64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshl_u64(a, b); + #else + simde_uint64x1_private + r_, + a_ = simde_uint64x1_to_private(a); + simde_int64x1_private + b_ = simde_int64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshld_u64(a_.values[i], b_.values[i]); + } + + return simde_uint64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshl_u64 + #define vqshl_u64(a, b) simde_vqshl_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vqshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_s8(a, b); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlb_s8(a_.values[i], b_.values[i]); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_s8 + #define vqshlq_s8(a, b) simde_vqshlq_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vqshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_s16(a, b); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlh_s16(a_.values[i], b_.values[i]); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_s16 + #define vqshlq_s16(a, b) simde_vqshlq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vqshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_s32(a, b); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshls_s32(a_.values[i], b_.values[i]); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_s32 + #define vqshlq_s32(a, b) simde_vqshlq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vqshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_s64(a, b); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshld_s64(a_.values[i], b_.values[i]); + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_s64 + #define vqshlq_s64(a, b) simde_vqshlq_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vqshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_u8(a, b); + #else + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a); + simde_int8x16_private + b_ = simde_int8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlb_u8(a_.values[i], b_.values[i]); + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_u8 + #define vqshlq_u8(a, b) simde_vqshlq_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vqshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_u16(a, b); + #else + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a); + simde_int16x8_private + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshlh_u16(a_.values[i], b_.values[i]); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_u16 + #define vqshlq_u16(a, b) simde_vqshlq_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vqshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_u32(a, b); + #else + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a); + simde_int32x4_private + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshls_u32(a_.values[i], b_.values[i]); + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_u32 + #define vqshlq_u32(a, b) simde_vqshlq_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vqshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqshlq_u64(a, b); + #else + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a); + simde_int64x2_private + b_ = simde_int64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqshld_u64(a_.values[i], b_.values[i]); + } + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlq_u64 + #define vqshlq_u64(a, b) simde_vqshlq_u64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QSHL_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/qsub.h b/lib/mmseqs/lib/simde/simde/arm/neon/qsub.h new file mode 100644 index 0000000..bd7a6bc --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/qsub.h @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_QSUB_H) +#define SIMDE_ARM_NEON_QSUB_H + +#include "types.h" + +#include "sub.h" +#include "bsl.h" +#include "cgt.h" +#include "dup_n.h" +#include "sub.h" + +#include + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vqsubb_s8(int8_t a, int8_t b) { + return simde_math_subs_i8(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubb_s8 + #define vqsubb_s8(a, b) simde_vqsubb_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vqsubh_s16(int16_t a, int16_t b) { + return simde_math_subs_i16(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubh_s16 + #define vqsubh_s16(a, b) simde_vqsubh_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vqsubs_s32(int32_t a, int32_t b) { + return simde_math_subs_i32(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubs_s32 + #define vqsubs_s32(a, b) simde_vqsubs_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vqsubd_s64(int64_t a, int64_t b) { + return simde_math_subs_i64(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubd_s64 + #define vqsubd_s64(a, b) simde_vqsubd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vqsubb_u8(uint8_t a, uint8_t b) { + return simde_math_subs_u8(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubb_u8 + #define vqsubb_u8(a, b) simde_vqsubb_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vqsubh_u16(uint16_t a, uint16_t b) { + return simde_math_subs_u16(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubh_u16 + #define vqsubh_u16(a, b) simde_vqsubh_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vqsubs_u32(uint32_t a, uint32_t b) { + return simde_math_subs_u32(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubs_u32 + #define vqsubs_u32(a, b) simde_vqsubs_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vqsubd_u64(uint64_t a, uint64_t b) { + return simde_math_subs_u64(a, b); +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqsubd_u64 + #define vqsubd_u64(a, b) simde_vqsubd_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vqsub_s8(simde_int8x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_s8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_subs_pi8(a, b); + #else + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a), + b_ = simde_int8x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_s8 + #define vqsub_s8(a, b) simde_vqsub_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vqsub_s16(simde_int16x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_s16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_subs_pi16(a, b); + #else + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a), + b_ = simde_int16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = 
simde_vqsubh_s16(a_.values[i], b_.values[i]); + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_s16 + #define vqsub_s16(a, b) simde_vqsub_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vqsub_s32(simde_int32x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_s32(a, b); + #else + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a), + b_ = simde_int32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]); + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_s32 + #define vqsub_s32(a, b) simde_vqsub_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vqsub_s64(simde_int64x1_t a, simde_int64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_s64(a, b); + #else + simde_int64x1_private + r_, + a_ = simde_int64x1_to_private(a), + b_ = simde_int64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]); + } + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_s64 + #define vqsub_s64(a, b) simde_vqsub_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vqsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_u8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_subs_pu8(a, b); + #else + simde_uint8x8_private + r_, + a_ = simde_uint8x8_to_private(a), + b_ = simde_uint8x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]); + } + + return simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_u8 + #define vqsub_u8(a, b) simde_vqsub_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vqsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_u16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_subs_pu16(a, b); + #else + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]); + } + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_u16 + #define vqsub_u16(a, b) simde_vqsub_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vqsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_u32(a, b); + #else + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]); + } + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_u32 + #define vqsub_u32(a, b) 
simde_vqsub_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vqsub_u64(simde_uint64x1_t a, simde_uint64x1_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsub_u64(a, b); + #else + simde_uint64x1_private + r_, + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]); + } + + return simde_uint64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsub_u64 + #define vqsub_u64(a, b) simde_vqsub_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vqsubq_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_s8(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i8x16_sub_saturate(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_subs_epi8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_subs(a, b); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_s8 + #define vqsubq_s8(a, b) simde_vqsubq_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vqsubq_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_s16(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_sub_saturate(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_subs_epi16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_subs(a, b); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_s16 + #define vqsubq_s16(a, b) simde_vqsubq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vqsubq_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_s32(a, b); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cvtsepi64_epi32(_mm256_sub_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b))); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_subs(a, b); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_s32 + #define vqsubq_s32(a, b) simde_vqsubq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vqsubq_s64(simde_int64x2_t a, simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_s64(a, b); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(b); + + SIMDE_VECTORIZE + 
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]); + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_s64 + #define vqsubq_s64(a, b) simde_vqsubq_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vqsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_u8(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u8x16_sub_saturate(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_subs_epu8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_subs(a, b); + #else + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]); + } + + return simde_uint8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_u8 + #define vqsubq_u8(a, b) simde_vqsubq_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vqsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_u16(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u16x8_sub_saturate(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_subs_epu16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_subs(a, b); + #else + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_u16 + #define vqsubq_u16(a, b) simde_vqsubq_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vqsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_u32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_subs(a, b); + #else + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]); + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_u32 + #define vqsubq_u32(a, b) simde_vqsubq_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vqsubq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqsubq_u64(a, b); + #else + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]); + } + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqsubq_u64 + #define vqsubq_u64(a, b) simde_vqsubq_u64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QSUB_H) */ diff --git 
a/lib/mmseqs/lib/simde/simde/arm/neon/rbit.h b/lib/mmseqs/lib/simde/simde/arm/neon/rbit.h index e62f762..f98bc1f 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/rbit.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/rbit.h @@ -25,6 +25,10 @@ * 2020 Christopher Moore */ +/* The GFNI implementation is based on Wojciech Muła's work at + * http://0x80.pl/articles/avx512-galois-field-for-bit-shuffling.html#bit-shuffling via + * https://github.com/InstLatx64/InstLatX64_Demo/blob/49c27effdfd5a45f27e0ccb6e2f3be5f27c3845d/GFNI_Demo.h#L173 */ + #if !defined(SIMDE_ARM_NEON_RBIT_H) #define SIMDE_ARM_NEON_RBIT_H @@ -40,6 +44,10 @@ simde_uint8x8_t simde_vrbit_u8(simde_uint8x8_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrbit_u8(a); + #elif defined(SIMDE_X86_MMX_NATIVE) && defined(SIMDE_X86_GFNI_NATIVE) + __m128i tmp = _mm_movpi64_epi64(a); + tmp = _mm_gf2p8affine_epi64_epi8(tmp, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); + return _mm_movepi64_pi64(tmp); #elif defined(SIMDE_X86_MMX_NATIVE) __m64 mask; mask = _mm_set1_pi8(0x55); @@ -56,7 +64,11 @@ simde_vrbit_u8(simde_uint8x8_t a) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); + #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION) + r_.values[i] = __builtin_bitreverse8(a_.values[i]); + #else + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); + #endif } return simde_uint8x8_from_private(r_); @@ -86,6 +98,8 @@ simde_uint8x16_t simde_vrbitq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrbitq_u8(a); + #elif defined(SIMDE_X86_GFNI_NATIVE) + return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); #elif defined(SIMDE_X86_SSE2_NATIVE) __m128i mask; mask = _mm_set1_epi8(0x55); @@ -116,7 +130,11 @@ simde_vrbitq_u8(simde_uint8x16_t a) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); + #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION) + r_.values[i] = __builtin_bitreverse8(a_.values[i]); + #else + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); + #endif } return simde_uint8x16_from_private(r_); diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/rnd.h b/lib/mmseqs/lib/simde/simde/arm/neon/rnd.h new file mode 100644 index 0000000..4387942 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/rnd.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of 
the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_RND_H) +#define SIMDE_ARM_NEON_RND_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vrnd_f32(simde_float32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) + return vrnd_f32(a); + #else + simde_float32x2_private + r_, + a_ = simde_float32x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_truncf(a_.values[i]); + } + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrnd_f32 + #define vrnd_f32(a) simde_vrnd_f32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vrnd_f64(simde_float64x1_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrnd_f64(a); + #else + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_trunc(a_.values[i]); + } + + return simde_float64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrnd_f64 + #define vrnd_f64(a) simde_vrnd_f64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vrndq_f32(simde_float32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) + return vrndq_f32(a); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + return _mm_round_ps(a, _MM_FROUND_TO_ZERO); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + return _mm_trunc_ps(a); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_trunc(a); + #else + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_truncf(a_.values[i]); + } + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrndq_f32 + #define vrndq_f32(a) simde_vrndq_f32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vrndq_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrndq_f64(a); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + return _mm_round_pd(a, _MM_FROUND_TO_ZERO); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + return _mm_trunc_pd(a); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_trunc(a); + #else + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_trunc(a_.values[i]); + } + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrndq_f64 + #define vrndq_f64(a) simde_vrndq_f64(a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* 
!defined(SIMDE_ARM_NEON_RND_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/shl.h b/lib/mmseqs/lib/simde/simde/arm/neon/shl.h index c01c76b..4061ad7 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/shl.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/shl.h @@ -408,7 +408,7 @@ simde_vshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed char) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs, b_max; - SIMDE_POWER_ALTIVEC_VECTOR(bool char) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) b_mask; b_abs = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_abs(b)); b_max = vec_splat_u8(7); #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) @@ -463,7 +463,7 @@ simde_vshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed short) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) b_abs, b_max; - SIMDE_POWER_ALTIVEC_VECTOR(bool short) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL short) b_mask; b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0xFF))); @@ -512,7 +512,7 @@ simde_vshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed int) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) b_abs, b_max; - SIMDE_POWER_ALTIVEC_VECTOR(bool int) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) b_mask; b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0xFF))); @@ -570,7 +570,7 @@ simde_vshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) b_abs, b_max; - SIMDE_POWER_ALTIVEC_VECTOR(bool long long) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL long long) b_mask; b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 0xFF))); @@ -622,7 +622,7 @@ simde_vshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) { return _mm256_cvtepi16_epi8(r256); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs; - SIMDE_POWER_ALTIVEC_VECTOR(bool char) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) b_mask; b_abs = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_abs(b)); b_mask = vec_cmplt(b, vec_splat_s8(0)); return vec_and(vec_sel(vec_sl(a, b_abs), vec_sr(a, b_abs), b_mask), @@ -670,7 +670,7 @@ simde_vshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { return _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) b_abs; - SIMDE_POWER_ALTIVEC_VECTOR(bool short) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL short) b_mask; b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), 
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0xFF))); @@ -717,7 +717,7 @@ simde_vshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) { _mm_cmpgt_epi32(_mm_setzero_si128(), b_)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) b_abs; - SIMDE_POWER_ALTIVEC_VECTOR(bool int) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) b_mask; b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0xFF))); @@ -764,7 +764,7 @@ simde_vshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b, 56))); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) b_abs; - SIMDE_POWER_ALTIVEC_VECTOR(bool long long) b_mask; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL long long) b_mask; b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 0xFF))); diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/shl_n.h b/lib/mmseqs/lib/simde/simde/arm/neon/shl_n.h index c4362da..a6fbd5b 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/shl_n.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/shl_n.h @@ -272,7 +272,10 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vshlq_n_s8 (const simde_int8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { - #if defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_GFNI_NATIVE) + /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ + return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(INT64_C(0x0102040810204080) >> (n * 8)), 0); + #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << n) - 1)), _mm_slli_epi64(a, n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i8x16_shl(a, n); @@ -412,7 +415,10 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vshlq_n_u8 (const simde_uint8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { - #if defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_GFNI_NATIVE) + /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ + return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(INT64_C(0x0102040810204080) >> (n * 8)), 0); + #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_andnot_si128(_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << n) - 1)), _mm_slli_epi64((a), (n))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i8x16_shl((a), (n)); diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/shr_n.h b/lib/mmseqs/lib/simde/simde/arm/neon/shr_n.h index acddf97..95c7d67 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/shr_n.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/shr_n.h @@ -291,7 +291,12 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vshrq_n_s8 (const simde_int8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) { - #if defined(SIMDE_X86_SSE4_1_NATIVE) + #if defined(SIMDE_X86_GFNI_NATIVE) + /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ + const int shift = (n <= 7) ? 
n : 7; + const uint64_t matrix = (UINT64_C(0x8182848890A0C000) << (shift * 8)) ^ UINT64_C(0x8080808080808080); + return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, matrix)), 0); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_blendv_epi8(_mm_srai_epi16((a), (n)), _mm_srai_epi16(_mm_slli_epi16((a), 8), 8 + (n)), @@ -442,7 +447,10 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vshrq_n_u8 (const simde_uint8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) { - #if defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_GFNI_NATIVE) + /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ + return (n > 7) ? _mm_setzero_si128() : _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(INT64_C(0x0102040810204080) << (n * 8)), 0); + #elif defined(SIMDE_X86_SSE2_NATIVE) return _mm_and_si128(_mm_srli_epi64((a), (n)), _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << (8 - (n))) - 1))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return (((n) == 8) ? wasm_i8x16_splat(0) : wasm_u8x16_shr((a), (n))); diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/subw.h b/lib/mmseqs/lib/simde/simde/arm/neon/subw.h new file mode 100644 index 0000000..51d6cf4 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/subw.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
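// Sketch of the GFNI byte-shift trick referenced above via
// https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/
// (illustrative sketch, not part of the patch). _mm_gf2p8affine_epi64_epi8
// multiplies every byte, viewed as a GF(2) bit vector, by an 8x8 bit matrix;
// 0x0102040810204080 is the identity matrix, and byte-shifting that constant
// by n turns the multiply into a shift of each data byte by n, as in the
// vshlq_n_s8 path above. Requires a GFNI-capable compiler and CPU (e.g.
// -mgfni); the helper name is made up for this sketch.
#include <immintrin.h>
#include <stdint.h>

static __m128i bytewise_shl_gfni(__m128i x, int n) {  // n in 0..7
  const uint64_t identity = UINT64_C(0x0102040810204080);
  return _mm_gf2p8affine_epi64_epi8(
      x, _mm_set1_epi64x((int64_t)(identity >> (n * 8))), 0);
}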
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_SUBW_H) +#define SIMDE_ARM_NEON_SUBW_H + +#include "types.h" +#include "sub.h" +#include "movl.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vsubw_s8(simde_int16x8_t a, simde_int8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubw_s8(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_s16(a, simde_vmovl_s8(b)); + #else + simde_int16x8_private r_; + simde_int16x8_private a_ = simde_int16x8_to_private(a); + simde_int8x8_private b_ = simde_int8x8_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubw_s8 + #define vsubw_s8(a, b) simde_vsubw_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vsubw_s16(simde_int32x4_t a, simde_int16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubw_s16(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_s32(a, simde_vmovl_s16(b)); + #else + simde_int32x4_private r_; + simde_int32x4_private a_ = simde_int32x4_to_private(a); + simde_int16x4_private b_ = simde_int16x4_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubw_s16 + #define vsubw_s16(a, b) simde_vsubw_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vsubw_s32(simde_int64x2_t a, simde_int32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubw_s32(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_s64(a, simde_vmovl_s32(b)); + #else + simde_int64x2_private r_; + simde_int64x2_private a_ = simde_int64x2_to_private(a); + simde_int32x2_private b_ = simde_int32x2_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubw_s32 + #define vsubw_s32(a, b) simde_vsubw_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vsubw_u8(simde_uint16x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubw_u8(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_u16(a, simde_vmovl_u8(b)); + #else + simde_uint16x8_private r_; + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + simde_uint8x8_private b_ = simde_uint8x8_to_private(b); + + #if 
(SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubw_u8 + #define vsubw_u8(a, b) simde_vsubw_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vsubw_u16(simde_uint32x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubw_u16(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_u32(a, simde_vmovl_u16(b)); + #else + simde_uint32x4_private r_; + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + simde_uint16x4_private b_ = simde_uint16x4_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubw_u16 + #define vsubw_u16(a, b) simde_vsubw_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vsubw_u32(simde_uint64x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubw_u32(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_u64(a, simde_vmovl_u32(b)); + #else + simde_uint64x2_private r_; + simde_uint64x2_private a_ = simde_uint64x2_to_private(a); + simde_uint32x2_private b_ = simde_uint32x2_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubw_u32 + #define vsubw_u32(a, b) simde_vsubw_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SUBW_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/subw_high.h b/lib/mmseqs/lib/simde/simde/arm/neon/subw_high.h new file mode 100644 index 0000000..288dbef --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/subw_high.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
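// Sketch of the vsubw_* semantics added in subw.h above (illustrative sketch,
// not part of the patch): the narrow operand b is widened first and then
// subtracted from a, i.e. vsubw_s8(a, b) behaves like
// vsubq_s16(a, vmovl_s8(b)), which is exactly the 128-bit fallback used
// above. The helper name is an assumption of this sketch.
#include <stdint.h>

static void ref_subw_s8(const int16_t a[8], const int8_t b[8], int16_t r[8]) {
  for (int i = 0; i < 8; i++) {
    r[i] = (int16_t)(a[i] - (int16_t)b[i]);  // widen b, then subtract it from a
  }
}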
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_SUBW_HIGH_H) +#define SIMDE_ARM_NEON_SUBW_HIGH_H + +#include "types.h" +#include "movl.h" +#include "sub.h" +#include "get_high.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vsubw_high_s8(simde_int16x8_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubw_high_s8(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_s16(a, simde_vmovl_s8(simde_vget_high_s8(b))); + #else + simde_int16x8_private r_; + simde_int16x8_private a_ = simde_int16x8_to_private(a); + simde_int8x16_private b_ = simde_int8x16_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + #endif + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubw_high_s8 + #define vsubw_high_s8(a, b) simde_vsubw_high_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vsubw_high_s16(simde_int32x4_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubw_high_s16(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_s32(a, simde_vmovl_s16(simde_vget_high_s16(b))); + #else + simde_int32x4_private r_; + simde_int32x4_private a_ = simde_int32x4_to_private(a); + simde_int16x8_private b_ = simde_int16x8_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + #endif + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubw_high_s16 + #define vsubw_high_s16(a, b) simde_vsubw_high_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vsubw_high_s32(simde_int64x2_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubw_high_s32(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_s64(a, simde_vmovl_s32(simde_vget_high_s32(b))); + #else + simde_int64x2_private r_; + simde_int64x2_private a_ = simde_int64x2_to_private(a); + simde_int32x4_private b_ = simde_int32x4_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for 
(size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubw_high_s32 + #define vsubw_high_s32(a, b) simde_vsubw_high_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vsubw_high_u8(simde_uint16x8_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubw_high_u8(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_u16(a, simde_vmovl_u8(simde_vget_high_u8(b))); + #else + simde_uint16x8_private r_; + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + simde_uint8x16_private b_ = simde_uint8x16_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + #endif + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubw_high_u8 + #define vsubw_high_u8(a, b) simde_vsubw_high_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vsubw_high_u16(simde_uint32x4_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubw_high_u16(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_u32(a, simde_vmovl_u16(simde_vget_high_u16(b))); + #else + simde_uint32x4_private r_; + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + simde_uint16x8_private b_ = simde_uint16x8_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + #endif + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubw_high_u16 + #define vsubw_high_u16(a, b) simde_vsubw_high_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vsubw_high_u32(simde_uint64x2_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubw_high_u32(a, b); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return simde_vsubq_u64(a, simde_vmovl_u32(simde_vget_high_u32(b))); + #else + simde_uint64x2_private r_; + simde_uint64x2_private a_ = simde_uint64x2_to_private(a); + simde_uint32x4_private b_ = simde_uint32x4_to_private(b); + + #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, b_.values); + r_.values -= a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)]; + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubw_high_u32 + #define vsubw_high_u32(a, b) simde_vsubw_high_u32((a), 
(b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SUBW_HIGH_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/types.h b/lib/mmseqs/lib/simde/simde/arm/neon/types.h index c6a1d0a..0ce4bf1 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/types.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/types.h @@ -36,43 +36,43 @@ SIMDE_BEGIN_DECLS_ #if defined(SIMDE_VECTOR_SUBSCRIPT) #define SIMDE_ARM_NEON_TYPE_INT_DEFINE_(Element_Type_Name, Element_Count, Alignment) \ typedef struct simde_##Element_Type_Name##x##Element_Count##_private { \ - SIMDE_ALIGN(Alignment) Element_Type_Name##_t values SIMDE_VECTOR(sizeof(Element_Type_Name##_t) * Element_Count); \ + SIMDE_ALIGN_TO(Alignment) Element_Type_Name##_t values SIMDE_VECTOR(sizeof(Element_Type_Name##_t) * Element_Count); \ } simde_##Element_Type_Name##x##Element_Count##_private; #define SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(Element_Size, Element_Count, Alignment) \ typedef struct simde_float##Element_Size##x##Element_Count##_private { \ - SIMDE_ALIGN(Alignment) simde_float##Element_Size values SIMDE_VECTOR(sizeof(simde_float##Element_Size) * Element_Count); \ + SIMDE_ALIGN_TO(Alignment) simde_float##Element_Size values SIMDE_VECTOR(sizeof(simde_float##Element_Size) * Element_Count); \ } simde_float##Element_Size##x##Element_Count##_private; #else #define SIMDE_ARM_NEON_TYPE_INT_DEFINE_(Element_Type_Name, Element_Count, Alignment) \ typedef struct simde_##Element_Type_Name##x##Element_Count##_private { \ - SIMDE_ALIGN(Alignment) Element_Type_Name##_t values[Element_Count]; \ + SIMDE_ALIGN_TO(Alignment) Element_Type_Name##_t values[Element_Count]; \ } simde_##Element_Type_Name##x##Element_Count##_private; #define SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(Element_Size, Element_Count, Alignment) \ typedef struct simde_float##Element_Size##x##Element_Count##_private { \ - SIMDE_ALIGN(Alignment) simde_float##Element_Size values[Element_Count]; \ + SIMDE_ALIGN_TO(Alignment) simde_float##Element_Size values[Element_Count]; \ } simde_float##Element_Size##x##Element_Count##_private; #endif -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int8, 8, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int16, 4, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int32, 2, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int64, 1, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint8, 8, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint16, 4, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint32, 2, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint64, 1, 8) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int8, 16, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int16, 8, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int32, 4, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int64, 2, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint8, 16, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint16, 8, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint32, 4, 16) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint64, 2, 16) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(32, 2, 8) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 1, 8) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(32, 4, 16) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, 16) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int8, 8, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int16, 4, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int32, 2, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int64, 1, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint8, 8, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint16, 4, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint32, 2, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint64, 1, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int8, 
16, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int16, 8, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int32, 4, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int64, 2, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint8, 16, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint16, 8, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint32, 4, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint64, 2, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(32, 2, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 1, SIMDE_ALIGN_8_) +SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(32, 4, SIMDE_ALIGN_16_) +SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef float32_t simde_float32_t; @@ -273,7 +273,7 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, 16) #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) typedef SIMDE_POWER_ALTIVEC_VECTOR(signed long long) simde_int64x2_t; typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) simde_uint64x2_t; - typedef SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) simde_float64x2_t; + typedef SIMDE_POWER_ALTIVEC_VECTOR(double) simde_float64x2_t; #else #define SIMDE_ARM_NEON_NEED_PORTABLE_I64X2 #define SIMDE_ARM_NEON_NEED_PORTABLE_U64X2 diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/uqadd.h b/lib/mmseqs/lib/simde/simde/arm/neon/uqadd.h new file mode 100644 index 0000000..18b4f0d --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/arm/neon/uqadd.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_UQADD_H) +#define SIMDE_ARM_NEON_UQADD_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +int8_t +simde_vuqaddb_s8(int8_t a, uint8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddb_s8(a, b); + #else + int16_t r_ = HEDLEY_STATIC_CAST(int16_t, a) + HEDLEY_STATIC_CAST(int16_t, b); + return (r_ < INT8_MIN) ? INT8_MIN : ((r_ > INT8_MAX) ? 
INT8_MAX : HEDLEY_STATIC_CAST(int8_t, r_)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddb_s8 + #define vuqaddb_s8(a, b) simde_vuqaddb_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_vuqaddh_s16(int16_t a, uint16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddh_s16(a, b); + #else + int32_t r_ = HEDLEY_STATIC_CAST(int32_t, a) + HEDLEY_STATIC_CAST(int32_t, b); + return (r_ < INT16_MIN) ? INT16_MIN : ((r_ > INT16_MAX) ? INT16_MAX : HEDLEY_STATIC_CAST(int16_t, r_)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddh_s16 + #define vuqaddh_s16(a, b) simde_vuqaddh_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vuqadds_s32(int32_t a, uint32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqadds_s32(a, b); + #else + int64_t r_ = HEDLEY_STATIC_CAST(int64_t, a) + HEDLEY_STATIC_CAST(int64_t, b); + return (r_ < INT32_MIN) ? INT32_MIN : ((r_ > INT32_MAX) ? INT32_MAX : HEDLEY_STATIC_CAST(int32_t, r_)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqadds_s32 + #define vuqadds_s32(a, b) simde_vuqadds_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vuqaddd_s64(int64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddd_s64(a, b); + #else + /* TODO: I suspect there is room for improvement here. This is + * just the first thing that worked, and I don't feel like messing + * with it now. */ + int64_t r; + + if (a < 0) { + uint64_t na = HEDLEY_STATIC_CAST(uint64_t, -a); + if (na > b) { + uint64_t t = na - b; + r = (t > (HEDLEY_STATIC_CAST(uint64_t, INT64_MAX) + 1)) ? INT64_MIN : -HEDLEY_STATIC_CAST(int64_t, t); + } else { + uint64_t t = b - na; + r = (t > (HEDLEY_STATIC_CAST(uint64_t, INT64_MAX) )) ? INT64_MAX : HEDLEY_STATIC_CAST(int64_t, t); + } + } else { + uint64_t ua = HEDLEY_STATIC_CAST(uint64_t, a); + r = ((INT64_MAX - ua) < b) ? 
INT64_MAX : HEDLEY_STATIC_CAST(int64_t, ua + b); + } + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddd_s64 + #define vuqaddd_s64(a, b) simde_vuqaddd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vuqadd_s8(simde_int8x8_t a, simde_uint8x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqadd_s8(a, b); + #else + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a); + simde_uint8x8_private b_ = simde_uint8x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqaddb_s8(a_.values[i], b_.values[i]); + } + + return simde_int8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqadd_s8 + #define vuqadd_s8(a, b) simde_vuqadd_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vuqadd_s16(simde_int16x4_t a, simde_uint16x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqadd_s16(a, b); + #else + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a); + simde_uint16x4_private b_ = simde_uint16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqaddh_s16(a_.values[i], b_.values[i]); + } + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqadd_s16 + #define vuqadd_s16(a, b) simde_vuqadd_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vuqadd_s32(simde_int32x2_t a, simde_uint32x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqadd_s32(a, b); + #else + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a); + simde_uint32x2_private b_ = simde_uint32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqadds_s32(a_.values[i], b_.values[i]); + } + + return simde_int32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqadd_s32 + #define vuqadd_s32(a, b) simde_vuqadd_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vuqadd_s64(simde_int64x1_t a, simde_uint64x1_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqadd_s64(a, b); + #else + simde_int64x1_private + r_, + a_ = simde_int64x1_to_private(a); + simde_uint64x1_private b_ = simde_uint64x1_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqaddd_s64(a_.values[i], b_.values[i]); + } + + return simde_int64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqadd_s64 + #define vuqadd_s64(a, b) simde_vuqadd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vuqaddq_s8(simde_int8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddq_s8(a, b); + #else + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a); + simde_uint8x16_private b_ = simde_uint8x16_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqaddb_s8(a_.values[i], b_.values[i]); + } + + return simde_int8x16_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddq_s8 + #define vuqaddq_s8(a, b) simde_vuqaddq_s8((a), (b)) +#endif + 
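// Sketch of the vuqadd saturation rule implemented above (illustrative sketch,
// not part of the patch): an unsigned addend is accumulated into a signed
// value and the result is clamped to the signed range, so only the upper
// bound can ever be hit. Helper names and the includes are assumptions of
// this sketch.
#include <assert.h>
#include <stdint.h>

static int8_t ref_uqaddb_s8(int8_t a, uint8_t b) {
  int16_t r = (int16_t)a + (int16_t)b;           // widen, then add
  return (r > INT8_MAX) ? INT8_MAX : (int8_t)r;  // a >= INT8_MIN and b >= 0, so no lower clamp
}

static void uqadd_sanity(void) {
  assert(ref_uqaddb_s8(100, 100) == INT8_MAX);   // 200 saturates to 127
  assert(ref_uqaddb_s8(-50, 40) == -10);         // in range, no saturation
  assert(ref_uqaddb_s8(INT8_MIN, 255) == 127);   // -128 + 255 = 127, still in range
}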
+SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vuqaddq_s16(simde_int16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddq_s16(a, b); + #else + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a); + simde_uint16x8_private b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqaddh_s16(a_.values[i], b_.values[i]); + } + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddq_s16 + #define vuqaddq_s16(a, b) simde_vuqaddq_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vuqaddq_s32(simde_int32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddq_s32(a, b); + #else + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a); + simde_uint32x4_private b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqadds_s32(a_.values[i], b_.values[i]); + } + + return simde_int32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddq_s32 + #define vuqaddq_s32(a, b) simde_vuqaddq_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vuqaddq_s64(simde_int64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vuqaddq_s64(a, b); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a); + simde_uint64x2_private b_ = simde_uint64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vuqaddd_s64(a_.values[i], b_.values[i]); + } + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vuqaddq_s64 + #define vuqaddq_s64(a, b) simde_vuqaddq_s64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_UQADD_H) */ diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/uzp1.h b/lib/mmseqs/lib/simde/simde/arm/neon/uzp1.h index 646af9f..5af7d95 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/uzp1.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/uzp1.h @@ -39,6 +39,9 @@ simde_float32x2_t simde_vuzp1_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_f32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x2x2_t t = vuzp_f32(a, b); + return t.val[0]; #else simde_float32x2_private r_, @@ -70,6 +73,9 @@ simde_int8x8_t simde_vuzp1_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_s8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x8x2_t t = vuzp_s8(a, b); + return t.val[0]; #else simde_int8x8_private r_, @@ -101,6 +107,9 @@ simde_int16x4_t simde_vuzp1_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_s16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t t = vuzp_s16(a, b); + return t.val[0]; #else simde_int16x4_private r_, @@ -132,6 +141,9 @@ simde_int32x2_t simde_vuzp1_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_s32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x2x2_t t = vuzp_s32(a, b); + return t.val[0]; #else simde_int32x2_private r_, @@ -163,6 +175,9 @@ simde_uint8x8_t simde_vuzp1_u8(simde_uint8x8_t 
a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_u8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x8x2_t t = vuzp_u8(a, b); + return t.val[0]; #else simde_uint8x8_private r_, @@ -194,6 +209,9 @@ simde_uint16x4_t simde_vuzp1_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_u16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x4x2_t t = vuzp_u16(a, b); + return t.val[0]; #else simde_uint16x4_private r_, @@ -225,6 +243,9 @@ simde_uint32x2_t simde_vuzp1_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1_u32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x2x2_t t = vuzp_u32(a, b); + return t.val[0]; #else simde_uint32x2_private r_, @@ -256,6 +277,13 @@ simde_float32x4_t simde_vuzp1q_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_f32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4x2_t t = vuzpq_f32(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v32x4_shuffle(a, b, 0, 2, 4, 6); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_shuffle_ps(a, b, 0x88); #else simde_float32x4_private r_, @@ -287,6 +315,10 @@ simde_float64x2_t simde_vuzp1q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_f64(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v64x2_shuffle(a, b, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(a), _mm_castpd_ps(b))); #else simde_float64x2_private r_, @@ -318,6 +350,11 @@ simde_int8x16_t simde_vuzp1q_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_s8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16x2_t t = vuzpq_s8(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v8x16_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); #else simde_int8x16_private r_, @@ -349,6 +386,11 @@ simde_int16x8_t simde_vuzp1q_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_s16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v16x8_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14); #else simde_int16x8_private r_, @@ -380,6 +422,13 @@ simde_int32x4_t simde_vuzp1q_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_s32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4x2_t t = vuzpq_s32(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v32x4_shuffle(a, b, 0, 2, 4, 6); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0x88)); #else simde_int32x4_private r_, @@ -411,6 +460,10 @@ simde_int64x2_t simde_vuzp1q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_s64(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v64x2_shuffle(a, b, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); #else simde_int64x2_private r_, @@ -443,6 +496,11 @@ simde_uint8x16_t simde_vuzp1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_u8(a, 
b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x16x2_t t = vuzpq_u8(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v8x16_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); #else simde_uint8x16_private r_, @@ -474,6 +532,11 @@ simde_uint16x8_t simde_vuzp1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_u16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x8x2_t t = vuzpq_u16(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v16x8_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14); #else simde_uint16x8_private r_, @@ -505,6 +568,13 @@ simde_uint32x4_t simde_vuzp1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_u32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x4x2_t t = vuzpq_u32(a, b); + return t.val[0]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v32x4_shuffle(a, b, 0, 2, 4, 6); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0x88)); #else simde_uint32x4_private r_, @@ -536,6 +606,11 @@ simde_uint64x2_t simde_vuzp1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_u64(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v64x2_shuffle(a, b, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* _mm_movelh_ps?!?! SSE is weird. */ + return _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); #else simde_uint64x2_private r_, diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/uzp2.h b/lib/mmseqs/lib/simde/simde/arm/neon/uzp2.h index a6b3138..5ba99d9 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/uzp2.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/uzp2.h @@ -39,6 +39,9 @@ simde_float32x2_t simde_vuzp2_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_f32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x2x2_t t = vuzp_f32(a, b); + return t.val[1]; #else simde_float32x2_private r_, @@ -70,6 +73,9 @@ simde_int8x8_t simde_vuzp2_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_s8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x8x2_t t = vuzp_s8(a, b); + return t.val[1]; #else simde_int8x8_private r_, @@ -101,6 +107,9 @@ simde_int16x4_t simde_vuzp2_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_s16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t t = vuzp_s16(a, b); + return t.val[1]; #else simde_int16x4_private r_, @@ -132,6 +141,9 @@ simde_int32x2_t simde_vuzp2_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_s32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x2x2_t t = vuzp_s32(a, b); + return t.val[1]; #else simde_int32x2_private r_, @@ -163,6 +175,9 @@ simde_uint8x8_t simde_vuzp2_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_u8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x8x2_t t = vuzp_u8(a, b); + return t.val[1]; #else simde_uint8x8_private r_, @@ -194,6 +209,9 @@ simde_uint16x4_t simde_vuzp2_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_u16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x4x2_t t = vuzp_u16(a, b); + return t.val[1]; 
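// Sketch of the uzp1/uzp2 lane pattern used in uzp1.h/uzp2.h above
// (illustrative sketch, not part of the patch): uzp1 keeps the even-indexed
// lanes of the concatenation {a, b} and uzp2 the odd-indexed ones, which for
// four 32-bit lanes is precisely what the _mm_shuffle_ps immediates 0x88 and
// 0xdd select. The helper name is an assumption of this sketch.
#include <stdint.h>

static void ref_uzpq_u32(const uint32_t a[4], const uint32_t b[4],
                         uint32_t uzp1[4], uint32_t uzp2[4]) {
  const uint32_t cat[8] = { a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3] };
  for (int i = 0; i < 4; i++) {
    uzp1[i] = cat[2 * i];      // vuzp1q_u32: a0, a2, b0, b2
    uzp2[i] = cat[2 * i + 1];  // vuzp2q_u32: a1, a3, b1, b3
  }
}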
#else simde_uint16x4_private r_, @@ -225,6 +243,9 @@ simde_uint32x2_t simde_vuzp2_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2_u32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x2x2_t t = vuzp_u32(a, b); + return t.val[1]; #else simde_uint32x2_private r_, @@ -256,6 +277,13 @@ simde_float32x4_t simde_vuzp2q_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_f32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4x2_t t = vuzpq_f32(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v32x4_shuffle(a, b, 1, 3, 5, 7); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_shuffle_ps(a, b, 0xdd); #else simde_float32x4_private r_, @@ -287,6 +315,12 @@ simde_float64x2_t simde_vuzp2q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_f64(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v64x2_shuffle(a, b, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_pd(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergel(a, b); #else simde_float64x2_private r_, @@ -318,6 +352,11 @@ simde_int8x16_t simde_vuzp2q_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_s8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16x2_t t = vuzpq_s8(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v8x16_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); #else simde_int8x16_private r_, @@ -349,6 +388,11 @@ simde_int16x8_t simde_vuzp2q_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_s16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v16x8_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15); #else simde_int16x8_private r_, @@ -380,6 +424,13 @@ simde_int32x4_t simde_vuzp2q_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_s32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4x2_t t = vuzpq_s32(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v32x4_shuffle(a, b, 1, 3, 5, 7); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0xdd)); #else simde_int32x4_private r_, @@ -411,6 +462,12 @@ simde_int64x2_t simde_vuzp2q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_s64(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v64x2_shuffle(a, b, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergel(a, b); #else simde_int64x2_private r_, @@ -442,6 +499,11 @@ simde_uint8x16_t simde_vuzp2q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_u8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x16x2_t t = vuzpq_u8(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v8x16_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); #else simde_uint8x16_private r_, @@ -473,6 +535,11 @@ simde_uint16x8_t simde_vuzp2q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return 
vuzp2q_u16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x8x2_t t = vuzpq_u16(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v16x8_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15); #else simde_uint16x8_private r_, @@ -504,6 +571,13 @@ simde_uint32x4_t simde_vuzp2q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_u32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x4x2_t t = vuzpq_u32(a, b); + return t.val[1]; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v32x4_shuffle(a, b, 1, 3, 5, 7); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0xdd)); #else simde_uint32x4_private r_, @@ -535,6 +609,12 @@ simde_uint64x2_t simde_vuzp2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_u64(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v64x2_shuffle(a, b, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergel(a, b); #else simde_uint64x2_private r_, diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/zip1.h b/lib/mmseqs/lib/simde/simde/arm/neon/zip1.h index 6c3d9f2..984bd9a 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/zip1.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/zip1.h @@ -39,18 +39,24 @@ simde_float32x2_t simde_vzip1_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_f32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpacklo_pi32(a, b); #else simde_float32x2_private r_, a_ = simde_float32x2_to_private(a), b_ = simde_float32x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_float32x2_from_private(r_); #endif @@ -65,18 +71,24 @@ simde_int8x8_t simde_vzip1_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_s8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpacklo_pi8(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_int8x8_from_private(r_); #endif @@ -91,18 +103,24 @@ simde_int16x4_t simde_vzip1_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_s16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return 
_mm_unpacklo_pi16(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 4, 1, 5); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_int16x4_from_private(r_); #endif @@ -117,18 +135,24 @@ simde_int32x2_t simde_vzip1_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_s32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpacklo_pi32(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_int32x2_from_private(r_); #endif @@ -143,18 +167,24 @@ simde_uint8x8_t simde_vzip1_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_u8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpacklo_pi8(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -169,18 +199,24 @@ simde_uint16x4_t simde_vzip1_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_u16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpacklo_pi16(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 4, 1, 5); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = 
b_.values[i]; + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -195,18 +231,24 @@ simde_uint32x2_t simde_vzip1_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_u32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpacklo_pi32(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -222,20 +264,27 @@ simde_vzip1q_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_f32(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23); + return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_unpacklo_ps(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_float32x4_from_private(r_); #endif @@ -251,20 +300,27 @@ simde_vzip1q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_f64(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23); + return wasm_v64x2_shuffle(a, b, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_pd(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergeh(a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_float64x2_from_private(r_); #endif @@ -280,20 +336,27 @@ simde_vzip1q_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) 
return vzip1q_s8(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return wasm_v8x16_shuffle(a, b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_int8x16_from_private(r_); #endif @@ -309,20 +372,27 @@ simde_vzip1q_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s16(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23); + return wasm_v16x8_shuffle(a, b, 0, 8, 1, 9, 2, 10, 3, 11); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_int16x8_from_private(r_); #endif @@ -338,20 +408,27 @@ simde_vzip1q_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s32(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23); + return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return 
simde_int32x4_from_private(r_); #endif @@ -367,20 +444,27 @@ simde_vzip1q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s64(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23); + return wasm_v64x2_shuffle(a, b, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergeh(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_int64x2_from_private(r_); #endif @@ -397,20 +481,27 @@ simde_vzip1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u8(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return wasm_v8x16_shuffle(a, b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -426,20 +517,27 @@ simde_vzip1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u16(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23); + return wasm_v16x8_shuffle(a, b, 0, 8, 1, 9, 2, 10, 3, 11); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + 
SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -455,20 +553,27 @@ simde_vzip1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u32(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23); + return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergeh(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -484,20 +589,27 @@ simde_vzip1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u64(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23); + return wasm_v64x2_shuffle(a, b, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergeh(a, b); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[2 * i ] = a_.values[i]; - r_.values[2 * i + 1] = b_.values[i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[2 * i ] = a_.values[i]; + r_.values[2 * i + 1] = b_.values[i]; + } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/mmseqs/lib/simde/simde/arm/neon/zip2.h b/lib/mmseqs/lib/simde/simde/arm/neon/zip2.h index 6ea3ca3..ac7f9c1 100644 --- a/lib/mmseqs/lib/simde/simde/arm/neon/zip2.h +++ b/lib/mmseqs/lib/simde/simde/arm/neon/zip2.h @@ -39,18 +39,24 @@ simde_float32x2_t simde_vzip2_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_f32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi32(a, b); #else simde_float32x2_private r_, a_ = simde_float32x2_to_private(a), b_ = simde_float32x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3); + #else + const 
size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_float32x2_from_private(r_); #endif @@ -65,18 +71,24 @@ simde_int8x8_t simde_vzip2_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_s8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi8(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_int8x8_from_private(r_); #endif @@ -91,18 +103,24 @@ simde_int16x4_t simde_vzip2_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_s16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi16(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 2, 6, 3, 7); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_int16x4_from_private(r_); #endif @@ -117,18 +135,24 @@ simde_int32x2_t simde_vzip2_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_s32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi32(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_int32x2_from_private(r_); #endif @@ -143,18 +167,24 @@ simde_uint8x8_t simde_vzip2_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_u8(a, b); + #elif 
defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi8(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -169,18 +199,24 @@ simde_uint16x4_t simde_vzip2_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_u16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi16(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 2, 6, 3, 7); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -195,18 +231,24 @@ simde_uint32x2_t simde_vzip2_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_u32(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + return _mm_unpackhi_pi32(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -222,20 +264,27 @@ simde_vzip2q_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_f32(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31); + return wasm_v32x4_shuffle(a, b, 2, 6, 3, 7); + #elif defined(SIMDE_X86_SSE_NATIVE) + return _mm_unpackhi_ps(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - const size_t halfway_point = 
sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_float32x4_from_private(r_); #endif @@ -251,20 +300,27 @@ simde_vzip2q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_f64(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31); + return wasm_v64x2_shuffle(a, b, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_pd(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergel(a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_float64x2_from_private(r_); #endif @@ -280,20 +336,27 @@ simde_vzip2q_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s8(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return wasm_v8x16_shuffle(a, b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_int8x16_from_private(r_); #endif @@ -309,20 +372,27 @@ simde_vzip2q_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s16(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 24, 25, 10, 
11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31); + return wasm_v16x8_shuffle(a, b, 4, 12, 5, 13, 6, 14, 7, 15); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_int16x8_from_private(r_); #endif @@ -338,20 +408,27 @@ simde_vzip2q_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s32(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31); + return wasm_v32x4_shuffle(a, b, 2, 6, 3, 7); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_int32x4_from_private(r_); #endif @@ -367,20 +444,27 @@ simde_vzip2q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s64(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31); + return wasm_v64x2_shuffle(a, b, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergel(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + 
} + #endif return simde_int64x2_from_private(r_); #endif @@ -397,20 +481,27 @@ simde_vzip2q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u8(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return wasm_v8x16_shuffle(a, b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -426,20 +517,27 @@ simde_vzip2q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u16(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31); + return wasm_v16x8_shuffle(a, b, 4, 12, 5, 13, 6, 14, 7, 15); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -455,20 +553,27 @@ simde_vzip2q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u32(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31); + return wasm_v32x4_shuffle(a, b, 2, 6, 3, 7); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mergel(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) 
+ 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -484,20 +589,27 @@ simde_vzip2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u64(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, - 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31); + return wasm_v64x2_shuffle(a, b, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpackhi_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_mergel(a, b); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < halfway_point ; i++) { - r_.values[(2 * i) ] = a_.values[halfway_point + i]; - r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); + #else + const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway_point ; i++) { + r_.values[(2 * i) ] = a_.values[halfway_point + i]; + r_.values[(2 * i) + 1] = b_.values[halfway_point + i]; + } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/mmseqs/lib/simde/simde/simde-align.h b/lib/mmseqs/lib/simde/simde/simde-align.h new file mode 100644 index 0000000..00b96ab --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/simde-align.h @@ -0,0 +1,445 @@ +/* Alignment + * Created by Evan Nemerson + * + * To the extent possible under law, the authors have waived all + * copyright and related or neighboring rights to this code. For + * details, see the Creative Commons Zero 1.0 Universal license at + * + * + * SPDX-License-Identifier: CC0-1.0 + * + ********************************************************************** + * + * This is portability layer which should help iron out some + * differences across various compilers, as well as various verisons of + * C and C++. + * + * It was originally developed for SIMD Everywhere + * (), but since its only + * dependency is Hedley (, also CC0) + * it can easily be used in other projects, so please feel free to do + * so. + * + * If you do use this in your project, please keep a link to SIMDe in + * your code to remind you where to report any bugs and/or check for + * updated versions. + * + * # API Overview + * + * The API has several parts, and most macros have a few variations. + * There are APIs for declaring aligned fields/variables, optimization + * hints, and run-time alignment checks. + * + * Briefly, macros ending with "_TO" take numeric values and are great + * when you know the value you would like to use. Macros ending with + * "_LIKE", on the other hand, accept a type and are used when you want + * to use the alignment of a type instead of hardcoding a value. + * + * Documentation for each section of the API is inline. + * + * True to form, MSVC is the main problem and imposes several + * limitations on the effectiveness of the APIs. 
Detailed descriptions + * of the limitations of each macro are inline, but in general: + * + * * On C11+ or C++11+ code written using this API will work. The + * ASSUME macros may or may not generate a hint to the compiler, but + * that is only an optimization issue and will not actually cause + * failures. + * * If you're using pretty much any compiler other than MSVC, + * everything should basically work as well as in C11/C++11. + */ + +#if !defined(SIMDE_ALIGN_H) +#define SIMDE_ALIGN_H + +#include "hedley.h" + +/* I know this seems a little silly, but some non-hosted compilers + * don't have stddef.h, so we try to accomodate them. */ +#if !defined(SIMDE_ALIGN_SIZE_T_) + #if defined(__SIZE_TYPE__) + #define SIMDE_ALIGN_SIZE_T_ __SIZE_TYPE__ + #elif defined(__SIZE_T_TYPE__) + #define SIMDE_ALIGN_SIZE_T_ __SIZE_TYPE__ + #elif defined(__cplusplus) + #include + #define SIMDE_ALIGN_SIZE_T_ size_t + #else + #include + #define SIMDE_ALIGN_SIZE_T_ size_t + #endif +#endif + +#if !defined(SIMDE_ALIGN_INTPTR_T_) + #if defined(__INTPTR_TYPE__) + #define SIMDE_ALIGN_INTPTR_T_ __INTPTR_TYPE__ + #elif defined(__PTRDIFF_TYPE__) + #define SIMDE_ALIGN_INTPTR_T_ __PTRDIFF_TYPE__ + #elif defined(__PTRDIFF_T_TYPE__) + #define SIMDE_ALIGN_INTPTR_T_ __PTRDIFF_T_TYPE__ + #elif defined(__cplusplus) + #include + #define SIMDE_ALIGN_INTPTR_T_ ptrdiff_t + #else + #include + #define SIMDE_ALIGN_INTPTR_T_ ptrdiff_t + #endif +#endif + +#if defined(SIMDE_ALIGN_DEBUG) + #if defined(__cplusplus) + #include + #else + #include + #endif +#endif + +/* SIMDE_ALIGN_OF(Type) + * + * The SIMDE_ALIGN_OF macro works like alignof, or _Alignof, or + * __alignof, or __alignof__, or __ALIGNOF__, depending on the compiler. + * It isn't defined everywhere (only when the compiler has some alignof- + * like feature we can use to implement it), but it should work in most + * modern compilers, as well as C11 and C++11. + * + * If we can't find an implementation for SIMDE_ALIGN_OF then the macro + * will not be defined, so if you can handle that situation sensibly + * you may need to sprinkle some ifdefs into your code. + */ +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ + (0 && HEDLEY_HAS_FEATURE(c_alignof)) + #define SIMDE_ALIGN_OF(Type) _Alignof(Type) +#elif \ + (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ + (0 && HEDLEY_HAS_FEATURE(cxx_alignof)) + #define SIMDE_ALIGN_OF(Type) alignof(Type) +#elif \ + HEDLEY_GCC_VERSION_CHECK(2,95,0) || \ + HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ + HEDLEY_TINYC_VERSION_CHECK(0,9,24) || \ + HEDLEY_PGI_VERSION_CHECK(19,10,0) || \ + HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ + HEDLEY_TI_ARMCL_VERSION_CHECK(16,9,0) || \ + HEDLEY_TI_CL2000_VERSION_CHECK(16,9,0) || \ + HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + HEDLEY_TI_CL430_VERSION_CHECK(16,9,0) || \ + HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2) || \ + defined(__IBM__ALIGNOF__) || \ + defined(__clang__) + #define SIMDE_ALIGN_OF(Type) __alignof__(Type) +#elif \ + HEDLEY_IAR_VERSION_CHECK(8,40,0) + #define SIMDE_ALIGN_OF(Type) __ALIGNOF__(Type) +#elif \ + HEDLEY_MSVC_VERSION_CHECK(19,0,0) + /* Probably goes back much further, but MS takes down their old docs. + * If you can verify that this works in earlier versions please let + * me know! */ + #define SIMDE_ALIGN_OF(Type) __alignof(Type) +#endif + +/* SIMDE_ALIGN_MAXIMUM: + * + * This is the maximum alignment that the compiler supports. 
You can + * define the value prior to including SIMDe if necessary, but in that + * case *please* submit an issue so we can add the platform to the + * detection code. + * + * Most compilers are okay with types which are aligned beyond what + * they think is the maximum, as long as the alignment is a power + * of two. MSVC is the exception (of course), so we need to cap the + * alignment requests at values that the implementation supports. + * + * XL C/C++ will accept values larger than 16 (which is the alignment + * of an AltiVec vector), but will not reliably align to the larger + * value, so so we cap the value at 16 there. + * + * If the compiler accepts any power-of-two value within reason then + * this macro should be left undefined, and the SIMDE_ALIGN_CAP + * macro will just return the value passed to it. */ +#if !defined(SIMDE_ALIGN_MAXIMUM) + #if defined(HEDLEY_MSVC_VERSION) + #if defined(_M_IX86) || defined(_M_AMD64) + #if HEDLEY_MSVC_VERSION_CHECK(19,14,0) + #define SIMDE_ALIGN_PLATFORM_MAXIMUM 64 + #elif HEDLEY_MSVC_VERSION_CHECK(16,0,0) + /* VS 2010 is really a guess based on Wikipedia; if anyone can + * test with old VS versions I'd really appreciate it. */ + #define SIMDE_ALIGN_PLATFORM_MAXIMUM 32 + #else + #define SIMDE_ALIGN_PLATFORM_MAXIMUM 16 + #endif + #elif defined(_M_ARM) || defined(_M_ARM64) + #define SIMDE_ALIGN_PLATFORM_MAXIMUM 8 + #endif + #elif defined(HEDLEY_IBM_VERSION) + #define SIMDE_ALIGN_PLATFORM_MAXIMUM 16 + #endif +#endif + +/* You can mostly ignore these; they're intended for internal use. + * If you do need to use them please let me know; if they fulfill + * a common use case I'll probably drop the trailing underscore + * and make them part of the public API. */ +#if defined(SIMDE_ALIGN_PLATFORM_MAXIMUM) + #if SIMDE_ALIGN_PLATFORM_MAXIMUM >= 64 + #define SIMDE_ALIGN_64_ 64 + #define SIMDE_ALIGN_32_ 32 + #define SIMDE_ALIGN_16_ 16 + #define SIMDE_ALIGN_8_ 8 + #elif SIMDE_ALIGN_PLATFORM_MAXIMUM >= 32 + #define SIMDE_ALIGN_64_ 32 + #define SIMDE_ALIGN_32_ 32 + #define SIMDE_ALIGN_16_ 16 + #define SIMDE_ALIGN_8_ 8 + #elif SIMDE_ALIGN_PLATFORM_MAXIMUM >= 16 + #define SIMDE_ALIGN_64_ 16 + #define SIMDE_ALIGN_32_ 16 + #define SIMDE_ALIGN_16_ 16 + #define SIMDE_ALIGN_8_ 8 + #elif SIMDE_ALIGN_PLATFORM_MAXIMUM >= 8 + #define SIMDE_ALIGN_64_ 8 + #define SIMDE_ALIGN_32_ 8 + #define SIMDE_ALIGN_16_ 8 + #define SIMDE_ALIGN_8_ 8 + #else + #error Max alignment expected to be >= 8 + #endif +#else + #define SIMDE_ALIGN_64_ 64 + #define SIMDE_ALIGN_32_ 32 + #define SIMDE_ALIGN_16_ 16 + #define SIMDE_ALIGN_8_ 8 +#endif + +/** + * SIMDE_ALIGN_CAP(Alignment) + * + * Returns the minimum of Alignment or SIMDE_ALIGN_MAXIMUM. + */ +#if defined(SIMDE_ALIGN_MAXIMUM) + #define SIMDE_ALIGN_CAP(Alignment) (((Alignment) < (SIMDE_ALIGN_PLATFORM_MAXIMUM)) ? (Alignment) : (SIMDE_ALIGN_PLATFORM_MAXIMUM)) +#else + #define SIMDE_ALIGN_CAP(Alignment) (Alignment) +#endif + +/* SIMDE_ALIGN_TO(Alignment) + * + * SIMDE_ALIGN_TO is used to declare types or variables. It basically + * maps to the align attribute in most compilers, the align declspec + * in MSVC, or _Alignas/alignas in C11/C++11. + * + * Example: + * + * struct i32x4 { + * SIMDE_ALIGN_TO(16) int32_t values[4]; + * } + * + * Limitations: + * + * MSVC requires that the Alignment parameter be numeric; you can't do + * something like `SIMDE_ALIGN_TO(SIMDE_ALIGN_OF(int))`. 
This is + * unfortunate because that's really how the LIKE macros are + * implemented, and I am not aware of a way to get anything like this + * to work without using the C11/C++11 keywords. + * + * It also means that we can't use SIMDE_ALIGN_CAP to limit the + * alignment to the value specified, which MSVC also requires, so on + * MSVC you should use the `SIMDE_ALIGN_TO_8/16/32/64` macros instead. + * They work like `SIMDE_ALIGN_TO(SIMDE_ALIGN_CAP(Alignment))` would, + * but should be safe to use on MSVC. + * + * All this is to say that, if you want your code to work on MSVC, you + * should use the SIMDE_ALIGN_TO_8/16/32/64 macros below instead of + * SIMDE_ALIGN_TO(8/16/32/64). + */ +#if \ + HEDLEY_HAS_ATTRIBUTE(aligned) || \ + HEDLEY_GCC_VERSION_CHECK(2,95,0) || \ + HEDLEY_CRAY_VERSION_CHECK(8,4,0) || \ + HEDLEY_IBM_VERSION_CHECK(11,1,0) || \ + HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + HEDLEY_PGI_VERSION_CHECK(19,4,0) || \ + HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + HEDLEY_TINYC_VERSION_CHECK(0,9,24) || \ + HEDLEY_TI_ARMCL_VERSION_CHECK(16,9,0) || \ + HEDLEY_TI_CL2000_VERSION_CHECK(16,9,0) || \ + HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + HEDLEY_TI_CL430_VERSION_CHECK(16,9,0) || \ + HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2) + #define SIMDE_ALIGN_TO(Alignment) __attribute__((__aligned__(SIMDE_ALIGN_CAP(Alignment)))) +#elif \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) + #define SIMDE_ALIGN_TO(Alignment) _Alignas(SIMDE_ALIGN_CAP(Alignment)) +#elif \ + (defined(__cplusplus) && (__cplusplus >= 201103L)) + #define SIMDE_ALIGN_TO(Alignment) alignas(SIMDE_ALIGN_CAP(Alignment)) +#elif \ + defined(HEDLEY_MSVC_VERSION) + #define SIMDE_ALIGN_TO(Alignment) __declspec(align(Alignment)) + /* Unfortunately MSVC can't handle __declspec(align(__alignof(Type))); + * the alignment passed to the declspec has to be an integer. */ + #define SIMDE_ALIGN_OF_UNUSABLE_FOR_LIKE +#endif +#define SIMDE_ALIGN_TO_64 SIMDE_ALIGN_TO(SIMDE_ALIGN_64_) +#define SIMDE_ALIGN_TO_32 SIMDE_ALIGN_TO(SIMDE_ALIGN_32_) +#define SIMDE_ALIGN_TO_16 SIMDE_ALIGN_TO(SIMDE_ALIGN_16_) +#define SIMDE_ALIGN_TO_8 SIMDE_ALIGN_TO(SIMDE_ALIGN_8_) + +/* SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) + * + * SIMDE_ALIGN_ASSUME_TO is semantically similar to C++20's + * std::assume_aligned, or __builtin_assume_aligned. It tells the + * compiler to assume that the provided pointer is aligned to an + * `Alignment`-byte boundary. + * + * If you define SIMDE_ALIGN_DEBUG prior to including this header then + * SIMDE_ALIGN_ASSUME_TO will turn into a runtime check. 
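+ *
+ * A basic usage sketch, pairing it with the declaration macros above
+ * (the array name and size here are illustrative):
+ *
+ *   SIMDE_ALIGN_TO_16 float buf[64];
+ *   float* p = buf;
+ *   p = SIMDE_ALIGN_ASSUME_TO(p, 16);
+ *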
We don't + * integrate with NDEBUG in this header, but it may be a good idea to + * put something like this in your code: + * + * #if !defined(NDEBUG) + * #define SIMDE_ALIGN_DEBUG + * #endif + * #include <.../simde-align.h> + */ +#if \ + HEDLEY_HAS_BUILTIN(__builtin_assume_aligned) || \ + HEDLEY_GCC_VERSION_CHECK(4,7,0) + #define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) \ + HEDLEY_REINTERPRET_CAST(__typeof__(Pointer), __builtin_assume_aligned(HEDLEY_CONST_CAST(void*, HEDLEY_REINTERPRET_CAST(const void*, Pointer)), Alignment)) +#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) (__extension__ ({ \ + __typeof__(v) simde_assume_aligned_t_ = (Pointer); \ + __assume_aligned(simde_assume_aligned_t_, Alignment); \ + simde_assume_aligned_t_; \ + })) +#elif defined(__cplusplus) && (__cplusplus > 201703L) + #include + #define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) std::assume_aligned(Pointer) +#else + #if defined(__cplusplus) + template HEDLEY_ALWAYS_INLINE static T* simde_align_assume_to_unchecked(T* ptr, const size_t alignment) + #else + HEDLEY_ALWAYS_INLINE static void* simde_align_assume_to_unchecked(void* ptr, const size_t alignment) + #endif + { + HEDLEY_ASSUME((HEDLEY_REINTERPRET_CAST(size_t, (ptr)) % SIMDE_ALIGN_CAP(alignment)) == 0); + return ptr; + } + #if defined(__cplusplus) + #define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) simde_align_assume_to_unchecked((Pointer), (Alignment)) + #else + #define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) simde_align_assume_to_unchecked(HEDLEY_CONST_CAST(void*, HEDLEY_REINTERPRET_CAST(const void*, Pointer)), (Alignment)) + #endif +#endif + +#if !defined(SIMDE_ALIGN_DEBUG) + #define SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) +#else + #include + #if defined(__cplusplus) + template + static HEDLEY_ALWAYS_INLINE + T* + simde_align_assume_to_checked_uncapped(T* ptr, const size_t alignment, const char* file, int line, const char* ptrname) + #else + static HEDLEY_ALWAYS_INLINE + void* + simde_align_assume_to_checked_uncapped(void* ptr, const size_t alignment, const char* file, int line, const char* ptrname) + #endif + { + if (HEDLEY_UNLIKELY((HEDLEY_REINTERPRET_CAST(SIMDE_ALIGN_INTPTR_T_, (ptr)) % HEDLEY_STATIC_CAST(SIMDE_ALIGN_INTPTR_T_, SIMDE_ALIGN_CAP(alignment))) != 0)) { + fprintf(stderr, "%s:%d: alignment check failed for `%s' (%p %% %u == %u)\n", + file, line, ptrname, HEDLEY_REINTERPRET_CAST(const void*, ptr), + HEDLEY_STATIC_CAST(unsigned int, SIMDE_ALIGN_CAP(alignment)), + HEDLEY_STATIC_CAST(unsigned int, HEDLEY_REINTERPRET_CAST(SIMDE_ALIGN_INTPTR_T_, (ptr)) % HEDLEY_STATIC_CAST(SIMDE_ALIGN_INTPTR_T_, SIMDE_ALIGN_CAP(alignment)))); + } + + return ptr; + } + + #if defined(__cplusplus) + #define SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) simde_align_assume_to_checked_uncapped((Pointer), (Alignment), __FILE__, __LINE__, #Pointer) + #else + #define SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) simde_align_assume_to_checked_uncapped(HEDLEY_CONST_CAST(void*, HEDLEY_REINTERPRET_CAST(const void*, Pointer)), (Alignment), __FILE__, __LINE__, #Pointer) + #endif +#endif + +/* SIMDE_ALIGN_LIKE(Type) + * SIMDE_ALIGN_LIKE_#(Type) + * + * The SIMDE_ALIGN_LIKE macros are similar to the SIMDE_ALIGN_TO macros + * except instead of an integer they take a type; basically, it's just + * a more convenient way to do something like: + * + * SIMDE_ALIGN_TO(SIMDE_ALIGN_OF(Type)) + * + * The versions with a numeric suffix will fall back 
on using a numeric + * value in the event we can't use SIMDE_ALIGN_OF(Type). This is + * mainly for MSVC, where __declspec(align()) can't handle anything + * other than hard-coded numeric values. + */ +#if defined(SIMDE_ALIGN_OF) && defined(SIMDE_ALIGN_TO) && !defined(SIMDE_ALIGN_OF_UNUSABLE_FOR_LIKE) + #define SIMDE_ALIGN_LIKE(Type) SIMDE_ALIGN_TO(SIMDE_ALIGN_OF(Type)) + #define SIMDE_ALIGN_LIKE_64(Type) SIMDE_ALIGN_LIKE(Type) + #define SIMDE_ALIGN_LIKE_32(Type) SIMDE_ALIGN_LIKE(Type) + #define SIMDE_ALIGN_LIKE_16(Type) SIMDE_ALIGN_LIKE(Type) + #define SIMDE_ALIGN_LIKE_8(Type) SIMDE_ALIGN_LIKE(Type) +#else + #define SIMDE_ALIGN_LIKE_64(Type) SIMDE_ALIGN_TO_64 + #define SIMDE_ALIGN_LIKE_32(Type) SIMDE_ALIGN_TO_32 + #define SIMDE_ALIGN_LIKE_16(Type) SIMDE_ALIGN_TO_16 + #define SIMDE_ALIGN_LIKE_8(Type) SIMDE_ALIGN_TO_8 +#endif + +/* SIMDE_ALIGN_ASSUME_LIKE(Pointer, Type) + * + * Tihs is similar to SIMDE_ALIGN_ASSUME_TO, except that it takes a + * type instead of a numeric value. */ +#if defined(SIMDE_ALIGN_OF) && defined(SIMDE_ALIGN_ASSUME_TO) + #define SIMDE_ALIGN_ASSUME_LIKE(Pointer, Type) SIMDE_ALIGN_ASSUME_TO(Pointer, SIMDE_ALIGN_OF(Type)) +#endif + +/* SIMDE_ALIGN_CAST(Type, Pointer) + * + * SIMDE_ALIGN_CAST is like C++'s reinterpret_cast, but it will try + * to silence warnings that some compilers may produce if you try + * to assign to a type with increased alignment requirements. + * + * Note that it does *not* actually attempt to tell the compiler that + * the pointer is aligned like the destination should be; that's the + * job of the next macro. This macro is necessary for stupid APIs + * like _mm_loadu_si128 where the input is a __m128i* but the function + * is specifically for data which isn't necessarily aligned to + * _Alignof(__m128i). + */ +#if HEDLEY_HAS_WARNING("-Wcast-align") || defined(__clang__) || HEDLEY_GCC_VERSION_CHECK(3,4,0) + #define SIMDE_ALIGN_CAST(Type, Pointer) (__extension__({ \ + HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("GCC diagnostic ignored \"-Wcast-align\"") \ + Type simde_r_ = HEDLEY_REINTERPRET_CAST(Type, Pointer); \ + HEDLEY_DIAGNOSTIC_POP \ + simde_r_; \ + })) +#else + #define SIMDE_ALIGN_CAST(Type, Pointer) HEDLEY_REINTERPRET_CAST(Type, Pointer) +#endif + +/* SIMDE_ALIGN_ASSUME_CAST(Type, Pointer) + * + * This is sort of like a combination of a reinterpret_cast and a + * SIMDE_ALIGN_ASSUME_LIKE. It uses SIMDE_ALIGN_ASSUME_LIKE to tell + * the compiler that the pointer is aligned like the specified type + * and casts the pointer to the specified type while suppressing any + * warnings from the compiler about casting to a type with greater + * alignment requirements. 
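+ *
+ * Putting these pieces together, a sketch (it assumes simde/x86/sse2.h
+ * has been included so that simde__m128i and simde_mm_load_si128 are
+ * available; the buffer is illustrative):
+ *
+ *   SIMDE_ALIGN_LIKE_16(simde__m128i) int32_t buf[4] = { 1, 2, 3, 4 };
+ *   simde__m128i v = simde_mm_load_si128(
+ *     SIMDE_ALIGN_ASSUME_CAST(const simde__m128i*, buf));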
+ */ +#define SIMDE_ALIGN_ASSUME_CAST(Type, Pointer) SIMDE_ALIGN_ASSUME_LIKE(SIMDE_ALIGN_CAST(Type, Pointer), Type) + +#endif /* !defined(SIMDE_ALIGN_H) */ diff --git a/lib/mmseqs/lib/simde/simde/simde-arch.h b/lib/mmseqs/lib/simde/simde/simde-arch.h index 72a6016..4e85953 100644 --- a/lib/mmseqs/lib/simde/simde/simde-arch.h +++ b/lib/mmseqs/lib/simde/simde/simde-arch.h @@ -69,7 +69,7 @@ /* AMD64 / x86_64 */ -#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X66) || defined(_M_AMD64) +#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) # define SIMDE_ARCH_AMD64 1000 #endif @@ -275,6 +275,12 @@ # define SIMDE_ARCH_X86_AVX 1 # endif # endif +# if defined(__AVX512VP2INTERSECT__) +# define SIMDE_ARCH_X86_AVX512VP2INTERSECT 1 +# endif +# if defined(__AVX512VBMI__) +# define SIMDE_ARCH_X86_AVX512VBMI 1 +# endif # if defined(__AVX512BW__) # define SIMDE_ARCH_X86_AVX512BW 1 # endif @@ -293,6 +299,12 @@ # if defined(__GFNI__) # define SIMDE_ARCH_X86_GFNI 1 # endif +# if defined(__PCLMUL__) +# define SIMDE_ARCH_X86_PCLMUL 1 +# endif +# if defined(__VPCLMULQDQ__) +# define SIMDE_ARCH_X86_VPCLMULQDQ 1 +# endif #endif /* Itanium @@ -361,6 +373,10 @@ # define SIMDE_ARCH_MIPS_CHECK(version) (0) #endif +#if defined(__mips_loongson_mmi) +# define SIMDE_ARCH_MIPS_LOONGSON_MMI 1 +#endif + /* Matsushita MN10300 */ #if defined(__MN10300__) || defined(__mn10300__) diff --git a/lib/mmseqs/lib/simde/simde/simde-common.h b/lib/mmseqs/lib/simde/simde/simde-common.h index a286530..976cbaa 100644 --- a/lib/mmseqs/lib/simde/simde/simde-common.h +++ b/lib/mmseqs/lib/simde/simde/simde-common.h @@ -30,8 +30,8 @@ #include "hedley.h" #define SIMDE_VERSION_MAJOR 0 -#define SIMDE_VERSION_MINOR 5 -#define SIMDE_VERSION_MICRO 0 +#define SIMDE_VERSION_MINOR 7 +#define SIMDE_VERSION_MICRO 1 #define SIMDE_VERSION HEDLEY_VERSION_ENCODE(SIMDE_VERSION_MAJOR, SIMDE_VERSION_MINOR, SIMDE_VERSION_MICRO) #include @@ -43,6 +43,7 @@ #include "simde-diagnostic.h" #include "simde-math.h" #include "simde-constify.h" +#include "simde-align.h" /* In some situations, SIMDe has to make large performance sacrifices * for small increases in how faithfully it reproduces an API, but @@ -96,58 +97,31 @@ #endif /* This controls how ties are rounded. For example, does 10.5 round to - * 10 or 11? IEEE 754 specifies round-towards-even, but on ARMv7 (for + * 10 or 11? IEEE 754 specifies round-towards-even, but ARMv7 (for * example) doesn't support it and it must be emulated (which is rather * slow). If you're okay with just using the default for whatever arch - * you're on, you should definitely define this. */ + * you're on, you should definitely define this. + * + * Note that we don't use this macro to avoid correct implementations + * in functions which are explicitly about rounding (such as vrnd* on + * NEON, _mm_round_* on x86, etc.); it is only used for code where + * rounding is a component in another function, and even then it isn't + * usually a problem since such functions will use the current rounding + * mode. 
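+ *
+ * Opting in is just a macro definition before the first SIMDe include,
+ * e.g. (a sketch; simde/arm/neon.h is one of the umbrella headers):
+ *
+ *   #define SIMDE_FAST_ROUND_TIES
+ *   #include "simde/arm/neon.h"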
*/ #if !defined(SIMDE_FAST_ROUND_TIES) && !defined(SIMDE_NO_FAST_ROUND_TIES) && defined(SIMDE_FAST_MATH) #define SIMDE_FAST_ROUND_TIES #endif -#if \ - HEDLEY_HAS_ATTRIBUTE(aligned) || \ - HEDLEY_GCC_VERSION_CHECK(2,95,0) || \ - HEDLEY_CRAY_VERSION_CHECK(8,4,0) || \ - HEDLEY_IBM_VERSION_CHECK(11,1,0) || \ - HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - HEDLEY_PGI_VERSION_CHECK(19,4,0) || \ - HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - HEDLEY_TINYC_VERSION_CHECK(0,9,24) || \ - HEDLEY_TI_VERSION_CHECK(8,1,0) -# define SIMDE_ALIGN(alignment) __attribute__((aligned(alignment))) -#elif defined(_MSC_VER) && !(defined(_M_ARM) && !defined(_M_ARM64)) -# define SIMDE_ALIGN(alignment) __declspec(align(alignment)) -#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) -# define SIMDE_ALIGN(alignment) _Alignas(alignment) -#elif defined(__cplusplus) && (__cplusplus >= 201103L) -# define SIMDE_ALIGN(alignment) alignas(alignment) -#else -# define SIMDE_ALIGN(alignment) +/* For functions which convert from one type to another (mostly from + * floating point to integer types), sometimes we need to do a range + * check and potentially return a different result if the value + * falls outside that range. Skipping this check can provide a + * performance boost, at the expense of faithfulness to the API we're + * emulating. */ +#if !defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_NO_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_MATH) + #define SIMDE_FAST_CONVERSION_RANGE #endif -#if HEDLEY_GNUC_VERSION_CHECK(2,95,0) || \ - HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - HEDLEY_IBM_VERSION_CHECK(11,1,0) -# define SIMDE_ALIGN_OF(T) (__alignof__(T)) -#elif \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ - HEDLEY_HAS_FEATURE(c11_alignof) -# define SIMDE_ALIGN_OF(T) (_Alignof(T)) -#elif \ - (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ - HEDLEY_HAS_FEATURE(cxx_alignof) -# define SIMDE_ALIGN_OF(T) (alignof(T)) -#endif - -#if defined(SIMDE_ALIGN_OF) -# define SIMDE_ALIGN_AS(N, T) SIMDE_ALIGN(SIMDE_ALIGN_OF(T)) -#else -# define SIMDE_ALIGN_AS(N, T) SIMDE_ALIGN(N) -#endif - -#define simde_assert_aligned(alignment, val) \ - simde_assert_int(HEDLEY_REINTERPRET_CAST(uintptr_t, HEDLEY_REINTERPRET_CAST(const void*, (val))) % (alignment), ==, 0) - #if \ HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ @@ -201,44 +175,6 @@ # define SIMDE_STATIC_ASSERT(expr, message) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) #endif -/* SIMDE_ASSUME_ALIGNED allows you to (try to) tell the compiler - * that a pointer is aligned to an `alignment`-byte boundary. 
*/ -#if \ - HEDLEY_HAS_BUILTIN(__builtin_assume_aligned) || \ - HEDLEY_GCC_VERSION_CHECK(4,7,0) - #define SIMDE_ASSUME_ALIGNED(alignment, v) HEDLEY_REINTERPRET_CAST(__typeof__(v), __builtin_assume_aligned(v, alignment)) -#elif defined(__cplusplus) && (__cplusplus > 201703L) - #define SIMDE_ASSUME_ALIGNED(alignment, v) std::assume_aligned(v) -#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define SIMDE_ASSUME_ALIGNED(alignment, v) (__extension__ ({ \ - __typeof__(v) simde_assume_aligned_t_ = (v); \ - __assume_aligned(simde_assume_aligned_t_, alignment); \ - simde_assume_aligned_t_; \ - })) -#else - #define SIMDE_ASSUME_ALIGNED(alignment, v) (v) -#endif - -#if defined(SIMDE_ALIGN_OF) - #define SIMDE_ASSUME_ALIGNED_AS(T, v) SIMDE_ASSUME_ALIGNED(SIMDE_ALIGN_OF(T), v) -#else - #define SIMDE_ASSUME_ALIGNED_AS(T, v) (v) -#endif - -/* SIMDE_ALIGN_CAST allows you to convert to a type with greater - * aligment requirements without triggering a warning. */ -#if HEDLEY_HAS_WARNING("-Wcast-align") || defined(__clang__) || HEDLEY_GCC_VERSION_CHECK(3,4,0) - #define SIMDE_ALIGN_CAST(T, v) (__extension__({ \ - HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("GCC diagnostic ignored \"-Wcast-align\"") \ - T simde_r_ = HEDLEY_REINTERPRET_CAST(T, v); \ - HEDLEY_DIAGNOSTIC_POP \ - simde_r_; \ - })) -#else - #define SIMDE_ALIGN_CAST(T, v) HEDLEY_REINTERPRET_CAST(T, v) -#endif - #if \ (HEDLEY_HAS_ATTRIBUTE(may_alias) && !defined(HEDLEY_SUNPRO_VERSION)) || \ HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ @@ -743,6 +679,93 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; #include "check.h" +/* GCC/clang have a bunch of functionality in builtins which we would + * like to access, but the suffixes indicate whether the operate on + * int, long, or long long, not fixed width types (e.g., int32_t). + * we use these macros to attempt to map from fixed-width to the + * names GCC uses. Note that you should still cast the input(s) and + * return values (to/from SIMDE_BUILTIN_TYPE_*_) since often even if + * types are the same size they may not be compatible according to the + * compiler. For example, on x86 long and long lonsg are generally + * both 64 bits, but platforms vary on whether an int64_t is mapped + * to a long or long long. 
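+ *
+ * As a sketch, when the builtin is available a 64-bit popcount of a
+ * uint64_t value x can be spelled like this (x and n are illustrative
+ * locals):
+ *
+ *   #if SIMDE_BUILTIN_HAS_64_(popcount)
+ *     n = SIMDE_BUILTIN_64_(popcount)(
+ *       HEDLEY_STATIC_CAST(SIMDE_BUILTIN_TYPE_64_, x));
+ *   #endif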
*/ + +#include + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ + +#if (INT8_MAX == INT_MAX) && (INT8_MIN == INT_MIN) + #define SIMDE_BUILTIN_SUFFIX_8_ + #define SIMDE_BUILTIN_TYPE_8_ int +#elif (INT8_MAX == LONG_MAX) && (INT8_MIN == LONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_8_ l + #define SIMDE_BUILTIN_TYPE_8_ long +#elif (INT8_MAX == LLONG_MAX) && (INT8_MIN == LLONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_8_ ll + #define SIMDE_BUILTIN_TYPE_8_ long long +#endif + +#if (INT16_MAX == INT_MAX) && (INT16_MIN == INT_MIN) + #define SIMDE_BUILTIN_SUFFIX_16_ + #define SIMDE_BUILTIN_TYPE_16_ int +#elif (INT16_MAX == LONG_MAX) && (INT16_MIN == LONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_16_ l + #define SIMDE_BUILTIN_TYPE_16_ long +#elif (INT16_MAX == LLONG_MAX) && (INT16_MIN == LLONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_16_ ll + #define SIMDE_BUILTIN_TYPE_16_ long long +#endif + +#if (INT32_MAX == INT_MAX) && (INT32_MIN == INT_MIN) + #define SIMDE_BUILTIN_SUFFIX_32_ + #define SIMDE_BUILTIN_TYPE_32_ int +#elif (INT32_MAX == LONG_MAX) && (INT32_MIN == LONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_32_ l + #define SIMDE_BUILTIN_TYPE_32_ long +#elif (INT32_MAX == LLONG_MAX) && (INT32_MIN == LLONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_32_ ll + #define SIMDE_BUILTIN_TYPE_32_ long long +#endif + +#if (INT64_MAX == INT_MAX) && (INT64_MIN == INT_MIN) + #define SIMDE_BUILTIN_SUFFIX_64_ + #define SIMDE_BUILTIN_TYPE_64_ int +#elif (INT64_MAX == LONG_MAX) && (INT64_MIN == LONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_64_ l + #define SIMDE_BUILTIN_TYPE_64_ long +#elif (INT64_MAX == LLONG_MAX) && (INT64_MIN == LLONG_MIN) + #define SIMDE_BUILTIN_SUFFIX_64_ ll + #define SIMDE_BUILTIN_TYPE_64_ long long +#endif + +#if defined(SIMDE_BUILTIN_SUFFIX_8_) + #define SIMDE_BUILTIN_8_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_8_) + #define SIMDE_BUILTIN_HAS_8_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_8_)) +#else + #define SIMDE_BUILTIN_HAS_8_(name) 0 +#endif +#if defined(SIMDE_BUILTIN_SUFFIX_16_) + #define SIMDE_BUILTIN_16_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_16_) + #define SIMDE_BUILTIN_HAS_16_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_16_)) +#else + #define SIMDE_BUILTIN_HAS_16_(name) 0 +#endif +#if defined(SIMDE_BUILTIN_SUFFIX_32_) + #define SIMDE_BUILTIN_32_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_32_) + #define SIMDE_BUILTIN_HAS_32_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_32_)) +#else + #define SIMDE_BUILTIN_HAS_32_(name) 0 +#endif +#if defined(SIMDE_BUILTIN_SUFFIX_64_) + #define SIMDE_BUILTIN_64_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_64_) + #define SIMDE_BUILTIN_HAS_64_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_64_)) +#else + #define SIMDE_BUILTIN_HAS_64_(name) 0 +#endif + +HEDLEY_DIAGNOSTIC_POP + /* Sometimes we run into problems with specific versions of compilers which make the native versions unusable for us. Often this is due to missing functions, sometimes buggy implementations, etc. 
These @@ -800,14 +823,24 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; # if defined(SIMDE_ARCH_AARCH64) # define SIMDE_BUG_CLANG_45541 # define SIMDE_BUG_CLANG_46844 +# define SIMDE_BUG_CLANG_48257 +# if SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0) +# define SIMDE_BUG_CLANG_BAD_VI64_OPS +# endif # endif # if defined(SIMDE_ARCH_POWER) # define SIMDE_BUG_CLANG_46770 # endif +# if defined(_ARCH_PWR9) && !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0) && !defined(__OPTIMIZE__) +# define SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT +# endif # if defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64) # if HEDLEY_HAS_WARNING("-Wsign-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0) # define SIMDE_BUG_CLANG_45931 # endif +# if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0) +# define SIMDE_BUG_CLANG_44589 +# endif # endif # define SIMDE_BUG_CLANG_45959 # elif defined(HEDLEY_MSVC_VERSION) @@ -817,10 +850,6 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; # elif defined(HEDLEY_INTEL_VERSION) # define SIMDE_BUG_INTEL_857088 # endif -# if defined(HEDLEY_EMSCRIPTEN_VERSION) -# define SIMDE_BUG_EMSCRIPTEN_MISSING_IMPL /* Placeholder for (as yet) unfiled issues. */ -# define SIMDE_BUG_EMSCRIPTEN_5242 -# endif #endif /* GCC and Clang both have the same issue: diff --git a/lib/mmseqs/lib/simde/simde/simde-detect-clang.h b/lib/mmseqs/lib/simde/simde/simde-detect-clang.h index 2016392..93fcdfb 100644 --- a/lib/mmseqs/lib/simde/simde/simde-detect-clang.h +++ b/lib/mmseqs/lib/simde/simde/simde-detect-clang.h @@ -57,7 +57,9 @@ * anything we can detect. */ #if defined(__clang__) && !defined(SIMDE_DETECT_CLANG_VERSION) -# if __has_warning("-Wimplicit-const-int-float-conversion") +# if __has_warning("-Wformat-insufficient-args") +# define SIMDE_DETECT_CLANG_VERSION 120000 +# elif __has_warning("-Wimplicit-const-int-float-conversion") # define SIMDE_DETECT_CLANG_VERSION 110000 # elif __has_warning("-Wmisleading-indentation") # define SIMDE_DETECT_CLANG_VERSION 100000 @@ -101,7 +103,7 @@ # define SIMDE_DETECT_CLANG_VERSION_NOT(major, minor, revision) (SIMDE_DETECT_CLANG_VERSION < ((major * 10000) + (minor * 1000) + (revision))) #else # define SIMDE_DETECT_CLANG_VERSION_CHECK(major, minor, revision) (0) -# define SIMDE_DETECT_CLANG_VERSION_NOT(major, minor, revision) (1) +# define SIMDE_DETECT_CLANG_VERSION_NOT(major, minor, revision) (0) #endif #endif /* !defined(SIMDE_DETECT_CLANG_H) */ diff --git a/lib/mmseqs/lib/simde/simde/simde-diagnostic.h b/lib/mmseqs/lib/simde/simde/simde-diagnostic.h index c518fb0..c6ae3bb 100644 --- a/lib/mmseqs/lib/simde/simde/simde-diagnostic.h +++ b/lib/mmseqs/lib/simde/simde/simde-diagnostic.h @@ -181,7 +181,10 @@ * before we can access certain SIMD intrinsics, but this diagnostic * warns about it being a reserved name. It is a reserved name, but * it's reserved for the compiler and we are using it to convey - * information to the compiler. */ + * information to the compiler. + * + * This is also used when enabling native aliases since we don't get to + * choose the macro names. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #else @@ -298,7 +301,13 @@ * -Wc++98-compat-pedantic which says 'long long' is incompatible with * C++98. 
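The simde-detect-clang.h hunk above adds a clang 12 rung to the warning-probe ladder and, when the compiler is not clang at all, makes SIMDE_DETECT_CLANG_VERSION_NOT evaluate to 0 so that clang-version-gated workarounds stop firing on other compilers. A stripped-down sketch of the probing idea, using a made-up macro name (EXAMPLE_CLANG_VERSION) and only the newest rungs shown in the patch:

/* Each clang release introduces new warnings, so asking __has_warning about
 * a recent one gives a version estimate that survives vendors (Apple clang,
 * for instance) renumbering __clang_major__. */
#if defined(__clang__) && !defined(EXAMPLE_CLANG_VERSION)
#  if __has_warning("-Wformat-insufficient-args")
#    define EXAMPLE_CLANG_VERSION 120000 /* clang 12 or later */
#  elif __has_warning("-Wimplicit-const-int-float-conversion")
#    define EXAMPLE_CLANG_VERSION 110000 /* clang 11 */
#  elif __has_warning("-Wmisleading-indentation")
#    define EXAMPLE_CLANG_VERSION 100000 /* clang 10 */
#  else
#    define EXAMPLE_CLANG_VERSION 90000  /* older; the real header keeps probing */
#  endif
#endif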
*/ #if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") - #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") + #if HEDLEY_HAS_WARNING("-Wc++11-long-long") + #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") \ + _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"") + #else + #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") + #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ #endif @@ -333,7 +342,7 @@ #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") /* For NEON, the situation with -Wvector-conversion in clang < 10 is * bad enough that we just disable the warning altogether. */ - #if defined(__arm__) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0) + #if defined(SIMDE_ARCH_ARM) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif #else @@ -343,6 +352,19 @@ #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ #endif +/* Prior to 5.0, clang didn't support disabling diagnostics in + * statement exprs. As a result, some macros we use don't + * properly silence warnings. */ +#if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") && HEDLEY_HAS_WARNING("-Wcast-align") + #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") _Pragma("clang diagnostic ignored \"-Wcast-align\"") +#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") + #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") +#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-align") + #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-align\"") +#else + #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ +#endif + /* SLEEF triggers this a *lot* in their headers */ #if HEDLEY_HAS_WARNING("-Wignored-qualifiers") #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"") @@ -359,7 +381,33 @@ #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ #endif +/* MSVC doesn't like (__assume(0), code) and will warn about code being + * unreachable, but we want it there because not all compilers + * understand the unreachable macro and will complain if it is missing. + * I'm planning on adding a new macro to Hedley to handle this a bit + * more elegantly, but until then... */ +#if defined(HEDLEY_MSVC_VERSION) + #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ __pragma(warning(disable:4702)) +#else + #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ +#endif + +/* This is a false positive from GCC in a few places. 
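SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_, added above, exists because MSVC reports the defensive statement that follows an unreachable marker as warning C4702, while compilers without an unreachable intrinsic still need that statement to be present. A hedged sketch of the intended pattern; example_lane_count is a made-up helper, not simde API:

/* Sketch only: silence C4702 around a function whose trailing return is
 * reachable on some compilers and provably dead on others. */
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_

static HEDLEY_INLINE int
example_lane_count(int bytes_per_lane) {
  switch (bytes_per_lane) {
    case 1: return 32;
    case 2: return 16;
    case 4: return 8;
    case 8: return 4;
  }
  HEDLEY_UNREACHABLE();
  return 0; /* kept for compilers that lack an unreachable intrinsic;
               MSVC would otherwise flag this line as unreachable (C4702). */
}

HEDLEY_DIAGNOSTIC_POP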
*/ +#if HEDLEY_GCC_VERSION_CHECK(4,7,0) + #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") +#else + #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ +#endif + +#if defined(SIMDE_ENABLE_NATIVE_ALIASES) + #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ + SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ +#else + #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ +#endif + #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \ + SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \ SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \ SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \ @@ -374,6 +422,7 @@ SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \ SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \ + SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ #endif /* !defined(SIMDE_DIAGNOSTIC_H) */ diff --git a/lib/mmseqs/lib/simde/simde/simde-features.h b/lib/mmseqs/lib/simde/simde/simde-features.h index d234238..528e585 100644 --- a/lib/mmseqs/lib/simde/simde/simde-features.h +++ b/lib/mmseqs/lib/simde/simde/simde-features.h @@ -43,6 +43,24 @@ #define SIMDE_X86_AVX512F_NATIVE #endif +#if !defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && !defined(SIMDE_X86_AVX512VP2INTERSECT_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512VP2INTERSECT) + #define SIMDE_X86_AVX512VP2INTERSECT_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + +#if !defined(SIMDE_X86_AVX512VBMI_NATIVE) && !defined(SIMDE_X86_AVX512VBMI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512VBMI) + #define SIMDE_X86_AVX512VBMI_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + #if !defined(SIMDE_X86_AVX512CD_NATIVE) && !defined(SIMDE_X86_AVX512CD_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) #if defined(SIMDE_ARCH_X86_AVX512CD) #define SIMDE_X86_AVX512CD_NATIVE @@ -178,6 +196,18 @@ #endif #endif +#if !defined(SIMDE_X86_PCLMUL_NATIVE) && !defined(SIMDE_X86_PCLMUL_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_PCLMUL) + #define SIMDE_X86_PCLMUL_NATIVE + #endif +#endif + +#if !defined(SIMDE_X86_VPCLMULQDQ_NATIVE) && !defined(SIMDE_X86_VPCLMULQDQ_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_VPCLMULQDQ) + #define SIMDE_X86_VPCLMULQDQ_NATIVE + #endif +#endif + #if !defined(SIMDE_X86_SVML_NATIVE) && !defined(SIMDE_X86_SVML_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) #if defined(__INTEL_COMPILER) #define SIMDE_X86_SVML_NATIVE @@ -190,9 +220,7 @@ #endif #if \ - defined(SIMDE_X86_AVX_NATIVE) || \ - defined(SIMDE_X86_GFNI_NATIVE) || \ - defined(SIMDE_X86_SVML_NATIVE) + defined(SIMDE_X86_AVX_NATIVE) || defined(SIMDE_X86_GFNI_NATIVE) #include #elif defined(SIMDE_X86_SSE4_2_NATIVE) #include @@ -224,7 +252,7 @@ #endif #if !defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_ARM_NEON_A32V8_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) - #if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(80) + #if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(80) && (__ARM_NEON_FP & 0x02) #define SIMDE_ARM_NEON_A32V8_NATIVE #endif #endif @@ -348,6 +376,15 @@ #endif #endif +#if !defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) && !defined(SIMDE_MIPS_LOONGSON_MMI_NO_NATIVE) && 
!defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_MIPS_LOONGSON_MMI) + #define SIMDE_MIPS_LOONGSON_MMI_NATIVE 1 + #endif +#endif +#if defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + #include +#endif + /* This is used to determine whether or not to fall back on a vector * function in an earlier ISA extensions, as well as whether * we expected any attempts at vectorization to be fruitful or if we @@ -371,8 +408,8 @@ #endif #endif -#define SIMDE_NATURAL_VECTOR_SIZE_LE(x) (SIMDE_NATURAL_VECTOR_SIZE <= (x)) -#define SIMDE_NATURAL_VECTOR_SIZE_GE(x) (SIMDE_NATURAL_VECTOR_SIZE >= (x)) +#define SIMDE_NATURAL_VECTOR_SIZE_LE(x) ((SIMDE_NATURAL_VECTOR_SIZE > 0) && (SIMDE_NATURAL_VECTOR_SIZE <= (x))) +#define SIMDE_NATURAL_VECTOR_SIZE_GE(x) ((SIMDE_NATURAL_VECTOR_SIZE > 0) && (SIMDE_NATURAL_VECTOR_SIZE >= (x))) /* Native aliases */ #if defined(SIMDE_ENABLE_NATIVE_ALIASES) @@ -424,6 +461,12 @@ #if !defined(SIMDE_X86_GFNI_NATIVE) #define SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES #endif + #if !defined(SIMDE_X86_PCLMUL_NATIVE) + #define SIMDE_X86_PCLMUL_ENABLE_NATIVE_ALIASES + #endif + #if !defined(SIMDE_X86_VPCLMULQDQ_NATIVE) + #define SIMDE_X86_VPCLMULQDQ_ENABLE_NATIVE_ALIASES + #endif #if !defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES diff --git a/lib/mmseqs/lib/simde/simde/simde-math.h b/lib/mmseqs/lib/simde/simde/simde-math.h index 4b7f0d6..c48a103 100644 --- a/lib/mmseqs/lib/simde/simde/simde-math.h +++ b/lib/mmseqs/lib/simde/simde/simde-math.h @@ -34,6 +34,14 @@ #include "hedley.h" #include "simde-features.h" +#include +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #include +#endif + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + /* SLEEF support * https://sleef.org/ * @@ -441,13 +449,13 @@ #endif #endif -#if !defined(simde_math_absf) - #if SIMDE_MATH_BUILTIN_LIBM(absf) - #define simde_math_absf(v) __builtin_absf(v) +#if !defined(simde_math_fabsf) + #if SIMDE_MATH_BUILTIN_LIBM(fabsf) + #define simde_math_fabsf(v) __builtin_fabsf(v) #elif defined(SIMDE_MATH_HAVE_CMATH) - #define simde_math_absf(v) std::abs(v) + #define simde_math_fabsf(v) std::abs(v) #elif defined(SIMDE_MATH_HAVE_MATH_H) - #define simde_math_absf(v) absf(v) + #define simde_math_fabsf(v) fabsf(v) #endif #endif @@ -869,6 +877,26 @@ #endif #endif +#if !defined(simde_math_fmax) + #if SIMDE_MATH_BUILTIN_LIBM(fmax) + #define simde_math_fmax(x, y, z) __builtin_fmax(x, y, z) + #elif defined(SIMDE_MATH_HAVE_CMATH) + #define simde_math_fmax(x, y, z) std::fmax(x, y, z) + #elif defined(SIMDE_MATH_HAVE_MATH_H) + #define simde_math_fmax(x, y, z) fmax(x, y, z) + #endif +#endif + +#if !defined(simde_math_fmaxf) + #if SIMDE_MATH_BUILTIN_LIBM(fmaxf) + #define simde_math_fmaxf(x, y, z) __builtin_fmaxf(x, y, z) + #elif defined(SIMDE_MATH_HAVE_CMATH) + #define simde_math_fmaxf(x, y, z) std::fmax(x, y, z) + #elif defined(SIMDE_MATH_HAVE_MATH_H) + #define simde_math_fmaxf(x, y, z) fmaxf(x, y, z) + #endif +#endif + #if !defined(simde_math_hypot) #if SIMDE_MATH_BUILTIN_LIBM(hypot) #define simde_math_hypot(y, x) __builtin_hypot(y, x) @@ -989,6 +1017,26 @@ #endif #endif +#if !defined(simde_math_modf) + #if SIMDE_MATH_BUILTIN_LIBM(modf) + #define simde_math_modf(x, iptr) __builtin_modf(x, iptr) + #elif defined(SIMDE_MATH_HAVE_CMATH) + #define simde_math_modf(x, iptr) std::modf(x, iptr) + #elif defined(SIMDE_MATH_HAVE_MATH_H) + #define simde_math_modf(x, iptr) modf(x, iptr) + #endif +#endif + +#if !defined(simde_math_modff) + #if SIMDE_MATH_BUILTIN_LIBM(modff) + #define simde_math_modff(x, iptr) __builtin_modff(x, 
iptr) + #elif defined(SIMDE_MATH_HAVE_CMATH) + #define simde_math_modff(x, iptr) std::modf(x, iptr) + #elif defined(SIMDE_MATH_HAVE_MATH_H) + #define simde_math_modff(x, iptr) modff(x, iptr) + #endif +#endif + #if !defined(simde_math_nearbyint) #if SIMDE_MATH_BUILTIN_LIBM(nearbyint) #define simde_math_nearbyint(v) __builtin_nearbyint(v) @@ -1069,6 +1117,46 @@ #endif #endif +#if !defined(simde_math_roundeven) + #if \ + HEDLEY_HAS_BUILTIN(__builtin_roundeven) || \ + HEDLEY_GCC_VERSION_CHECK(10,0,0) + #define simde_math_roundeven(v) __builtin_roundeven(v) + #elif defined(simde_math_round) && defined(simde_math_fabs) + static HEDLEY_INLINE + double + simde_math_roundeven(double v) { + double rounded = simde_math_round(v); + double diff = rounded - v; + if (HEDLEY_UNLIKELY(simde_math_fabs(diff) == 0.5) && (HEDLEY_STATIC_CAST(int64_t, rounded) & 1)) { + rounded = v - diff; + } + return rounded; + } + #define simde_math_roundeven simde_math_roundeven + #endif +#endif + +#if !defined(simde_math_roundevenf) + #if \ + HEDLEY_HAS_BUILTIN(__builtin_roundevenf) || \ + HEDLEY_GCC_VERSION_CHECK(10,0,0) + #define simde_math_roundevenf(v) __builtin_roundevenf(v) + #elif defined(simde_math_roundf) && defined(simde_math_fabsf) + static HEDLEY_INLINE + float + simde_math_roundevenf(float v) { + float rounded = simde_math_roundf(v); + float diff = rounded - v; + if (HEDLEY_UNLIKELY(simde_math_fabsf(diff) == 0.5f) && (HEDLEY_STATIC_CAST(int32_t, rounded) & 1)) { + rounded = v - diff; + } + return rounded; + } + #define simde_math_roundevenf simde_math_roundevenf + #endif +#endif + #if !defined(simde_math_sin) #if SIMDE_MATH_BUILTIN_LIBM(sin) #define simde_math_sin(v) __builtin_sin(v) @@ -1565,4 +1653,260 @@ simde_math_deg2radf(float degrees) { return degrees * (SIMDE_MATH_PI_OVER_180F); } +/*** Saturated arithmetic ***/ + +static HEDLEY_INLINE +int8_t +simde_math_adds_i8(int8_t a, int8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddb_s8(a, b); + #else + uint8_t a_ = HEDLEY_STATIC_CAST(uint8_t, a); + uint8_t b_ = HEDLEY_STATIC_CAST(uint8_t, b); + uint8_t r_ = a_ + b_; + + a_ = (a_ >> ((8 * sizeof(r_)) - 1)) + INT8_MAX; + if (HEDLEY_STATIC_CAST(int8_t, ((a_ ^ b_) | ~(b_ ^ r_))) >= 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int8_t, r_); + #endif +} + +static HEDLEY_INLINE +int16_t +simde_math_adds_i16(int16_t a, int16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddh_s16(a, b); + #else + uint16_t a_ = HEDLEY_STATIC_CAST(uint16_t, a); + uint16_t b_ = HEDLEY_STATIC_CAST(uint16_t, b); + uint16_t r_ = a_ + b_; + + a_ = (a_ >> ((8 * sizeof(r_)) - 1)) + INT16_MAX; + if (HEDLEY_STATIC_CAST(int16_t, ((a_ ^ b_) | ~(b_ ^ r_))) >= 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int16_t, r_); + #endif +} + +static HEDLEY_INLINE +int32_t +simde_math_adds_i32(int32_t a, int32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqadds_s32(a, b); + #else + uint32_t a_ = HEDLEY_STATIC_CAST(uint32_t, a); + uint32_t b_ = HEDLEY_STATIC_CAST(uint32_t, b); + uint32_t r_ = a_ + b_; + + a_ = (a_ >> ((8 * sizeof(r_)) - 1)) + INT32_MAX; + if (HEDLEY_STATIC_CAST(int32_t, ((a_ ^ b_) | ~(b_ ^ r_))) >= 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int32_t, r_); + #endif +} + +static HEDLEY_INLINE +int64_t +simde_math_adds_i64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddd_s64(a, b); + #else + uint64_t a_ = HEDLEY_STATIC_CAST(uint64_t, a); + uint64_t b_ = HEDLEY_STATIC_CAST(uint64_t, b); + uint64_t r_ = a_ + b_; + + a_ = (a_ >> ((8 * sizeof(r_)) - 1)) + 
INT64_MAX; + if (HEDLEY_STATIC_CAST(int64_t, ((a_ ^ b_) | ~(b_ ^ r_))) >= 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int64_t, r_); + #endif +} + +static HEDLEY_INLINE +uint8_t +simde_math_adds_u8(uint8_t a, uint8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddb_u8(a, b); + #else + uint8_t r = a + b; + r |= -(r < a); + return r; + #endif +} + +static HEDLEY_INLINE +uint16_t +simde_math_adds_u16(uint16_t a, uint16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddh_u16(a, b); + #else + uint16_t r = a + b; + r |= -(r < a); + return r; + #endif +} + +static HEDLEY_INLINE +uint32_t +simde_math_adds_u32(uint32_t a, uint32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqadds_u32(a, b); + #else + uint32_t r = a + b; + r |= -(r < a); + return r; + #endif +} + +static HEDLEY_INLINE +uint64_t +simde_math_adds_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddd_u64(a, b); + #else + uint64_t r = a + b; + r |= -(r < a); + return r; + #endif +} + +static HEDLEY_INLINE +int8_t +simde_math_subs_i8(int8_t a, int8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubb_s8(a, b); + #else + uint8_t a_ = HEDLEY_STATIC_CAST(uint8_t, a); + uint8_t b_ = HEDLEY_STATIC_CAST(uint8_t, b); + uint8_t r_ = a_ - b_; + + a_ = (a_ >> 7) + INT8_MAX; + + if (HEDLEY_STATIC_CAST(int8_t, (a_ ^ b_) & (a_ ^ r_)) < 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int8_t, r_); + #endif +} + +static HEDLEY_INLINE +int16_t +simde_math_subs_i16(int16_t a, int16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubh_s16(a, b); + #else + uint16_t a_ = HEDLEY_STATIC_CAST(uint16_t, a); + uint16_t b_ = HEDLEY_STATIC_CAST(uint16_t, b); + uint16_t r_ = a_ - b_; + + a_ = (a_ >> 15) + INT16_MAX; + + if (HEDLEY_STATIC_CAST(int16_t, (a_ ^ b_) & (a_ ^ r_)) < 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int16_t, r_); + #endif +} + +static HEDLEY_INLINE +int32_t +simde_math_subs_i32(int32_t a, int32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubs_s32(a, b); + #else + uint32_t a_ = HEDLEY_STATIC_CAST(uint32_t, a); + uint32_t b_ = HEDLEY_STATIC_CAST(uint32_t, b); + uint32_t r_ = a_ - b_; + + a_ = (a_ >> 31) + INT32_MAX; + + if (HEDLEY_STATIC_CAST(int32_t, (a_ ^ b_) & (a_ ^ r_)) < 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int32_t, r_); + #endif +} + +static HEDLEY_INLINE +int64_t +simde_math_subs_i64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubd_s64(a, b); + #else + uint64_t a_ = HEDLEY_STATIC_CAST(uint64_t, a); + uint64_t b_ = HEDLEY_STATIC_CAST(uint64_t, b); + uint64_t r_ = a_ - b_; + + a_ = (a_ >> 63) + INT64_MAX; + + if (HEDLEY_STATIC_CAST(int64_t, (a_ ^ b_) & (a_ ^ r_)) < 0) { + r_ = a_; + } + + return HEDLEY_STATIC_CAST(int64_t, r_); + #endif +} + +static HEDLEY_INLINE +uint8_t +simde_math_subs_u8(uint8_t a, uint8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubb_u8(a, b); + #else + uint8_t res = a - b; + res &= -(res <= a); + return res; + #endif +} + +static HEDLEY_INLINE +uint16_t +simde_math_subs_u16(uint16_t a, uint16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubh_u16(a, b); + #else + uint16_t res = a - b; + res &= -(res <= a); + return res; + #endif +} + +static HEDLEY_INLINE +uint32_t +simde_math_subs_u32(uint32_t a, uint32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubs_u32(a, b); + #else + uint32_t res = a - b; + res &= -(res <= a); + return res; + #endif +} + +static HEDLEY_INLINE +uint64_t 
+simde_math_subs_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqsubd_u64(a, b); + #else + uint64_t res = a - b; + res &= -(res <= a); + return res; + #endif +} + +HEDLEY_DIAGNOSTIC_POP + #endif /* !defined(SIMDE_MATH_H) */ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx.h b/lib/mmseqs/lib/simde/simde/x86/avx.h index c429bbd..31cb5ba 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx.h @@ -22,6 +22,7 @@ * * Copyright: * 2018-2020 Evan Nemerson + * 2020 Michael R. Crusoe */ #include "sse.h" @@ -36,174 +37,174 @@ SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(32) int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(32) simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) simde_uint128 u128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_uint128 u128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #endif - SIMDE_ALIGN(32) simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - #else - SIMDE_ALIGN(32) int8_t i8[32]; - SIMDE_ALIGN(32) int16_t i16[16]; - SIMDE_ALIGN(32) int32_t i32[8]; - SIMDE_ALIGN(32) int64_t i64[4]; - SIMDE_ALIGN(32) uint8_t u8[32]; - SIMDE_ALIGN(32) uint16_t u16[16]; - SIMDE_ALIGN(32) uint32_t u32[8]; - SIMDE_ALIGN(32) uint64_t u64[4]; - SIMDE_ALIGN(32) int_fast32_t i32f[32 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(32) uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; + SIMDE_ALIGN_TO_32 simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_32 int8_t i8[32]; + SIMDE_ALIGN_TO_32 int16_t i16[16]; + SIMDE_ALIGN_TO_32 int32_t i32[8]; + SIMDE_ALIGN_TO_32 int64_t i64[4]; + SIMDE_ALIGN_TO_32 uint8_t u8[32]; + SIMDE_ALIGN_TO_32 uint16_t u16[16]; + SIMDE_ALIGN_TO_32 uint32_t u32[8]; + SIMDE_ALIGN_TO_32 uint64_t u64[4]; + SIMDE_ALIGN_TO_32 int_fast32_t i32f[32 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(32) simde_int128 i128[2]; - SIMDE_ALIGN(32) simde_uint128 
u128[2]; + SIMDE_ALIGN_TO_32 simde_int128 i128[2]; + SIMDE_ALIGN_TO_32 simde_uint128 u128[2]; #endif - SIMDE_ALIGN(32) simde_float32 f32[8]; - SIMDE_ALIGN(32) simde_float64 f64[4]; + SIMDE_ALIGN_TO_32 simde_float32 f32[8]; + SIMDE_ALIGN_TO_32 simde_float64 f64[4]; #endif - SIMDE_ALIGN(32) simde__m128_private m128_private[2]; - SIMDE_ALIGN(32) simde__m128 m128[2]; + SIMDE_ALIGN_TO_32 simde__m128_private m128_private[2]; + SIMDE_ALIGN_TO_32 simde__m128 m128[2]; #if defined(SIMDE_X86_AVX_NATIVE) - SIMDE_ALIGN(32) __m256 n; + SIMDE_ALIGN_TO_32 __m256 n; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(int) altivec_i32[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(int) altivec_i32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(long long) altivec_i64[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(long long) altivec_i64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; #endif #endif } simde__m256_private; typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(32) int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(32) simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) simde_uint128 u128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_uint128 u128 
SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #endif - SIMDE_ALIGN(32) simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - #else - SIMDE_ALIGN(32) int8_t i8[32]; - SIMDE_ALIGN(32) int16_t i16[16]; - SIMDE_ALIGN(32) int32_t i32[8]; - SIMDE_ALIGN(32) int64_t i64[4]; - SIMDE_ALIGN(32) uint8_t u8[32]; - SIMDE_ALIGN(32) uint16_t u16[16]; - SIMDE_ALIGN(32) uint32_t u32[8]; - SIMDE_ALIGN(32) uint64_t u64[4]; + SIMDE_ALIGN_TO_32 simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_32 int8_t i8[32]; + SIMDE_ALIGN_TO_32 int16_t i16[16]; + SIMDE_ALIGN_TO_32 int32_t i32[8]; + SIMDE_ALIGN_TO_32 int64_t i64[4]; + SIMDE_ALIGN_TO_32 uint8_t u8[32]; + SIMDE_ALIGN_TO_32 uint16_t u16[16]; + SIMDE_ALIGN_TO_32 uint32_t u32[8]; + SIMDE_ALIGN_TO_32 uint64_t u64[4]; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(32) simde_int128 i128[2]; - SIMDE_ALIGN(32) simde_uint128 u128[2]; + SIMDE_ALIGN_TO_32 simde_int128 i128[2]; + SIMDE_ALIGN_TO_32 simde_uint128 u128[2]; #endif - SIMDE_ALIGN(32) simde_float32 f32[8]; - SIMDE_ALIGN(32) simde_float64 f64[4]; - SIMDE_ALIGN(32) int_fast32_t i32f[32 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(32) uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; + SIMDE_ALIGN_TO_32 simde_float32 f32[8]; + SIMDE_ALIGN_TO_32 simde_float64 f64[4]; + SIMDE_ALIGN_TO_32 int_fast32_t i32f[32 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; #endif - SIMDE_ALIGN(32) simde__m128d_private m128d_private[2]; - SIMDE_ALIGN(32) simde__m128d m128d[2]; + SIMDE_ALIGN_TO_32 simde__m128d_private m128d_private[2]; + SIMDE_ALIGN_TO_32 simde__m128d m128d[2]; #if defined(SIMDE_X86_AVX_NATIVE) - SIMDE_ALIGN(32) __m256d n; + SIMDE_ALIGN_TO_32 __m256d n; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long 
long) altivec_u64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; #endif #endif } simde__m256d_private; typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(32) int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(32) simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) simde_uint128 u128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_uint128 u128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #endif - SIMDE_ALIGN(32) simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(32) uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - #else - SIMDE_ALIGN(32) int8_t i8[32]; - SIMDE_ALIGN(32) int16_t i16[16]; - SIMDE_ALIGN(32) int32_t i32[8]; - SIMDE_ALIGN(32) int64_t i64[4]; - SIMDE_ALIGN(32) uint8_t u8[32]; - SIMDE_ALIGN(32) uint16_t u16[16]; - SIMDE_ALIGN(32) uint32_t u32[8]; - SIMDE_ALIGN(32) uint64_t u64[4]; - SIMDE_ALIGN(32) int_fast32_t i32f[32 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(32) uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; + SIMDE_ALIGN_TO_32 simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_32 int8_t i8[32]; + SIMDE_ALIGN_TO_32 int16_t i16[16]; + SIMDE_ALIGN_TO_32 int32_t i32[8]; + SIMDE_ALIGN_TO_32 int64_t i64[4]; + SIMDE_ALIGN_TO_32 uint8_t u8[32]; + SIMDE_ALIGN_TO_32 uint16_t u16[16]; + SIMDE_ALIGN_TO_32 uint32_t u32[8]; + SIMDE_ALIGN_TO_32 uint64_t u64[4]; + SIMDE_ALIGN_TO_32 int_fast32_t i32f[32 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(32) simde_int128 i128[2]; - SIMDE_ALIGN(32) simde_uint128 u128[2]; + SIMDE_ALIGN_TO_32 simde_int128 i128[2]; + SIMDE_ALIGN_TO_32 simde_uint128 u128[2]; #endif - SIMDE_ALIGN(32) simde_float32 f32[8]; - SIMDE_ALIGN(32) simde_float64 f64[4]; + SIMDE_ALIGN_TO_32 simde_float32 f32[8]; + SIMDE_ALIGN_TO_32 simde_float64 f64[4]; #endif - SIMDE_ALIGN(32) simde__m128i_private m128i_private[2]; - SIMDE_ALIGN(32) simde__m128i m128i[2]; + SIMDE_ALIGN_TO_32 
simde__m128i_private m128i_private[2]; + SIMDE_ALIGN_TO_32 simde__m128i m128i[2]; #if defined(SIMDE_X86_AVX_NATIVE) - SIMDE_ALIGN(32) __m256i n; + SIMDE_ALIGN_TO_32 __m256i n; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[2]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; #endif #endif } simde__m256i_private; @@ -213,9 +214,9 @@ typedef union { typedef __m256i simde__m256i; typedef __m256d simde__m256d; #elif defined(SIMDE_VECTOR_SUBSCRIPT) - typedef simde_float32 simde__m256 SIMDE_ALIGN(32) SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - typedef int_fast32_t simde__m256i SIMDE_ALIGN(32) SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; - typedef simde_float64 simde__m256d SIMDE_ALIGN(32) SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + typedef simde_float32 simde__m256 SIMDE_ALIGN_TO_32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + typedef int_fast32_t simde__m256i SIMDE_ALIGN_TO_32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + typedef simde_float64 simde__m256d SIMDE_ALIGN_TO_32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; #else typedef simde__m256_private simde__m256; typedef simde__m256i_private simde__m256i; @@ -513,9 +514,8 @@ simde_x_mm256_not_ps(simde__m256 a) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32; #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { - r_.m128[i] = simde_x_mm_not_ps(a_.m128[i]); - } + r_.m128[0] = simde_x_mm_not_ps(a_.m128[0]); + r_.m128[1] = simde_x_mm_not_ps(a_.m128[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -549,9 +549,8 @@ simde_x_mm256_select_ps(simde__m256 a, simde__m256 b, simde__m256 mask) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { - r_.m128[i] = simde_x_mm_select_ps(a_.m128[i], b_.m128[i], mask_.m128[i]); - } + r_.m128[0] = simde_x_mm_select_ps(a_.m128[0], b_.m128[0], mask_.m128[0]); + r_.m128[1] = simde_x_mm_select_ps(a_.m128[1], b_.m128[1], mask_.m128[1]); 
#else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -573,9 +572,8 @@ simde_x_mm256_not_pd(simde__m256d a) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = ~a_.i64; #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) { - r_.m128d[i] = simde_x_mm_not_pd(a_.m128d[i]); - } + r_.m128d[0] = simde_x_mm_not_pd(a_.m128d[0]); + r_.m128d[1] = simde_x_mm_not_pd(a_.m128d[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -609,9 +607,8 @@ simde_x_mm256_select_pd(simde__m256d a, simde__m256d b, simde__m256d mask) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 ^ ((a_.i64 ^ b_.i64) & mask_.i64); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) { - r_.m128d[i] = simde_x_mm_select_pd(a_.m128d[i], b_.m128d[i], mask_.m128d[i]); - } + r_.m128d[0] = simde_x_mm_select_pd(a_.m128d[0], b_.m128d[0], mask_.m128d[0]); + r_.m128d[1] = simde_x_mm_select_pd(a_.m128d[1], b_.m128d[1], mask_.m128d[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -1258,9 +1255,8 @@ simde_x_mm256_deinterleaveeven_epi16 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for(size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_x_mm_deinterleaveeven_epi16(a_.m128i[i], b_.m128i[i]); - } + r_.m128i[0] = simde_x_mm_deinterleaveeven_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_x_mm_deinterleaveeven_epi16(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 2, 4, 6, 16, 18, 20, 22, 8, 10, 12, 14, 24, 26, 28, 30); #else @@ -1286,9 +1282,8 @@ simde_x_mm256_deinterleaveodd_epi16 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for(size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_x_mm_deinterleaveodd_epi16(a_.m128i[i], b_.m128i[i]); - } + r_.m128i[0] = simde_x_mm_deinterleaveodd_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_x_mm_deinterleaveodd_epi16(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 1, 3, 5, 7, 17, 19, 21, 23, 9, 11, 13, 15, 25, 27, 29, 31); #else @@ -1314,9 +1309,8 @@ simde_x_mm256_deinterleaveeven_epi32 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for(size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_x_mm_deinterleaveeven_epi32(a_.m128i[i], b_.m128i[i]); - } + r_.m128i[0] = simde_x_mm_deinterleaveeven_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_x_mm_deinterleaveeven_epi32(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 2, 8, 10, 4, 6, 12, 14); #else @@ -1342,9 +1336,8 @@ simde_x_mm256_deinterleaveodd_epi32 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for(size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_x_mm_deinterleaveodd_epi32(a_.m128i[i], b_.m128i[i]); - } + r_.m128i[0] = simde_x_mm_deinterleaveodd_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_x_mm_deinterleaveodd_epi32(a_.m128i[1], b_.m128i[1]); #elif 
defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 1, 3, 9, 11, 5, 7, 13, 15); #else @@ -1370,9 +1363,8 @@ simde_x_mm256_deinterleaveeven_ps (simde__m256 a, simde__m256 b) { b_ = simde__m256_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { - r_.m128[i] = simde_x_mm_deinterleaveeven_ps(a_.m128[i], b_.m128[i]); - } + r_.m128[0] = simde_x_mm_deinterleaveeven_ps(a_.m128[0], b_.m128[0]); + r_.m128[1] = simde_x_mm_deinterleaveeven_ps(a_.m128[1], b_.m128[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.f32, b_.f32, 0, 2, 8, 10, 4, 6, 12, 14); #else @@ -1398,9 +1390,8 @@ simde_x_mm256_deinterleaveodd_ps (simde__m256 a, simde__m256 b) { b_ = simde__m256_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { - r_.m128[i] = simde_x_mm_deinterleaveodd_ps(a_.m128[i], b_.m128[i]); - } + r_.m128[0] = simde_x_mm_deinterleaveodd_ps(a_.m128[0], b_.m128[0]); + r_.m128[1] = simde_x_mm_deinterleaveodd_ps(a_.m128[1], b_.m128[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.f32, b_.f32, 1, 3, 9, 11, 5, 7, 13, 15); #else @@ -1426,9 +1417,8 @@ simde_x_mm256_deinterleaveeven_pd (simde__m256d a, simde__m256d b) { b_ = simde__m256d_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) { - r_.m128d[i] = simde_x_mm_deinterleaveeven_pd(a_.m128d[i], b_.m128d[i]); - } + r_.m128d[0] = simde_x_mm_deinterleaveeven_pd(a_.m128d[0], b_.m128d[0]); + r_.m128d[1] = simde_x_mm_deinterleaveeven_pd(a_.m128d[1], b_.m128d[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 32, a_.f64, b_.f64, 0, 4, 2, 6); #else @@ -1454,9 +1444,8 @@ simde_x_mm256_deinterleaveodd_pd (simde__m256d a, simde__m256d b) { b_ = simde__m256d_to_private(b); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) { - r_.m128d[i] = simde_x_mm_deinterleaveodd_pd(a_.m128d[i], b_.m128d[i]); - } + r_.m128d[0] = simde_x_mm_deinterleaveodd_pd(a_.m128d[0], b_.m128d[0]); + r_.m128d[1] = simde_x_mm_deinterleaveodd_pd(a_.m128d[1], b_.m128d[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 32, a_.f64, b_.f64, 1, 5, 3, 7); #else @@ -1778,7 +1767,7 @@ simde_mm256_andnot_pd (simde__m256d a, simde__m256d b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_blend_ps (simde__m256 a, simde__m256 b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256_private r_, a_ = simde__m256_to_private(a), @@ -1807,7 +1796,7 @@ simde_mm256_blend_ps (simde__m256 a, simde__m256 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_blend_pd (simde__m256d a, simde__m256d b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 15) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m256d_private r_, a_ = simde__m256d_to_private(a), @@ -1938,14 +1927,7 @@ simde_mm256_broadcast_sd (simde_float64 const * a) { #if defined(SIMDE_X86_AVX_NATIVE) return _mm256_broadcast_sd(a); #else - simde__m256d_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = *a; - } - - return simde__m256d_from_private(r_); + return simde_mm256_set1_pd(*a); #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) @@ -1959,14 +1941,7 @@ simde_mm_broadcast_ss 
(simde_float32 const * a) { #if defined(SIMDE_X86_AVX_NATIVE) return _mm_broadcast_ss(a); #else - simde__m128_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = *a; - } - - return simde__m128_from_private(r_); + return simde_mm_set1_ps(*a); #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) @@ -1980,14 +1955,7 @@ simde_mm256_broadcast_ss (simde_float32 const * a) { #if defined(SIMDE_X86_AVX_NATIVE) return _mm256_broadcast_ss(a); #else - simde__m256_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = *a; - } - - return simde__m256_from_private(r_); + return simde_mm256_set1_ps(*a); #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) @@ -2248,8 +2216,7 @@ SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_cmp_pd (simde__m128d a, simde__m128d b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { switch (imm8) { case SIMDE_CMP_EQ_OQ: case SIMDE_CMP_EQ_UQ: @@ -2318,8 +2285,7 @@ simde_mm_cmp_pd (simde__m128d a, simde__m128d b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmp_ps (simde__m128 a, simde__m128 b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { switch (imm8) { case SIMDE_CMP_EQ_OQ: case SIMDE_CMP_EQ_UQ: @@ -2391,8 +2357,7 @@ simde_mm_cmp_ps (simde__m128 a, simde__m128 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_cmp_sd (simde__m128d a, simde__m128d b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { simde__m128d_private r_, a_ = simde__m128d_to_private(a), @@ -2527,8 +2492,7 @@ simde_mm_cmp_sd (simde__m128d a, simde__m128d b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmp_ss (simde__m128 a, simde__m128 b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { simde__m128_private r_, a_ = simde__m128_to_private(a), @@ -2665,8 +2629,7 @@ simde_mm_cmp_ss (simde__m128 a, simde__m128 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_cmp_pd (simde__m256d a, simde__m256d b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { simde__m256d_private r_, a_ = simde__m256d_to_private(a), @@ -2931,8 +2894,7 @@ simde_mm256_cmp_pd (simde__m256d a, simde__m256d b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_cmp_ps (simde__m256 a, simde__m256 b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { simde__m256_private r_, a_ = simde__m256_to_private(a), @@ -3556,7 +3518,7 @@ simde_mm256_div_pd (simde__m256d a, simde__m256d b) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d 
simde_mm256_extractf128_pd (simde__m256d a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 1) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256d_private a_ = simde__m256d_to_private(a); return a_.m128d[imm8]; } @@ -3571,7 +3533,7 @@ simde_mm256_extractf128_pd (simde__m256d a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm256_extractf128_ps (simde__m256 a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 1) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256_private a_ = simde__m256_to_private(a); return a_.m128[imm8]; } @@ -3586,7 +3548,7 @@ simde_mm256_extractf128_ps (simde__m256 a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm256_extractf128_si256 (simde__m256i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 1) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256i_private a_ = simde__m256i_to_private(a); return a_.m128i[imm8]; } @@ -3694,7 +3656,7 @@ simde_mm256_insert_epi64 (simde__m256i a, int64_t i, const int index) SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_insertf128_pd(simde__m256d a, simde__m128d b, int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 3) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256d_private a_ = simde__m256d_to_private(a); simde__m128d_private b_ = simde__m128d_to_private(b); @@ -3709,7 +3671,7 @@ simde__m256d simde_mm256_insertf128_pd(simde__m256d a, simde__m128d b, int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_insertf128_ps(simde__m256 a, simde__m128 b, int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256_private a_ = simde__m256_to_private(a); simde__m128_private b_ = simde__m128_to_private(b); @@ -3724,7 +3686,7 @@ simde__m256 simde_mm256_insertf128_ps(simde__m256 a, simde__m128 b, int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_insertf128_si256(simde__m256i a, simde__m128i b, int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256i_private a_ = simde__m256i_to_private(a); simde__m128i_private b_ = simde__m128i_to_private(b); @@ -3830,25 +3792,21 @@ simde_x_mm256_loadu_epi64(void const* mem_addr) { #if defined(SIMDE_X86_AVX_NATIVE) return _mm256_loadu_si256(SIMDE_ALIGN_CAST(simde__m256i const*, mem_addr)); #else - simde__m256i_private r_; - - simde_memcpy(&r_, mem_addr, sizeof(r_)); - - return simde__m256i_from_private(r_); + simde__m256i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_lddqu_si256 (simde__m256i const * a) { +simde_mm256_lddqu_si256 (simde__m256i const * mem_addr) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm256_loadu_si256(a); + return _mm256_loadu_si256(mem_addr); #else - simde__m256i_private r_; - - simde_memcpy(&r_, a, sizeof(r_)); - - return simde__m256i_from_private(r_); + simde__m256i r; + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256i), sizeof(r)); + return r; #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) @@ -3858,14 +3816,12 @@ simde_mm256_lddqu_si256 (simde__m256i const * a) { SIMDE_FUNCTION_ATTRIBUTES simde__m256d -simde_mm256_load_pd (const double a[HEDLEY_ARRAY_PARAM(4)]) { - simde_assert_aligned(32, a); - +simde_mm256_load_pd (const double mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm256_load_pd(a); + return _mm256_load_pd(mem_addr); #else simde__m256d r; - r = *SIMDE_ALIGN_CAST(simde__m256d const*, SIMDE_ASSUME_ALIGNED(32, a)); + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256d), sizeof(r)); 
return r; #endif } @@ -3876,14 +3832,12 @@ simde_mm256_load_pd (const double a[HEDLEY_ARRAY_PARAM(4)]) { SIMDE_FUNCTION_ATTRIBUTES simde__m256 -simde_mm256_load_ps (const float a[HEDLEY_ARRAY_PARAM(8)]) { - simde_assert_aligned(32, a); - +simde_mm256_load_ps (const float mem_addr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm256_load_ps(a); + return _mm256_load_ps(mem_addr); #else simde__m256 r; - r = *SIMDE_ALIGN_CAST( simde__m256 const*, SIMDE_ASSUME_ALIGNED(32, a)); + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256), sizeof(r)); return r; #endif } @@ -3895,13 +3849,11 @@ simde_mm256_load_ps (const float a[HEDLEY_ARRAY_PARAM(8)]) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_load_si256 (simde__m256i const * mem_addr) { - simde_assert_aligned(32, mem_addr); - #if defined(SIMDE_X86_AVX_NATIVE) - return _mm256_load_si256(HEDLEY_REINTERPRET_CAST(__m256i const*, mem_addr)); + return _mm256_load_si256(mem_addr); #else simde__m256i r; - r = *mem_addr; + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256i), sizeof(r)); return r; #endif } @@ -3949,19 +3901,7 @@ simde_mm256_loadu_si256 (void const * mem_addr) { return _mm256_loadu_si256(SIMDE_ALIGN_CAST(const __m256i*, mem_addr)); #else simde__m256i r; - - #if HEDLEY_GNUC_HAS_ATTRIBUTE(may_alias,3,3,0) - HEDLEY_DIAGNOSTIC_PUSH - SIMDE_DIAGNOSTIC_DISABLE_PACKED_ - struct simde_mm256_loadu_si256_s { - __typeof__(r) v; - } __attribute__((__packed__, __may_alias__)); - r = HEDLEY_REINTERPRET_CAST(const struct simde_mm256_loadu_si256_s *, mem_addr)->v; - HEDLEY_DIAGNOSTIC_POP - #else - simde_memcpy(&r, mem_addr, sizeof(r)); - #endif - + simde_memcpy(&r, mem_addr, sizeof(r)); return r; #endif } @@ -4377,7 +4317,7 @@ simde_mm256_movehdup_ps (simde__m256 a) { SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_moveldup_ps (simde__m256 a) { - #if defined(SIMDE_X86_AVX_NATIVE) && 0 + #if defined(SIMDE_X86_AVX_NATIVE) return _mm256_moveldup_ps(a); #else simde__m256_private @@ -4571,7 +4511,8 @@ simde_mm256_or_pd (simde__m256d a, simde__m256d b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256 -simde_mm256_permute_ps (simde__m256 a, const int imm8) { +simde_mm256_permute_ps (simde__m256 a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256_private r_, a_ = simde__m256_to_private(a); @@ -4593,7 +4534,8 @@ simde_mm256_permute_ps (simde__m256 a, const int imm8) { SIMDE_FUNCTION_ATTRIBUTES simde__m256d -simde_mm256_permute_pd (simde__m256d a, const int imm8) { +simde_mm256_permute_pd (simde__m256d a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m256d_private r_, a_ = simde__m256d_to_private(a); @@ -4615,7 +4557,8 @@ simde_mm256_permute_pd (simde__m256d a, const int imm8) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 -simde_mm_permute_ps (simde__m128 a, const int imm8) { +simde_mm_permute_ps (simde__m128 a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a); @@ -4638,7 +4581,8 @@ simde_mm_permute_ps (simde__m128 a, const int imm8) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d -simde_mm_permute_pd (simde__m128d a, const int imm8) { +simde_mm_permute_pd (simde__m128d a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m128d_private r_, a_ = simde__m128d_to_private(a); @@ -4757,7 +4701,7 @@ simde_mm256_permutevar_pd (simde__m256d a, simde__m256i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_permute2f128_ps (simde__m256 a, simde__m256 b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { 
+ SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256_private r_, a_ = simde__m256_to_private(a), @@ -4779,7 +4723,7 @@ simde_mm256_permute2f128_ps (simde__m256 a, simde__m256 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_permute2f128_pd (simde__m256d a, simde__m256d b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256d_private r_, a_ = simde__m256d_to_private(a), @@ -4801,7 +4745,7 @@ simde_mm256_permute2f128_pd (simde__m256d a, simde__m256d b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_permute2f128_si256 (simde__m256i a, simde__m256i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a), @@ -4831,9 +4775,8 @@ simde_mm256_rcp_ps (simde__m256 a) { a_ = simde__m256_to_private(a); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128_private[0])) ; i++) { - r_.m128[i] = simde_mm_rcp_ps(a_.m128[i]); - } + r_.m128[0] = simde_mm_rcp_ps(a_.m128[0]); + r_.m128[1] = simde_mm_rcp_ps(a_.m128[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -5041,7 +4984,7 @@ simde_mm256_setr_m128i (simde__m128i lo, simde__m128i hi) { SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_shuffle_ps (simde__m256 a, simde__m256 b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256_private r_, a_ = simde__m256_to_private(a), @@ -5085,7 +5028,7 @@ simde_mm256_shuffle_ps (simde__m256 a, simde__m256 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_shuffle_pd (simde__m256d a, simde__m256d b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 15) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m256d_private r_, a_ = simde__m256d_to_private(a), @@ -5181,13 +5124,11 @@ simde_mm256_sqrt_pd (simde__m256d a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_store_ps (simde_float32 mem_addr[8], simde__m256 a) { - simde_assert_aligned(32, mem_addr); - -#if defined(SIMDE_X86_AVX_NATIVE) - _mm256_store_ps(mem_addr, a); -#else - *SIMDE_ALIGN_CAST(simde__m256*, SIMDE_ASSUME_ALIGNED(32, mem_addr)) = a; -#endif + #if defined(SIMDE_X86_AVX_NATIVE) + _mm256_store_ps(mem_addr, a); + #else + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256), &a, sizeof(a)); + #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_store_ps @@ -5197,13 +5138,11 @@ simde_mm256_store_ps (simde_float32 mem_addr[8], simde__m256 a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_store_pd (simde_float64 mem_addr[4], simde__m256d a) { - simde_assert_aligned(32, mem_addr); - -#if defined(SIMDE_X86_AVX_NATIVE) - _mm256_store_pd(mem_addr, a); -#else - *SIMDE_ALIGN_CAST(simde__m256d*, SIMDE_ASSUME_ALIGNED(32, mem_addr)) = a; -#endif + #if defined(SIMDE_X86_AVX_NATIVE) + _mm256_store_pd(mem_addr, a); + #else + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256d), &a, sizeof(a)); + #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_store_pd @@ -5213,13 +5152,11 @@ simde_mm256_store_pd (simde_float64 mem_addr[4], simde__m256d a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_store_si256 (simde__m256i* mem_addr, simde__m256i a) { - simde_assert_aligned(32, mem_addr); - -#if defined(SIMDE_X86_AVX_NATIVE) - _mm256_store_si256(mem_addr, a); -#else - *mem_addr = a; -#endif + #if defined(SIMDE_X86_AVX_NATIVE) + _mm256_store_si256(mem_addr, 
a); + #else + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256i), &a, sizeof(a)); + #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_store_si256 @@ -5316,13 +5253,11 @@ simde_mm256_storeu2_m128i (simde__m128i* hi_addr, simde__m128i* lo_addr, simde__ SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_stream_ps (simde_float32 mem_addr[8], simde__m256 a) { - simde_assert_aligned(32, mem_addr); - -#if defined(SIMDE_X86_AVX_NATIVE) - _mm256_stream_ps(mem_addr, a); -#else - *SIMDE_ALIGN_CAST(simde__m256*, SIMDE_ASSUME_ALIGNED(32, mem_addr)) = a; -#endif + #if defined(SIMDE_X86_AVX_NATIVE) + _mm256_stream_ps(mem_addr, a); + #else + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256), &a, sizeof(a)); + #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_stream_ps @@ -5332,13 +5267,11 @@ simde_mm256_stream_ps (simde_float32 mem_addr[8], simde__m256 a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_stream_pd (simde_float64 mem_addr[4], simde__m256d a) { - simde_assert_aligned(32, mem_addr); - -#if defined(SIMDE_X86_AVX_NATIVE) - _mm256_stream_pd(mem_addr, a); -#else - *SIMDE_ALIGN_CAST(simde__m256d*, SIMDE_ASSUME_ALIGNED(32, mem_addr)) = a; -#endif + #if defined(SIMDE_X86_AVX_NATIVE) + _mm256_stream_pd(mem_addr, a); + #else + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256d), &a, sizeof(a)); + #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_stream_pd @@ -5348,13 +5281,11 @@ simde_mm256_stream_pd (simde_float64 mem_addr[4], simde__m256d a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_stream_si256 (simde__m256i* mem_addr, simde__m256i a) { - simde_assert_aligned(32, mem_addr); - -#if defined(SIMDE_X86_AVX_NATIVE) - _mm256_stream_si256(mem_addr, a); -#else - *mem_addr = a; -#endif + #if defined(SIMDE_X86_AVX_NATIVE) + _mm256_stream_si256(mem_addr, a); + #else + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256i), &a, sizeof(a)); + #endif } #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_stream_si256 diff --git a/lib/mmseqs/lib/simde/simde/x86/avx2.h b/lib/mmseqs/lib/simde/simde/x86/avx2.h index 91225bc..d6a8dac 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx2.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx2.h @@ -22,7 +22,7 @@ * * Copyright: * 2018-2020 Evan Nemerson - * 2019 Michael R. Crusoe + * 2019-2020 Michael R. Crusoe * 2020 Himanshi Mathur * 2020 Hidayat Khan */ @@ -46,10 +46,15 @@ simde_mm256_abs_epi8 (simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - r_.i8[i] = (a_.i8[i] < INT32_C(0)) ? -a_.i8[i] : a_.i8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_abs_epi8(a_.m128i[0]); + r_.m128i[1] = simde_mm_abs_epi8(a_.m128i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = (a_.i8[i] < INT32_C(0)) ? -a_.i8[i] : a_.i8[i]; + } + #endif return simde__m256i_from_private(r_); #endif @@ -69,10 +74,15 @@ simde_mm256_abs_epi16 (simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = (a_.i16[i] < INT32_C(0)) ? -a_.i16[i] : a_.i16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_abs_epi16(a_.m128i[0]); + r_.m128i[1] = simde_mm_abs_epi16(a_.m128i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] < INT32_C(0)) ? 
-a_.i16[i] : a_.i16[i]; + } + #endif return simde__m256i_from_private(r_); #endif @@ -92,10 +102,15 @@ simde_mm256_abs_epi32(simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { - r_.i32[i] = (a_.i32[i] < INT32_C(0)) ? -a_.i32[i] : a_.i32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_abs_epi32(a_.m128i[0]); + r_.m128i[1] = simde_mm_abs_epi32(a_.m128i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { + r_.i32[i] = (a_.i32[i] < INT32_C(0)) ? -a_.i32[i] : a_.i32[i]; + } + #endif return simde__m256i_from_private(r_); #endif @@ -240,7 +255,7 @@ simde_mm256_add_epi64 (simde__m256i a, simde__m256i b) { #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_add_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_add_epi64(a_.m128i[1], b_.m128i[1]); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.i64 = a_.i64 + b_.i64; #else SIMDE_VECTORIZE @@ -259,7 +274,8 @@ simde_mm256_add_epi64 (simde__m256i a, simde__m256i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_alignr_epi8 (simde__m256i a, simde__m256i b, int count) { +simde_mm256_alignr_epi8 (simde__m256i a, simde__m256i b, int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a), @@ -339,9 +355,9 @@ simde_mm256_andnot_si256 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.m128i_private[0] = simde__m128i_to_private(simde_mm_andnot_si128(simde__m128i_from_private(a_.m128i_private[0]), simde__m128i_from_private(b_.m128i_private[0]))); - r_.m128i_private[1] = simde__m128i_to_private(simde_mm_andnot_si128(simde__m128i_from_private(a_.m128i_private[1]), simde__m128i_from_private(b_.m128i_private[1]))); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_andnot_si128(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_andnot_si128(a_.m128i[1], b_.m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { @@ -368,18 +384,13 @@ simde_mm256_adds_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(HEDLEY_INTEL_VERSION) - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_mm_adds_epi8(a_.m128i[i], b_.m128i[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_adds_epi8(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_adds_epi8(a_.m128i[1], b_.m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - const int32_t tmp = - HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) + - HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); - r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? 
tmp : INT8_MIN) : INT8_MAX)); + r_.i8[i] = simde_math_adds_i8(a_.i8[i], b_.i8[i]); } #endif @@ -402,18 +413,13 @@ simde_mm256_adds_epi16(simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(HEDLEY_INTEL_VERSION) - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_mm_adds_epi16(a_.m128i[i], b_.m128i[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_adds_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_adds_epi16(a_.m128i[1], b_.m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - const int32_t tmp = - HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) + - HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? tmp : INT16_MIN) : INT16_MAX)); + r_.i16[i] = simde_math_adds_i16(a_.i16[i], b_.i16[i]); } #endif @@ -456,7 +462,7 @@ simde_mm256_adds_epu8 (simde__m256i a, simde__m256i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = ((UINT8_MAX - a_.u8[i]) > b_.u8[i]) ? (a_.u8[i] + b_.u8[i]) : UINT8_MAX; + r_.u8[i] = simde_math_adds_u8(a_.u8[i], b_.u8[i]); } #endif @@ -485,7 +491,7 @@ simde_mm256_adds_epu16(simde__m256i a, simde__m256i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = ((UINT16_MAX - a_.u16[i]) > b_.u16[i]) ? (a_.u16[i] + b_.u16[i]) : UINT16_MAX; + r_.u16[i] = simde_math_adds_u16(a_.u16[i], b_.u16[i]); } #endif @@ -548,7 +554,7 @@ simde_mm256_avg_epu16 (simde__m256i a, simde__m256i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_blend_epi32(simde__m128i a, simde__m128i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m128i_private r_, a_ = simde__m128i_to_private(a), @@ -575,7 +581,7 @@ simde_mm_blend_epi32(simde__m128i a, simde__m128i b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_blend_epi16(simde__m256i a, simde__m256i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a), @@ -605,7 +611,7 @@ simde_mm256_blend_epi16(simde__m256i a, simde__m256i b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_blend_epi32(simde__m256i a, simde__m256i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a), @@ -645,8 +651,8 @@ simde_mm256_blendv_epi8(simde__m256i a, simde__m256i b, simde__m256i mask) { mask_ = simde__m256i_to_private(mask); #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - r_.m128i_private[0] = simde__m128i_to_private(simde_mm_blendv_epi8(simde__m128i_from_private(a_.m128i_private[0]), simde__m128i_from_private(b_.m128i_private[0]), simde__m128i_from_private(mask_.m128i_private[0]))); - r_.m128i_private[1] = simde__m128i_to_private(simde_mm_blendv_epi8(simde__m128i_from_private(a_.m128i_private[1]), simde__m128i_from_private(b_.m128i_private[1]), simde__m128i_from_private(mask_.m128i_private[1]))); + r_.m128i[0] = simde_mm_blendv_epi8(a_.m128i[0], b_.m128i[0], mask_.m128i[0]); + r_.m128i[1] = simde_mm_blendv_epi8(a_.m128i[1], b_.m128i[1], mask_.m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -1068,10 
+1074,15 @@ simde_mm256_cmpeq_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = (a_.i16[i] == b_.i16[i]) ? ~INT16_C(0) : INT16_C(0); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_cmpeq_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_cmpeq_epi16(a_.m128i[1], b_.m128i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] == b_.i16[i]) ? ~INT16_C(0) : INT16_C(0); + } + #endif return simde__m256i_from_private(r_); #endif @@ -1092,7 +1103,7 @@ simde_mm256_cmpeq_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpeq_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpeq_epi32(a_.m128i[1], b_.m128i[1]); #else @@ -1121,7 +1132,7 @@ simde_mm256_cmpeq_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpeq_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpeq_epi64(a_.m128i[1], b_.m128i[1]); #else @@ -1600,7 +1611,7 @@ simde_mm256_extract_epi16 (simde__m256i a, const int index) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm256_extracti128_si256 (simde__m256i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 1) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256i_private a_ = simde__m256i_to_private(a); return a_.m128i[imm8]; } @@ -2675,7 +2686,7 @@ simde_mm256_mask_i64gather_pd(simde__m256d src, const simde_float64* base_addr, SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_inserti128_si256(simde__m256i a, simde__m128i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m256i_private a_ = simde__m256i_to_private(a); simde__m128i_private b_ = simde__m128i_to_private(b); @@ -2687,6 +2698,7 @@ simde_mm256_inserti128_si256(simde__m256i a, simde__m128i b, const int imm8) #define simde_mm256_inserti128_si256(a, b, imm8) _mm256_inserti128_si256(a, b, imm8) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) + #undef _mm256_inserti128_si256 #define _mm256_inserti128_si256(a, b, imm8) simde_mm256_inserti128_si256(a, b, imm8) #endif @@ -2701,8 +2713,15 @@ simde_mm256_madd_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - r_.m128i[0] = simde_mm_madd_epi16(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_madd_epi16(a_.m128i[1], b_.m128i[1]); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_madd_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_madd_epi16(a_.m128i[1], b_.m128i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) { + r_.i32[i / 2] = (a_.i16[i] * b_.i16[i]) + (a_.i16[i + 1] * b_.i16[i + 1]); + } + #endif return simde__m256i_from_private(r_); #endif @@ -2971,7 +2990,7 @@ simde_mm256_max_epu8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) r_.m128i[0] = 
simde_mm_max_epu8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epu8(a_.m128i[1], b_.m128i[1]); #else @@ -3000,7 +3019,7 @@ simde_mm256_max_epu16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epu16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epu16(a_.m128i[1], b_.m128i[1]); #else @@ -3029,7 +3048,7 @@ simde_mm256_max_epu32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epu32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epu32(a_.m128i[1], b_.m128i[1]); #else @@ -3296,7 +3315,7 @@ simde_mm256_movemask_epi8 (simde__m256i a) { r = 0; SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { - r |= (a_.u8[31 - i] >> 7) << (31 - i); + r |= HEDLEY_STATIC_CAST(uint32_t, (a_.u8[31 - i] >> 7)) << (31 - i); } #endif @@ -3311,7 +3330,7 @@ simde_mm256_movemask_epi8 (simde__m256i a) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_mpsadbw_epu8 (simde__m256i a, simde__m256i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a), @@ -3366,12 +3385,17 @@ simde_mm256_mul_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = - HEDLEY_STATIC_CAST(int64_t, a_.i32[i * 2]) * - HEDLEY_STATIC_CAST(int64_t, b_.i32[i * 2]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_mul_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_mul_epi32(a_.m128i[1], b_.m128i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = + HEDLEY_STATIC_CAST(int64_t, a_.i32[i * 2]) * + HEDLEY_STATIC_CAST(int64_t, b_.i32[i * 2]); + } + #endif return simde__m256i_from_private(r_); #endif @@ -3391,10 +3415,15 @@ simde_mm256_mul_epu32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_mul_epu32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_mul_epu32(a_.m128i[1], b_.m128i[1]); + #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i * 2]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[i * 2]); } + #endif return simde__m256i_from_private(r_); #endif @@ -3618,9 +3647,9 @@ simde_mm256_packs_epi32 (simde__m256i a, simde__m256i b) { simde__m256i_to_private(b) }; - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) || defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.m128i_private[0] = simde__m128i_to_private(simde_mm_packs_epi32(simde__m128i_from_private(v_[0].m128i_private[0]), simde__m128i_from_private(v_[1].m128i_private[0]))); - r_.m128i_private[1] = simde__m128i_to_private(simde_mm_packs_epi32(simde__m128i_from_private(v_[0].m128i_private[1]), simde__m128i_from_private(v_[1].m128i_private[1]))); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_packs_epi32(v_[0].m128i[0], v_[1].m128i[0]); + r_.m128i[1] = simde_mm_packs_epi32(v_[0].m128i[1], 
v_[1].m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -3708,7 +3737,7 @@ simde_mm256_packus_epi32 (simde__m256i a, simde__m256i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_permute2x128_si256 (simde__m256i a, simde__m256i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a), @@ -3730,7 +3759,7 @@ simde_mm256_permute2x128_si256 (simde__m256i a, simde__m256i b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_permute4x64_epi64 (simde__m256i a, const int imm8) -SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -3753,7 +3782,7 @@ SIMDE_REQUIRE_RANGE(imm8, 0, 255) { SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_permute4x64_pd (simde__m256d a, const int imm8) -SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256d_private r_, a_ = simde__m256d_to_private(a); @@ -3888,7 +3917,8 @@ simde_mm256_shuffle_epi8 (simde__m256i a, simde__m256i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_shuffle_epi32 (simde__m256i a, const int imm8) { +simde_mm256_shuffle_epi32 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4234,7 +4264,7 @@ simde_mm256_slli_epi16 (simde__m256i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_slli_epi32 (simde__m256i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 31) { + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4271,7 +4301,7 @@ simde_mm256_slli_epi32 (simde__m256i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_slli_epi64 (simde__m256i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 63) { + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4302,7 +4332,8 @@ simde_mm256_slli_epi64 (simde__m256i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_slli_si256 (simde__m256i a, const int imm8) { +simde_mm256_slli_si256 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4343,7 +4374,10 @@ simde_mm_sllv_epi32 (simde__m128i a, simde__m128i b) { b_ = simde__m128i_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vshlq_u32(a_.neon_u32, vreinterpretq_s32_u32(b_.neon_u32)); + r_.neon_u32 = vandq_u32(r_.neon_u32, vcltq_u32(b_.neon_u32, vdupq_n_u32(32))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 << b_.u32)); #else SIMDE_VECTORIZE @@ -4370,7 +4404,10 @@ simde_mm256_sllv_epi32 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_sllv_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_sllv_epi32(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 << b_.u32)); #else SIMDE_VECTORIZE @@ -4397,7 +4434,10 @@ simde_mm_sllv_epi64 (simde__m128i a, simde__m128i b) { b_ = simde__m128i_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + 
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vshlq_u64(a_.neon_u64, vreinterpretq_s64_u64(b_.neon_u64)); + r_.neon_u64 = vandq_u64(r_.neon_u64, vcltq_u64(b_.neon_u64, vdupq_n_u64(64))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 << b_.u64)); #else SIMDE_VECTORIZE @@ -4424,7 +4464,10 @@ simde_mm256_sllv_epi64 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_sllv_epi64(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_sllv_epi64(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 << b_.u64)); #else SIMDE_VECTORIZE @@ -4522,7 +4565,8 @@ simde_mm256_sra_epi32 (simde__m256i a, simde__m128i count) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_srai_epi16 (simde__m256i a, const int imm8) { +simde_mm256_srai_epi16 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4556,7 +4600,8 @@ simde_mm256_srai_epi16 (simde__m256i a, const int imm8) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_srai_epi32 (simde__m256i a, const int imm8) { +simde_mm256_srai_epi32 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4599,12 +4644,16 @@ simde_mm_srav_epi32 (simde__m128i a, simde__m128i count) { a_ = simde__m128i_to_private(a), count_ = simde__m128i_to_private(count); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - uint32_t shift = HEDLEY_STATIC_CAST(uint32_t, count_.i32[i]); - if (shift > 31) shift = 31; - r_.i32[i] = a_.i32[i] >> shift; - } + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t cnt = vreinterpretq_s32_u32(vminq_u32(count_.neon_u32, vdupq_n_u32(31))); + r_.neon_i32 = vshlq_s32(a_.neon_i32, vnegq_s32(cnt)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + uint32_t shift = HEDLEY_STATIC_CAST(uint32_t, count_.i32[i]); + r_.i32[i] = a_.i32[i] >> HEDLEY_STATIC_CAST(int, shift > 31 ? 
31 : shift); + } + #endif return simde__m128i_from_private(r_); #endif @@ -4625,12 +4674,17 @@ simde_mm256_srav_epi32 (simde__m256i a, simde__m256i count) { a_ = simde__m256i_to_private(a), count_ = simde__m256i_to_private(count); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_srav_epi32(a_.m128i[0], count_.m128i[0]); + r_.m128i[1] = simde_mm_srav_epi32(a_.m128i[1], count_.m128i[1]); + #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { uint32_t shift = HEDLEY_STATIC_CAST(uint32_t, count_.i32[i]); if (shift > 31) shift = 31; r_.i32[i] = a_.i32[i] >> shift; } + #endif return simde__m256i_from_private(r_); #endif @@ -4754,7 +4808,7 @@ simde_mm256_srl_epi64 (simde__m256i a, simde__m128i count) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_srli_epi16 (simde__m256i a, const int imm8) - SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4799,7 +4853,8 @@ simde_mm256_srli_epi16 (simde__m256i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_srli_epi32 (simde__m256i a, const int imm8) { +simde_mm256_srli_epi32 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4835,7 +4890,8 @@ simde_mm256_srli_epi32 (simde__m256i a, const int imm8) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_srli_epi64 (simde__m256i a, const int imm8) { +simde_mm256_srli_epi64 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -4866,7 +4922,8 @@ simde_mm256_srli_epi64 (simde__m256i a, const int imm8) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i -simde_mm256_srli_si256 (simde__m256i a, const int imm8) { +simde_mm256_srli_si256 (simde__m256i a, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m256i_private r_, a_ = simde__m256i_to_private(a); @@ -5010,14 +5067,11 @@ simde_mm256_srlv_epi64 (simde__m256i a, simde__m256i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_stream_load_si256 (const simde__m256i* mem_addr) { - simde_assert_aligned(32, mem_addr); - #if defined(SIMDE_X86_AVX2_NATIVE) return _mm256_stream_load_si256(HEDLEY_CONST_CAST(simde__m256i*, mem_addr)); #else - /* Use memcpy to avoid aliasing; data must still be 32-byte aligned */ simde__m256i r; - simde_memcpy(&r, mem_addr, sizeof(r)); + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m256i), sizeof(r)); return r; #endif } @@ -5187,6 +5241,9 @@ simde_x_mm256_sub_epu32 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 - b_.u32; + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_x_mm_sub_epu32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_x_mm_sub_epu32(a_.m128i[1], b_.m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -5208,18 +5265,13 @@ simde_mm256_subs_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(HEDLEY_INTEL_VERSION) - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_mm_subs_epi8(a_.m128i[i], b_.m128i[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_subs_epi8(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_subs_epi8(a_.m128i[1], b_.m128i[1]); #else SIMDE_VECTORIZE for 
(size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - const int32_t tmp = - HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) - - HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); - r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? tmp : INT8_MIN) : INT8_MAX)); + r_.i8[i] = simde_math_subs_i8(a_.i8[i], b_.i8[i]); } #endif @@ -5242,18 +5294,13 @@ simde_mm256_subs_epi16(simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(HEDLEY_INTEL_VERSION) - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { - r_.m128i[i] = simde_mm_subs_epi16(a_.m128i[i], b_.m128i[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_subs_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_subs_epi16(a_.m128i[1], b_.m128i[1]); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - const int32_t tmp = - HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) - - HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? tmp : INT16_MIN) : INT16_MAX)); + r_.i16[i] = simde_math_subs_i16(a_.i16[i], b_.i16[i]); } #endif @@ -5296,7 +5343,7 @@ simde_mm256_subs_epu8 (simde__m256i a, simde__m256i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? (a_.u8[i] - b_.u8[i]) : UINT8_C(0); + r_.u8[i] = simde_math_subs_u8(a_.u8[i], b_.u8[i]); } #endif @@ -5325,7 +5372,7 @@ simde_mm256_subs_epu16(simde__m256i a, simde__m256i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = (a_.u16[i] > b_.u16[i]) ? (a_.u16[i] - b_.u16[i]) : UINT16_C(0); + r_.u16[i] = simde_math_subs_u16(a_.u16[i], b_.u16[i]); } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512.h b/lib/mmseqs/lib/simde/simde/x86/avx512.h index e4ffbfe..27cdba5 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512.h @@ -29,6 +29,7 @@ #include "avx512/types.h" +#include "avx512/2intersect.h" #include "avx512/abs.h" #include "avx512/add.h" #include "avx512/adds.h" @@ -54,9 +55,12 @@ #include "avx512/fnmadd.h" #include "avx512/fnmsub.h" #include "avx512/insert.h" +#include "avx512/kshift.h" #include "avx512/load.h" #include "avx512/loadu.h" #include "avx512/lzcnt.h" +#include "avx512/madd.h" +#include "avx512/maddubs.h" #include "avx512/max.h" #include "avx512/min.h" #include "avx512/mov.h" diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/2intersect.h b/lib/mmseqs/lib/simde/simde/x86/avx512/2intersect.h new file mode 100644 index 0000000..66884f1 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/2intersect.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
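The adds/subs hunks above replace the open-coded widen-and-clamp arithmetic with simde_math_adds_* / simde_math_subs_* helpers. As a reference for what those helpers must compute, here is a hedged scalar sketch equivalent to the removed inline code (hypothetical name my_adds_i8; the real helpers live elsewhere in SIMDE).

#include <stdint.h>

/* Saturating signed 8-bit addition: widen to 16 bits (which cannot
 * overflow for two int8_t operands), then clamp back into
 * [INT8_MIN, INT8_MAX].  This mirrors the inline expression the patch
 * removes from simde_mm256_adds_epi8. */
static int8_t my_adds_i8(int8_t a, int8_t b) {
  const int16_t tmp = (int16_t) ((int16_t) a + (int16_t) b);
  if (tmp > INT8_MAX) return INT8_MAX;
  if (tmp < INT8_MIN) return INT8_MIN;
  return (int8_t) tmp;
}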
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + * 2020 Ashleigh Newman-Jones + */ + +#if !defined(SIMDE_X86_AVX512_2INTERSECT_H) +#define SIMDE_X86_AVX512_2INTERSECT_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm_2intersect_epi32(simde__m128i a, simde__m128i b, simde__mmask8 *k1, simde__mmask8 *k2) { + #if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm_2intersect_epi32(a, b, k1, k2); + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + simde__mmask8 + k1_ = 0, + k2_ = 0; + + for (size_t i = 0 ; i < sizeof(a_.i32) / sizeof(a_.i32[0]) ; i++) { + #if defined(SIMDE_ENABLE_OPENMP) + #pragma omp simd reduction(|:k1_) reduction(|:k2_) + #else + SIMDE_VECTORIZE + #endif + for (size_t j = 0 ; j < sizeof(b_.i32) / sizeof(b_.i32[0]) ; j++) { + const int32_t m = a_.i32[i] == b_.i32[j]; + k1_ |= m << i; + k2_ |= m << j; + } + } + + *k1 = k1_; + *k2 = k2_; + #endif +} +#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef __mm_2intersect_epi32 + #define __mm_2intersect_epi32(a,b, k1, k2) simde_mm_2intersect_epi32(a, b, k1, k2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm_2intersect_epi64(simde__m128i a, simde__m128i b, simde__mmask8 *k1, simde__mmask8 *k2) { + #if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm_2intersect_epi64(a, b, k1, k2); + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + simde__mmask8 + k1_ = 0, + k2_ = 0; + + for (size_t i = 0 ; i < sizeof(a_.i64) / sizeof(a_.i64[0]) ; i++) { + #if defined(SIMDE_ENABLE_OPENMP) + #pragma omp simd reduction(|:k1_) reduction(|:k2_) + #else + SIMDE_VECTORIZE + #endif + for (size_t j = 0 ; j < sizeof(b_.i64) / sizeof(b_.i64[0]) ; j++) { + const int32_t m = a_.i64[i] == b_.i64[j]; + k1_ |= m << i; + k2_ |= m << j; + } + } + + *k1 = k1_; + *k2 = k2_; + #endif +} +#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef __mm_2intersect_epi64 + #define __mm_2intersect_epi64(a,b, k1, k2) simde_mm_2intersect_epi64(a, b, k1, k2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm256_2intersect_epi32(simde__m256i a, simde__m256i b, simde__mmask8 *k1, simde__mmask8 *k2) { + #if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_2intersect_epi32(a, b, k1, k2); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + simde__mmask8 + k1_ = 0, + k2_ = 0; + + for (size_t i = 0 ; i < sizeof(a_.i32) / sizeof(a_.i32[0]) ; i++) { + #if defined(SIMDE_ENABLE_OPENMP) + #pragma omp simd reduction(|:k1_) reduction(|:k2_) + #else + SIMDE_VECTORIZE + #endif + for (size_t j = 0 ; j < sizeof(b_.i32) / sizeof(b_.i32[0]) ; j++) { + const int32_t m = a_.i32[i] == b_.i32[j]; + k1_ |= 
m << i; + k2_ |= m << j; + } + } + + *k1 = k1_; + *k2 = k2_; + #endif +} +#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_2intersect_epi32 + #define _mm256_2intersect_epi32(a,b, k1, k2) simde_mm256_2intersect_epi32(a, b, k1, k2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm256_2intersect_epi64(simde__m256i a, simde__m256i b, simde__mmask8 *k1, simde__mmask8 *k2) { + #if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_2intersect_epi64(a, b, k1, k2); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + simde__mmask8 + k1_ = 0, + k2_ = 0; + + for (size_t i = 0 ; i < sizeof(a_.i64) / sizeof(a_.i64[0]) ; i++) { + #if defined(SIMDE_ENABLE_OPENMP) + #pragma omp simd reduction(|:k1_) reduction(|:k2_) + #else + SIMDE_VECTORIZE + #endif + for (size_t j = 0 ; j < sizeof(b_.i64) / sizeof(b_.i64[0]) ; j++) { + const int32_t m = a_.i64[i] == b_.i64[j]; + k1_ |= m << i; + k2_ |= m << j; + } + } + + *k1 = k1_; + *k2 = k2_; + #endif +} +#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_2intersect_epi64 + #define _mm256_2intersect_epi64(a,b, k1, k2) simde_mm256_2intersect_epi64(a, b, k1, k2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm512_2intersect_epi32(simde__m512i a, simde__m512i b, simde__mmask16 *k1, simde__mmask16 *k2) { + #if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) + _mm512_2intersect_epi32(a, b, k1, k2); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + simde__mmask16 + k1_ = 0, + k2_ = 0; + + for (size_t i = 0 ; i < sizeof(a_.i32) / sizeof(a_.i32[0]) ; i++) { + #if defined(SIMDE_ENABLE_OPENMP) + #pragma omp simd reduction(|:k1_) reduction(|:k2_) + #else + SIMDE_VECTORIZE + #endif + for (size_t j = 0 ; j < sizeof(b_.i32) / sizeof(b_.i32[0]) ; j++) { + const int32_t m = a_.i32[i] == b_.i32[j]; + k1_ |= m << i; + k2_ |= m << j; + } + } + + *k1 = k1_; + *k2 = k2_; + #endif +} +#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) + #undef _mm512_2intersect_epi32 + #define _mm512_2intersect_epi32(a, b, k1, k2) simde_mm512_2intersect_epi32(a, b, k1, k2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm512_2intersect_epi64(simde__m512i a, simde__m512i b, simde__mmask8 *k1, simde__mmask8 *k2) { + #if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) + _mm512_2intersect_epi64(a, b, k1, k2); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + simde__mmask8 + k1_ = 0, + k2_ = 0; + + for (size_t i = 0 ; i < sizeof(a_.i64) / sizeof(a_.i64[0]) ; i++) { + #if defined(SIMDE_ENABLE_OPENMP) + #pragma omp simd reduction(|:k1_) reduction(|:k2_) + #else + SIMDE_VECTORIZE + #endif + for (size_t j = 0 ; j < sizeof(b_.i64) / sizeof(b_.i64[0]) ; j++) { + const int32_t m = a_.i64[i] == b_.i64[j]; + k1_ |= m << i; + k2_ |= m << j; + } + } + + *k1 = k1_; + *k2 = k2_; + #endif +} +#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) + #undef _mm512_2intersect_epi64 + #define _mm512_2intersect_epi64(a, b, k1, k2) simde_mm512_2intersect_epi64(a, b, k1, k2) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_2INTERSECT_H) */ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/abs.h b/lib/mmseqs/lib/simde/simde/x86/avx512/abs.h index 4221b64..493c51c 100644 --- 
a/lib/mmseqs/lib/simde/simde/x86/avx512/abs.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/abs.h @@ -30,6 +30,7 @@ #include "types.h" #include "mov.h" +#include "../avx2.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -129,10 +130,16 @@ simde_mm_abs_epi64(simde__m128i a) { r_, a_ = simde__m128i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { - r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i]; - } + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i64 = vabsq_s64(a_.neon_i64); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) + r_.altivec_i64 = vec_abs(a_.altivec_i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { + r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i]; + } + #endif return simde__m128i_from_private(r_); #endif @@ -180,10 +187,16 @@ simde_mm256_abs_epi64(simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { - r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_abs_epi64(a_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { + r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i]; + } + #endif return simde__m256i_from_private(r_); #endif @@ -231,10 +244,16 @@ simde_mm512_abs_epi8 (simde__m512i a) { r_, a_ = simde__m512i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - r_.i8[i] = (a_.i8[i] < INT32_C(0)) ? -a_.i8[i] : a_.i8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_abs_epi8(a_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = (a_.i8[i] < INT32_C(0)) ? -a_.i8[i] : a_.i8[i]; + } + #endif return simde__m512i_from_private(r_); #endif @@ -282,10 +301,16 @@ simde_mm512_abs_epi16 (simde__m512i a) { r_, a_ = simde__m512i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = (a_.i16[i] < INT32_C(0)) ? -a_.i16[i] : a_.i16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_abs_epi16(a_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] < INT32_C(0)) ? -a_.i16[i] : a_.i16[i]; + } + #endif return simde__m512i_from_private(r_); #endif @@ -333,10 +358,16 @@ simde_mm512_abs_epi32(simde__m512i a) { r_, a_ = simde__m512i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { - r_.i32[i] = (a_.i32[i] < INT64_C(0)) ? -a_.i32[i] : a_.i32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_abs_epi32(a_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { + r_.i32[i] = (a_.i32[i] < INT64_C(0)) ? 
-a_.i32[i] : a_.i32[i]; + } + #endif return simde__m512i_from_private(r_); #endif @@ -356,7 +387,8 @@ simde_mm512_mask_abs_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_abs_epi32(src, k, a) simde_mm512_mask_abs_epi32(src, k, a) + #undef _mm512_mask_abs_epi32 + #define _mm512_mask_abs_epi32(src, k, a) simde_mm512_mask_abs_epi32(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -369,7 +401,8 @@ simde_mm512_maskz_abs_epi32(simde__mmask16 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_abs_epi32(k, a) simde_mm512_maskz_abs_epi32(k, a) + #undef _mm512_maskz_abs_epi32 + #define _mm512_maskz_abs_epi32(k, a) simde_mm512_maskz_abs_epi32(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -382,10 +415,16 @@ simde_mm512_abs_epi64(simde__m512i a) { r_, a_ = simde__m512i_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { - r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_abs_epi64(a_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { + r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i]; + } + #endif return simde__m512i_from_private(r_); #endif @@ -405,7 +444,8 @@ simde_mm512_mask_abs_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_abs_epi64(src, k, a) simde_mm512_mask_abs_epi64(src, k, a) + #undef _mm512_mask_abs_epi64 + #define _mm512_mask_abs_epi64(src, k, a) simde_mm512_mask_abs_epi64(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -418,7 +458,8 @@ simde_mm512_maskz_abs_epi64(simde__mmask8 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_abs_epi64(k, a) simde_mm512_maskz_abs_epi64(k, a) + #undef _mm512_maskz_abs_epi64 + #define _mm512_maskz_abs_epi64(k, a) simde_mm512_maskz_abs_epi64(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -431,10 +472,20 @@ simde_mm512_abs_ps(simde__m512 v2) { r_, v2_ = simde__m512_to_private(v2); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { - r_.f32[i] = (v2_.f32[i] < INT64_C(0)) ? -v2_.f32[i] : v2_.f32[i]; - } + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].neon_f32 = vabsq_f32(v2_.m128_private[i].neon_f32); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].altivec_f32 = vec_abs(v2_.m128_private[i].altivec_f32); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { + r_.f32[i] = (v2_.f32[i] < INT64_C(0)) ? 
-v2_.f32[i] : v2_.f32[i]; + } + #endif return simde__m512_from_private(r_); #endif @@ -454,7 +505,8 @@ simde_mm512_mask_abs_ps(simde__m512 src, simde__mmask16 k, simde__m512 v2) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_abs_ps(src, k, v2) simde_mm512_mask_abs_ps(src, k, v2) + #undef _mm512_mask_abs_ps + #define _mm512_mask_abs_ps(src, k, v2) simde_mm512_mask_abs_ps(src, k, v2) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -467,10 +519,20 @@ simde_mm512_abs_pd(simde__m512d v2) { r_, v2_ = simde__m512d_to_private(v2); - SIMDE_VECTORIZE - for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) { - r_.f64[i] = (v2_.f64[i] < INT64_C(0)) ? -v2_.f64[i] : v2_.f64[i]; - } + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].neon_f64 = vabsq_f64(v2_.m128d_private[i].neon_f64); + } + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].altivec_f64 = vec_abs(v2_.m128d_private[i].altivec_f64); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) { + r_.f64[i] = (v2_.f64[i] < INT64_C(0)) ? -v2_.f64[i] : v2_.f64[i]; + } + #endif return simde__m512d_from_private(r_); #endif @@ -490,7 +552,8 @@ simde_mm512_mask_abs_pd(simde__m512d src, simde__mmask8 k, simde__m512d v2) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_abs_pd(src, k, v2) simde_mm512_mask_abs_pd(src, k, v2) + #undef _mm512_mask_abs_pd + #define _mm512_mask_abs_pd(src, k, v2) simde_mm512_mask_abs_pd(src, k, v2) #endif SIMDE_END_DECLS_ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/add.h b/lib/mmseqs/lib/simde/simde/x86/avx512/add.h index 8df23b5..2c4c98e 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/add.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/add.h @@ -478,7 +478,7 @@ simde_mm512_add_epi64 (simde__m512i a, simde__m512i b) { for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { r_.m256i[i] = simde_mm256_add_epi64(a_.m256i[i], b_.m256i[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.i64 = a_.i64 + b_.i64; #else SIMDE_VECTORIZE diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/adds.h b/lib/mmseqs/lib/simde/simde/x86/avx512/adds.h index d7bbada..7a7c82c 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/adds.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/adds.h @@ -167,10 +167,7 @@ simde_mm512_adds_epi8 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - const int16_t tmp = - HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) + - HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); - r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? tmp : INT8_MIN) : INT8_MAX)); + r_.i8[i] = simde_math_adds_i8(a_.i8[i], b_.i8[i]); } #endif @@ -229,10 +226,7 @@ simde_mm512_adds_epi16 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - const int32_t tmp = - HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) + - HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? 
tmp : INT16_MIN) : INT16_MAX)); + r_.i16[i] = simde_math_adds_i16(a_.i16[i], b_.i16[i]); } #endif @@ -291,7 +285,7 @@ simde_mm512_adds_epu8 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = ((UINT8_MAX - a_.u8[i]) > b_.u8[i]) ? (a_.u8[i] + b_.u8[i]) : UINT8_MAX; + r_.u8[i] = simde_math_adds_u8(a_.u8[i], b_.u8[i]); } #endif @@ -350,7 +344,7 @@ simde_mm512_adds_epu16 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = ((UINT16_MAX - a_.u16[i]) > b_.u16[i]) ? (a_.u16[i] + b_.u16[i]) : UINT16_MAX; + r_.u16[i] = simde_math_adds_u16(a_.u16[i], b_.u16[i]); } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/and.h b/lib/mmseqs/lib/simde/simde/x86/avx512/and.h index 7348a8a..fd7118f 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/and.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/and.h @@ -110,7 +110,8 @@ simde_mm512_mask_and_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde_ #endif } #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_and_ps(src, k, a, b) simde_mm512_mask_and_ps(src, k, a, b) + #undef _mm512_mask_and_ps + #define _mm512_mask_and_ps(src, k, a, b) simde_mm512_mask_and_ps(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -123,7 +124,8 @@ simde_mm512_maskz_and_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { #endif } #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_and_ps(k, a, b) simde_mm512_maskz_and_ps(k, a, b) + #undef _mm512_maskz_and_ps + #define _mm512_maskz_and_ps(k, a, b) simde_mm512_maskz_and_ps(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -136,7 +138,8 @@ simde_mm512_mask_and_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde #endif } #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_and_pd(src, k, a, b) simde_mm512_mask_and_pd(src, k, a, b) + #undef _mm512_mask_and_pd + #define _mm512_mask_and_pd(src, k, a, b) simde_mm512_mask_and_pd(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -149,7 +152,8 @@ simde_mm512_maskz_and_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { #endif } #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_and_pd(k, a, b) simde_mm512_maskz_and_pd(k, a, b) + #undef _mm512_maskz_and_pd + #define _mm512_maskz_and_pd(k, a, b) simde_mm512_maskz_and_pd(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/andnot.h b/lib/mmseqs/lib/simde/simde/x86/avx512/andnot.h index 4092f81..ddc3dcb 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/andnot.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/andnot.h @@ -52,7 +52,8 @@ SIMDE_BEGIN_DECLS_ #define simde_mm512_mask_andnot_ps(src, k, a, b) simde_mm512_castsi512_ps(simde_mm512_mask_andnot_epi32(simde_mm512_castps_si512(src), k, simde_mm512_castps_si512(a), simde_mm512_castps_si512(b))) #endif #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_andnot_ps(src, k, a, b) simde_mm512_mask_andnot_ps(src, k, a, b) + #undef _mm512_mask_andnot_ps + #define _mm512_mask_andnot_ps(src, k, a, b) simde_mm512_mask_andnot_ps(src, k, a, b) #endif #if defined(SIMDE_X86_AVX512DQ_NATIVE) @@ -61,7 +62,8 @@ SIMDE_BEGIN_DECLS_ #define simde_mm512_maskz_andnot_ps(k, a, b) simde_mm512_castsi512_ps(simde_mm512_maskz_andnot_epi32(k, simde_mm512_castps_si512(a), simde_mm512_castps_si512(b))) #endif #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define 
_mm512_maskz_andnot_ps(k, a, b) simde_mm512_maskz_andnot_ps(k, a, b) + #undef _mm512_maskz_andnot_ps + #define _mm512_maskz_andnot_ps(k, a, b) simde_mm512_maskz_andnot_ps(k, a, b) #endif #if defined(SIMDE_X86_AVX512DQ_NATIVE) @@ -80,7 +82,8 @@ SIMDE_BEGIN_DECLS_ #define simde_mm512_mask_andnot_pd(src, k, a, b) simde_mm512_castsi512_pd(simde_mm512_mask_andnot_epi64(simde_mm512_castpd_si512(src), k, simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b))) #endif #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_andnot_pd(src, k, a, b) simde_mm512_mask_andnot_pd(src, k, a, b) + #undef _mm512_mask_andnot_pd + #define _mm512_mask_andnot_pd(src, k, a, b) simde_mm512_mask_andnot_pd(src, k, a, b) #endif #if defined(SIMDE_X86_AVX512DQ_NATIVE) @@ -89,7 +92,8 @@ SIMDE_BEGIN_DECLS_ #define simde_mm512_maskz_andnot_pd(k, a, b) simde_mm512_castsi512_pd(simde_mm512_maskz_andnot_epi64(k, simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b))) #endif #if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_andnot_pd(k, a, b) simde_mm512_maskz_andnot_pd(k, a, b) + #undef _mm512_maskz_andnot_pd + #define _mm512_maskz_andnot_pd(k, a, b) simde_mm512_maskz_andnot_pd(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -137,7 +141,8 @@ simde_mm512_mask_andnot_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_andnot_epi32(src, k, a, b) simde_mm512_mask_andnot_epi32(src, k, a, b) + #undef _mm512_mask_andnot_epi32 + #define _mm512_mask_andnot_epi32(src, k, a, b) simde_mm512_mask_andnot_epi32(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -150,7 +155,8 @@ simde_mm512_maskz_andnot_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_andnot_epi32(k, a, b) simde_mm512_maskz_andnot_epi32(k, a, b) + #undef _mm512_maskz_andnot_epi32 + #define _mm512_maskz_andnot_epi32(k, a, b) simde_mm512_maskz_andnot_epi32(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -163,7 +169,8 @@ simde_mm512_mask_andnot_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_andnot_epi64(src, k, a, b) simde_mm512_mask_andnot_epi64(src, k, a, b) + #undef _mm512_mask_andnot_epi64 + #define _mm512_mask_andnot_epi64(src, k, a, b) simde_mm512_mask_andnot_epi64(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -176,7 +183,8 @@ simde_mm512_maskz_andnot_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_andnot_epi64(k, a, b) simde_mm512_maskz_andnot_epi64(k, a, b) + #undef _mm512_maskz_andnot_epi64 + #define _mm512_maskz_andnot_epi64(k, a, b) simde_mm512_maskz_andnot_epi64(k, a, b) #endif SIMDE_END_DECLS_ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/avg.h b/lib/mmseqs/lib/simde/simde/x86/avx512/avg.h index adab801..2ec3441 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/avg.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/avg.h @@ -30,11 +30,124 @@ #include "types.h" #include "mov.h" +#include "../avx2.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_avg_epu8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_avg_epu8(src, k, a, b); + #else + return 
simde_mm_mask_mov_epi8(src, k, simde_mm_avg_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_avg_epu8 + #define _mm_mask_avg_epu8(src, k, a, b) simde_mm_mask_avg_epu8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_avg_epu8(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_maskz_avg_epu8(k, a, b); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_avg_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_avg_epu8 + #define _mm_maskz_avg_epu8(k, a, b) simde_mm_maskz_avg_epu8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_avg_epu16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_avg_epu16(src, k, a, b); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_avg_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_avg_epu16 + #define _mm_mask_avg_epu16(src, k, a, b) simde_mm_mask_avg_epu16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_avg_epu16(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_maskz_avg_epu16(k, a, b); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_avg_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_avg_epu16 + #define _mm_maskz_avg_epu16(k, a, b) simde_mm_maskz_avg_epu16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_avg_epu8(simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_avg_epu8(src, k, a, b); + #else + return simde_mm256_mask_mov_epi8(src, k, simde_mm256_avg_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_avg_epu8 + #define _mm256_mask_avg_epu8(src, k, a, b) simde_mm256_mask_avg_epu8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_avg_epu8(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_maskz_avg_epu8(k, a, b); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_avg_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_avg_epu8 + #define _mm256_maskz_avg_epu8(k, a, b) simde_mm256_maskz_avg_epu8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_avg_epu16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_avg_epu16(src, k, a, b); + #else + return simde_mm256_mask_mov_epi16(src, k, simde_mm256_avg_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_avg_epu16 + #define _mm256_mask_avg_epu16(src, k, a, b) simde_mm256_mask_avg_epu16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_avg_epu16(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_maskz_avg_epu16(k, a, b); + #else 
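The _mm*_mask_* / _mm*_maskz_* emulations added here all follow the same merge-masking idiom: compute the full-width result, then keep lane i only where bit i of the mask is set. A scalar sketch of that blend step, with hypothetical names and an 8-lane uint16_t example, follows.

#include <stddef.h>
#include <stdint.h>

/* Merge-masking: dst[i] takes result[i] when bit i of k is set, otherwise it
 * keeps src[i].  The maskz_* variants are the same with src treated as zero. */
static void mask_merge_u16x8(uint16_t dst[8], const uint16_t src[8],
                             uint8_t k, const uint16_t result[8]) {
  for (size_t i = 0; i < 8; i++) {
    dst[i] = (uint16_t) (((k >> i) & 1) ? result[i] : src[i]);
  }
}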
+ return simde_mm256_maskz_mov_epi16(k, simde_mm256_avg_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_avg_epu16 + #define _mm256_maskz_avg_epu16(k, a, b) simde_mm256_maskz_avg_epu16(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_avg_epu8 (simde__m512i a, simde__m512i b) { @@ -111,6 +224,34 @@ simde_mm512_avg_epu16 (simde__m512i a, simde__m512i b) { #define _mm512_avg_epu16(a, b) simde_mm512_avg_epu16(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_avg_epu16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_avg_epu16(src, k, a, b); + #else + return simde_mm512_mask_mov_epi16(src, k, simde_mm512_avg_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_avg_epu16 + #define _mm512_mask_avg_epu16(src, k, a, b) simde_mm512_mask_avg_epu16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_avg_epu16 (simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_avg_epu16(k, a, b); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_avg_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_avg_epu16 + #define _mm512_maskz_avg_epu16(k, a, b) simde_mm512_maskz_avg_epu16(k, a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/blend.h b/lib/mmseqs/lib/simde/simde/x86/avx512/blend.h index 24ba3d4..e094a07 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/blend.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/blend.h @@ -35,6 +35,202 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_blend_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_blend_epi8(k, a, b); + #else + return simde_mm_mask_mov_epi8(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_blend_epi8 + #define _mm_mask_blend_epi8(k, a, b) simde_mm_mask_blend_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_blend_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_blend_epi16(k, a, b); + #else + return simde_mm_mask_mov_epi16(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_blend_epi16 + #define _mm_mask_blend_epi16(k, a, b) simde_mm_mask_blend_epi16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_blend_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_blend_epi32(k, a, b); + #else + return simde_mm_mask_mov_epi32(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_blend_epi32 + #define _mm_mask_blend_epi32(k, a, b) simde_mm_mask_blend_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_blend_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_blend_epi64(k, a, b); + #else + return simde_mm_mask_mov_epi64(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef 
_mm_mask_blend_epi64 + #define _mm_mask_blend_epi64(k, a, b) simde_mm_mask_blend_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_blend_ps(simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_blend_ps(k, a, b); + #else + return simde_mm_mask_mov_ps(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_blend_ps + #define _mm_mask_blend_ps(k, a, b) simde_mm_mask_blend_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_blend_pd(simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_blend_pd(k, a, b); + #else + return simde_mm_mask_mov_pd(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_blend_pd + #define _mm_mask_blend_pd(k, a, b) simde_mm_mask_blend_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_blend_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_blend_epi8(k, a, b); + #else + return simde_mm256_mask_mov_epi8(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_blend_epi8 + #define _mm256_mask_blend_epi8(k, a, b) simde_mm256_mask_blend_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_blend_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_blend_epi16(k, a, b); + #else + return simde_mm256_mask_mov_epi16(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_blend_epi16 + #define _mm256_mask_blend_epi16(k, a, b) simde_mm256_mask_blend_epi16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_blend_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_blend_epi32(k, a, b); + #else + return simde_mm256_mask_mov_epi32(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_blend_epi32 + #define _mm256_mask_blend_epi32(k, a, b) simde_mm256_mask_blend_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_blend_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_blend_epi64(k, a, b); + #else + return simde_mm256_mask_mov_epi64(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_blend_epi64 + #define _mm256_mask_blend_epi64(k, a, b) simde_mm256_mask_blend_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_blend_ps(simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_blend_ps(k, a, b); + #else + return simde_mm256_mask_mov_ps(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_blend_ps + #define _mm256_mask_blend_ps(k, a, b) simde_mm256_mask_blend_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_blend_pd(simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_blend_pd(k, a, b); + #else + return simde_mm256_mask_mov_pd(a, k, b); + #endif +} +#if 
defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_blend_pd + #define _mm256_mask_blend_pd(k, a, b) simde_mm256_mask_blend_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_blend_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_blend_epi8(k, a, b); + #else + return simde_mm512_mask_mov_epi8(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_blend_epi8 + #define _mm512_mask_blend_epi8(k, a, b) simde_mm512_mask_blend_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_blend_epi16(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_blend_epi16(k, a, b); + #else + return simde_mm512_mask_mov_epi16(a, k, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_blend_epi16 + #define _mm512_mask_blend_epi16(k, a, b) simde_mm512_mask_blend_epi16(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask_blend_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) { @@ -45,7 +241,8 @@ simde_mm512_mask_blend_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_blend_epi32(k, a, b) simde_mm512_mask_blend_epi32(k, a, b) + #undef _mm512_mask_blend_epi32 + #define _mm512_mask_blend_epi32(k, a, b) simde_mm512_mask_blend_epi32(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -58,7 +255,8 @@ simde_mm512_mask_blend_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_blend_epi64(k, a, b) simde_mm512_mask_blend_epi64(k, a, b) + #undef _mm512_mask_blend_epi64 + #define _mm512_mask_blend_epi64(k, a, b) simde_mm512_mask_blend_epi64(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -71,7 +269,8 @@ simde_mm512_mask_blend_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_blend_ps(k, a, b) simde_mm512_mask_blend_ps(k, a, b) + #undef _mm512_mask_blend_ps + #define _mm512_mask_blend_ps(k, a, b) simde_mm512_mask_blend_ps(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -84,7 +283,8 @@ simde_mm512_mask_blend_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_blend_pd(k, a, b) simde_mm512_mask_blend_pd(k, a, b) + #undef _mm512_mask_blend_pd + #define _mm512_mask_blend_pd(k, a, b) simde_mm512_mask_blend_pd(k, a, b) #endif SIMDE_END_DECLS_ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/broadcast.h b/lib/mmseqs/lib/simde/simde/x86/avx512/broadcast.h index 51e3851..33b41ab 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/broadcast.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/broadcast.h @@ -220,12 +220,12 @@ simde_mm512_broadcast_f64x2 (simde__m128d a) { simde__m512d_private r_; simde__m128d_private a_ = simde__m128d_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.f64 = __builtin_shufflevector(a_.f64, a_.f64, 0, 1, 0, 1, 0, 1, 0, 1); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i+=2) { - r_.f64[i] = a_.f64[0]; + for (size_t i = 0 ; i < (sizeof(r_.f64) / 
sizeof(r_.f64[0])) ; i += 2) { + r_.f64[ i ] = a_.f64[0]; r_.f64[i + 1] = a_.f64[1]; } #endif @@ -419,7 +419,8 @@ simde_mm512_mask_broadcast_f32x4(simde__m512 src, simde__mmask16 k, simde__m128 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcast_f32x4(src, k, a) simde_mm512_mask_broadcast_f32x4(src, k, a) + #undef _mm512_mask_broadcast_f32x4 + #define _mm512_mask_broadcast_f32x4(src, k, a) simde_mm512_mask_broadcast_f32x4(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -432,7 +433,8 @@ simde_mm512_maskz_broadcast_f32x4(simde__mmask16 k, simde__m128 a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcast_f32x4(k, a) simde_mm512_maskz_broadcast_f32x4(k, a) + #undef _mm512_maskz_broadcast_f32x4 + #define _mm512_maskz_broadcast_f32x4(k, a) simde_mm512_maskz_broadcast_f32x4(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -466,7 +468,8 @@ simde_mm512_mask_broadcast_f64x4(simde__m512d src, simde__mmask8 k, simde__m256d #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcast_f64x4(src, k, a) simde_mm512_mask_broadcast_f64x4(src, k, a) + #undef _mm512_mask_broadcast_f64x4 + #define _mm512_mask_broadcast_f64x4(src, k, a) simde_mm512_mask_broadcast_f64x4(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -479,7 +482,8 @@ simde_mm512_maskz_broadcast_f64x4(simde__mmask8 k, simde__m256d a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcast_f64x4(k, a) simde_mm512_maskz_broadcast_f64x4(k, a) + #undef _mm512_maskz_broadcast_f64x4 + #define _mm512_maskz_broadcast_f64x4(k, a) simde_mm512_maskz_broadcast_f64x4(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -519,7 +523,8 @@ simde_mm512_mask_broadcast_i32x4(simde__m512i src, simde__mmask16 k, simde__m128 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcast_i32x4(src, k, a) simde_mm512_mask_broadcast_i32x4(src, k, a) + #undef _mm512_mask_broadcast_i32x4 + #define _mm512_mask_broadcast_i32x4(src, k, a) simde_mm512_mask_broadcast_i32x4(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -532,7 +537,8 @@ simde_mm512_maskz_broadcast_i32x4(simde__mmask16 k, simde__m128i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcast_i32x4(k, a) simde_mm512_maskz_broadcast_i32x4(k, a) + #undef _mm512_maskz_broadcast_i32x4 + #define _mm512_maskz_broadcast_i32x4(k, a) simde_mm512_maskz_broadcast_i32x4(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -566,7 +572,8 @@ simde_mm512_mask_broadcast_i64x4(simde__m512i src, simde__mmask8 k, simde__m256i #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcast_i64x4(src, k, a) simde_mm512_mask_broadcast_i64x4(src, k, a) + #undef _mm512_mask_broadcast_i64x4 + #define _mm512_mask_broadcast_i64x4(src, k, a) simde_mm512_mask_broadcast_i64x4(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -579,7 +586,8 @@ simde_mm512_maskz_broadcast_i64x4(simde__mmask8 k, simde__m256i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcast_i64x4(k, a) simde_mm512_maskz_broadcast_i64x4(k, a) + #undef _mm512_maskz_broadcast_i64x4 + #define _mm512_maskz_broadcast_i64x4(k, a) simde_mm512_maskz_broadcast_i64x4(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -614,7 +622,8 @@ simde_mm512_mask_broadcastd_epi32(simde__m512i src, simde__mmask16 k, simde__m12 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcastd_epi32(src, k, a) 
simde_mm512_mask_broadcastd_epi32(src, k, a) + #undef _mm512_mask_broadcastd_epi32 + #define _mm512_mask_broadcastd_epi32(src, k, a) simde_mm512_mask_broadcastd_epi32(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -627,7 +636,8 @@ simde_mm512_maskz_broadcastd_epi32(simde__mmask16 k, simde__m128i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcastd_epi32(k, a) simde_mm512_maskz_broadcastd_epi32(k, a) + #undef _mm512_maskz_broadcastd_epi32 + #define _mm512_maskz_broadcastd_epi32(k, a) simde_mm512_maskz_broadcastd_epi32(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -662,7 +672,8 @@ simde_mm512_mask_broadcastq_epi64(simde__m512i src, simde__mmask8 k, simde__m128 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcastq_epi64(src, k, a) simde_mm512_mask_broadcastq_epi64(src, k, a) + #undef _mm512_mask_broadcastq_epi64 + #define _mm512_mask_broadcastq_epi64(src, k, a) simde_mm512_mask_broadcastq_epi64(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -675,7 +686,8 @@ simde_mm512_maskz_broadcastq_epi64(simde__mmask8 k, simde__m128i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcastq_epi64(k, a) simde_mm512_maskz_broadcastq_epi64(k, a) + #undef _mm512_maskz_broadcastq_epi64 + #define _mm512_maskz_broadcastq_epi64(k, a) simde_mm512_maskz_broadcastq_epi64(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -722,7 +734,8 @@ simde_mm512_mask_broadcastss_ps(simde__m512 src, simde__mmask16 k, simde__m128 a #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcastss_ps(src, k, a) simde_mm512_mask_broadcastss_ps(src, k, a) + #undef _mm512_mask_broadcastss_ps + #define _mm512_mask_broadcastss_ps(src, k, a) simde_mm512_mask_broadcastss_ps(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -745,7 +758,8 @@ simde_mm512_maskz_broadcastss_ps(simde__mmask16 k, simde__m128 a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcastss_ps(k, a) simde_mm512_maskz_broadcastss_ps(k, a) + #undef _mm512_maskz_broadcastss_ps + #define _mm512_maskz_broadcastss_ps(k, a) simde_mm512_maskz_broadcastss_ps(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -791,7 +805,8 @@ simde_mm512_mask_broadcastsd_pd(simde__m512d src, simde__mmask8 k, simde__m128d #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_broadcastsd_pd(src, k, a) simde_mm512_mask_broadcastsd_pd(src, k, a) + #undef _mm512_mask_broadcastsd_pd + #define _mm512_mask_broadcastsd_pd(src, k, a) simde_mm512_mask_broadcastsd_pd(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -814,7 +829,8 @@ simde_mm512_maskz_broadcastsd_pd(simde__mmask8 k, simde__m128d a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_broadcastsd_pd(k, a) simde_mm512_maskz_broadcastsd_pd(k, a) + #undef _mm512_maskz_broadcastsd_pd + #define _mm512_maskz_broadcastsd_pd(k, a) simde_mm512_maskz_broadcastsd_pd(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/cmp.h b/lib/mmseqs/lib/simde/simde/x86/avx512/cmp.h index 2ab39e5..79cfc14 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/cmp.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/cmp.h @@ -42,8 +42,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde__mmask16 simde_mm512_cmp_ps_mask (simde__m512 a, simde__m512 b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { 
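Editorial aside, not part of the patch: _mm512_cmp_ps_mask needs imm8 as a compile-time immediate, which is why the constraint here becomes SIMDE_REQUIRE_CONSTANT_RANGE and the native path below dispatches through SIMDE_CONSTIFY_32_, a switch over all admissible values so that every call site hands the intrinsic a literal. A minimal sketch of that dispatch idea, using a hypothetical stand-in macro and only four cases:

/* Stand-in for an intrinsic that requires a literal immediate operand
 * (hypothetical, for illustration only). */
#define OP_WITH_IMM(x, imm) ((x) + (imm))

/* "Constify" dispatch: map a runtime value onto call sites where the
 * macro sees an actual constant. */
int constify_4(int x, int imm) {
    switch (imm & 3) {
        case 0:  return OP_WITH_IMM(x, 0);
        case 1:  return OP_WITH_IMM(x, 1);
        case 2:  return OP_WITH_IMM(x, 2);
        default: return OP_WITH_IMM(x, 3);
    }
}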
+ SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { #if defined(SIMDE_X86_AVX512F_NATIVE) simde__mmask16 r; SIMDE_CONSTIFY_32_(_mm512_cmp_ps_mask, r, (HEDLEY_UNREACHABLE(), 0), imm8, a, b); @@ -314,8 +313,7 @@ simde_mm512_cmp_ps_mask (simde__m512 a, simde__m512 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__mmask8 simde_mm512_cmp_pd_mask (simde__m512d a, simde__m512d b, const int imm8) - SIMDE_REQUIRE_CONSTANT(imm8) - HEDLEY_REQUIRE_MSG(((imm8 >= 0) && (imm8 <= 31)), "imm8 must be one of the SIMDE_CMP_* macros (values: [0, 31])") { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { #if defined(SIMDE_X86_AVX512F_NATIVE) simde__mmask8 r; SIMDE_CONSTIFY_32_(_mm512_cmp_pd_mask, r, (HEDLEY_UNREACHABLE(), 0), imm8, a, b); diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/cvts.h b/lib/mmseqs/lib/simde/simde/x86/avx512/cvts.h index 2058ca6..c35c2f9 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/cvts.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/cvts.h @@ -358,7 +358,8 @@ simde_mm512_mask_cvtsepi32_epi8 (simde__m128i src, simde__mmask16 k, simde__m512 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_cvtsepi32_epi8(src, k, a) simde_mm512_mask_cvtsepi32_epi8(src, k, a) + #undef _mm512_mask_cvtsepi32_epi8 + #define _mm512_mask_cvtsepi32_epi8(src, k, a) simde_mm512_mask_cvtsepi32_epi8(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -384,7 +385,8 @@ simde_mm512_maskz_cvtsepi32_epi8 (simde__mmask16 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_cvtsepi32_epi8(k, a) simde_mm512_maskz_cvtsepi32_epi8(k, a) + #undef _mm512_maskz_cvtsepi32_epi8 + #define _mm512_maskz_cvtsepi32_epi8(k, a) simde_mm512_maskz_cvtsepi32_epi8(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -438,7 +440,8 @@ simde_mm512_mask_cvtsepi32_epi16 (simde__m256i src, simde__mmask16 k, simde__m51 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_cvtsepi32_epi16(src, k, a) simde_mm512_mask_cvtsepi32_epi16(src, k, a) + #undef _mm512_mask_cvtsepi32_epi16 + #define _mm512_mask_cvtsepi32_epi16(src, k, a) simde_mm512_mask_cvtsepi32_epi16(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -464,7 +467,8 @@ simde_mm512_maskz_cvtsepi32_epi16 (simde__mmask16 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_cvtsepi32_epi16(k, a) simde_mm512_maskz_cvtsepi32_epi16(k, a) + #undef _mm512_maskz_cvtsepi32_epi16 + #define _mm512_maskz_cvtsepi32_epi16(k, a) simde_mm512_maskz_cvtsepi32_epi16(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -545,7 +549,8 @@ simde_mm512_maskz_cvtsepi64_epi8 (simde__mmask8 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_cvtsepi64_epi8(k, a) simde_mm512_maskz_cvtsepi64_epi8(k, a) + #undef _mm512_maskz_cvtsepi64_epi8 + #define _mm512_maskz_cvtsepi64_epi8(k, a) simde_mm512_maskz_cvtsepi64_epi8(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -626,7 +631,8 @@ simde_mm512_maskz_cvtsepi64_epi16 (simde__mmask8 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_cvtsepi64_epi16(k, a) simde_mm512_maskz_cvtsepi64_epi16(k, a) + #undef _mm512_maskz_cvtsepi64_epi16 + #define _mm512_maskz_cvtsepi64_epi16(k, a) simde_mm512_maskz_cvtsepi64_epi16(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -680,7 +686,8 @@ simde_mm512_mask_cvtsepi64_epi32 (simde__m256i src, simde__mmask8 k, simde__m512 #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_cvtsepi64_epi32(src, k, 
a) simde_mm512_mask_cvtsepi64_epi32(src, k, a) + #undef _mm512_mask_cvtsepi64_epi32 + #define _mm512_mask_cvtsepi64_epi32(src, k, a) simde_mm512_mask_cvtsepi64_epi32(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -706,7 +713,8 @@ simde_mm512_maskz_cvtsepi64_epi32 (simde__mmask8 k, simde__m512i a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_cvtsepi64_epi32(k, a) simde_mm512_maskz_cvtsepi64_epi32(k, a) + #undef _mm512_maskz_cvtsepi64_epi32 + #define _mm512_maskz_cvtsepi64_epi32(k, a) simde_mm512_maskz_cvtsepi64_epi32(k, a) #endif SIMDE_END_DECLS_ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/div.h b/lib/mmseqs/lib/simde/simde/x86/avx512/div.h index 6f5e67c..5e6349a 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/div.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/div.h @@ -47,7 +47,11 @@ simde_mm512_div_ps (simde__m512 a, simde__m512 b) { a_ = simde__m512_to_private(a), b_ = simde__m512_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) { + r_.m256[i] = simde_mm256_div_ps(a_.m256[i], b_.m256[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else SIMDE_VECTORIZE @@ -103,7 +107,11 @@ simde_mm512_div_pd (simde__m512d a, simde__m512d b) { a_ = simde__m512d_to_private(a), b_ = simde__m512d_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) { + r_.m256d[i] = simde_mm256_div_pd(a_.m256d[i], b_.m256d[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f64 = a_.f64 / b_.f64; #else SIMDE_VECTORIZE diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/fmadd.h b/lib/mmseqs/lib/simde/simde/x86/avx512/fmadd.h index e95f1d4..0a89b4c 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/fmadd.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/fmadd.h @@ -56,7 +56,7 @@ simde_mm512_fmadd_ps (simde__m512 a, simde__m512 b, simde__m512 c) { r_.f32 = (a_.f32 * b_.f32) + c_.f32; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m32) / sizeof(r_.m32)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] * b_.f32[i]) + c_.f32[i]; } #endif @@ -117,7 +117,7 @@ simde_mm512_fmadd_pd (simde__m512d a, simde__m512d b, simde__m512d c) { r_.f64 = (a_.f64 * b_.f64) + c_.f64; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m64) / sizeof(r_.m64)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { r_.f64[i] = (a_.f64[i] * b_.f64[i]) + c_.f64[i]; } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/fmsub.h b/lib/mmseqs/lib/simde/simde/x86/avx512/fmsub.h index 7b5d415..b9983d0 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/fmsub.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/fmsub.h @@ -56,7 +56,7 @@ simde_mm512_fmsub_ps (simde__m512 a, simde__m512 b, simde__m512 c) { r_.f32 = (a_.f32 * b_.f32) - c_.f32; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m32) / sizeof(r_.m32)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] * b_.f32[i]) - c_.f32[i]; } #endif @@ -89,7 +89,7 @@ simde_mm512_fmsub_pd (simde__m512d a, simde__m512d b, simde__m512d c) { r_.f64 = (a_.f64 * b_.f64) - c_.f64; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m64) / sizeof(r_.m64)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { r_.f64[i] = 
(a_.f64[i] * b_.f64[i]) - c_.f64[i]; } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/fnmadd.h b/lib/mmseqs/lib/simde/simde/x86/avx512/fnmadd.h index 2614a3e..6779dbd 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/fnmadd.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/fnmadd.h @@ -56,7 +56,7 @@ simde_mm512_fnmadd_ps (simde__m512 a, simde__m512 b, simde__m512 c) { r_.f32 = -(a_.f32 * b_.f32) + c_.f32; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m32) / sizeof(r_.m32)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = -(a_.f32[i] * b_.f32[i]) + c_.f32[i]; } #endif @@ -89,7 +89,7 @@ simde_mm512_fnmadd_pd (simde__m512d a, simde__m512d b, simde__m512d c) { r_.f64 = -(a_.f64 * b_.f64) + c_.f64; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m64) / sizeof(r_.m64)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { r_.f64[i] = -(a_.f64[i] * b_.f64[i]) + c_.f64[i]; } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/fnmsub.h b/lib/mmseqs/lib/simde/simde/x86/avx512/fnmsub.h index 6a6a306..8d969de 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/fnmsub.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/fnmsub.h @@ -56,7 +56,7 @@ simde_mm512_fnmsub_ps (simde__m512 a, simde__m512 b, simde__m512 c) { r_.f32 = -(a_.f32 * b_.f32) - c_.f32; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m32) / sizeof(r_.m32)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = -(a_.f32[i] * b_.f32[i]) - c_.f32[i]; } #endif @@ -89,7 +89,7 @@ simde_mm512_fnmsub_pd (simde__m512d a, simde__m512d b, simde__m512d c) { r_.f64 = -(a_.f64 * b_.f64) - c_.f64; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m64) / sizeof(r_.m64)) ; i++) { + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { r_.f64[i] = -(a_.f64[i] * b_.f64[i]) - c_.f64[i]; } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/kshift.h b/lib/mmseqs/lib/simde/simde/x86/avx512/kshift.h new file mode 100644 index 0000000..691fcfd --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/kshift.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + * 2020 Christopher Moore + */ + +#if !defined(SIMDE_X86_AVX512_KSHIFT_H) +#define SIMDE_X86_AVX512_KSHIFT_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_kshiftli_mask16 (simde__mmask16 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return HEDLEY_STATIC_CAST(simde__mmask16, (count <= 15) ? (a << count) : 0); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftli_mask16(a, count) _kshiftli_mask16(a, count) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _kshiftli_mask16 + #define _kshiftli_mask16(a, count) simde_kshiftli_mask16(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_kshiftli_mask32 (simde__mmask32 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return (count <= 31) ? (a << count) : 0; +} +#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftli_mask32(a, count) _kshiftli_mask32(a, count) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _kshiftli_mask32 + #define _kshiftli_mask32(a, count) simde_kshiftli_mask32(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_kshiftli_mask64 (simde__mmask64 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return (count <= 63) ? (a << count) : 0; +} +#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftli_mask64(a, count) _kshiftli_mask64(a, count) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _kshiftli_mask64 + #define _kshiftli_mask64(a, count) simde_kshiftli_mask64(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_kshiftli_mask8 (simde__mmask8 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return HEDLEY_STATIC_CAST(simde__mmask8, (count <= 7) ? (a << count) : 0); +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftli_mask8(a, count) _kshiftli_mask8(a, count) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _kshiftli_mask8 + #define _kshiftli_mask8(a, count) simde_kshiftli_mask8(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_kshiftri_mask16 (simde__mmask16 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return HEDLEY_STATIC_CAST(simde__mmask16, (count <= 15) ? (a >> count) : 0); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftri_mask16(a, count) _kshiftri_mask16(a, count) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _kshiftri_mask16 + #define _kshiftri_mask16(a, count) simde_kshiftri_mask16(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_kshiftri_mask32 (simde__mmask32 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return (count <= 31) ? 
(a >> count) : 0; +} +#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftri_mask32(a, count) _kshiftri_mask32(a, count) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _kshiftri_mask32 + #define _kshiftri_mask32(a, count) simde_kshiftri_mask32(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_kshiftri_mask64 (simde__mmask64 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return (count <= 63) ? (a >> count) : 0; +} +#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftri_mask64(a, count) _kshiftri_mask64(a, count) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _kshiftri_mask64 + #define _kshiftri_mask64(a, count) simde_kshiftri_mask64(a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_kshiftri_mask8 (simde__mmask8 a, unsigned int count) + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { + return HEDLEY_STATIC_CAST(simde__mmask8, (count <= 7) ? (a >> count) : 0); +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #define simde_kshiftri_mask8(a, count) _kshiftri_mask8(a, count) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _kshiftri_mask8 + #define _kshiftri_mask8(a, count) simde_kshiftri_mask8(a, count) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_KSHIFT_H) */ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/load.h b/lib/mmseqs/lib/simde/simde/x86/avx512/load.h index 6964e60..03d7327 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/load.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/load.h @@ -37,10 +37,10 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_load_si512 (void const * mem_addr) { #if defined(SIMDE_X86_AVX512F_NATIVE) - return _mm512_load_si512(HEDLEY_REINTERPRET_CAST(void const*, mem_addr)); + return _mm512_load_si512(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512i)); #else simde__m512i r; - simde_memcpy(&r, SIMDE_ASSUME_ALIGNED_AS(simde__m512i, mem_addr), sizeof(r)); + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512i), sizeof(r)); return r; #endif } diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/loadu.h b/lib/mmseqs/lib/simde/simde/x86/avx512/loadu.h index 2514662..5694b78 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/loadu.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/loadu.h @@ -33,6 +33,118 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_loadu_epi8(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr)); + #else + simde__m128i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_loadu_epi16(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr)); + #else + simde__m128i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES 
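Editorial aside, not part of the patch: when no native unaligned-load intrinsic is available, the simde_mm_loadu_epi* fallbacks above copy the bytes into a local with simde_memcpy, which is the portable, strict-aliasing-safe way to express an unaligned load and which compilers routinely lower to a single vector or word load. A scalar analogue, illustrative only:

#include <stdint.h>
#include <string.h>

/* Read a 64-bit value from a possibly unaligned buffer of any type
 * without undefined behaviour (editorial sketch). */
uint64_t load_u64_unaligned(const void *mem_addr) {
    uint64_t r;
    memcpy(&r, mem_addr, sizeof(r));
    return r;
}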
+simde__m128i +simde_mm_loadu_epi32(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr)); + #else + simde__m128i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_loadu_epi64(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr)); + #else + simde__m128i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_loadu_epi8(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm256_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_AVX_NATIVE) + return _mm256_loadu_si256(SIMDE_ALIGN_CAST(__m256i const *, mem_addr)); + #else + simde__m256i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_loadu_epi16(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm256_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_AVX_NATIVE) + return _mm256_loadu_si256(SIMDE_ALIGN_CAST(__m256i const *, mem_addr)); + #else + simde__m256i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_loadu_epi32(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm256_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_AVX_NATIVE) + return _mm256_loadu_si256(SIMDE_ALIGN_CAST(__m256i const *, mem_addr)); + #else + simde__m256i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_loadu_epi64(void const * mem_addr) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm256_loadu_epi8(mem_addr); + #elif defined(SIMDE_X86_AVX_NATIVE) + return _mm256_loadu_si256(SIMDE_ALIGN_CAST(__m256i const *, mem_addr)); + #else + simde__m256i r; + simde_memcpy(&r, mem_addr, sizeof(r)); + return r; + #endif +} + SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_loadu_ps (void const * mem_addr) { @@ -92,15 +204,17 @@ simde_mm512_loadu_si512 (void const * mem_addr) { #define simde_mm512_loadu_epi16(mem_addr) simde_mm512_loadu_si512(mem_addr) #define simde_mm512_loadu_epi32(mem_addr) simde_mm512_loadu_si512(mem_addr) #define simde_mm512_loadu_epi64(mem_addr) simde_mm512_loadu_si512(mem_addr) -#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm512_loadu_epi8 #undef _mm512_loadu_epi16 + #define _mm512_loadu_epi8(a) simde_mm512_loadu_si512(a) + #define _mm512_loadu_epi16(a) simde_mm512_loadu_si512(a) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_loadu_epi32 #undef _mm512_loadu_epi64 #undef _mm512_loadu_si512 #define _mm512_loadu_si512(a) simde_mm512_loadu_si512(a) - #define _mm512_loadu_epi8(a) simde_mm512_loadu_si512(a) - #define 
_mm512_loadu_epi16(a) simde_mm512_loadu_si512(a) #define _mm512_loadu_epi32(a) simde_mm512_loadu_si512(a) #define _mm512_loadu_epi64(a) simde_mm512_loadu_si512(a) #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/madd.h b/lib/mmseqs/lib/simde/simde/x86/avx512/madd.h new file mode 100644 index 0000000..0a65d2f --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/madd.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + * 2020 Ashleigh Newman-Jones + */ + +#if !defined(SIMDE_X86_AVX512_MADD_H) +#define SIMDE_X86_AVX512_MADD_H + +#include "types.h" +#include "mov.h" +#include "../avx2.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_madd_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_madd_epi16(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_madd_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_madd_epi16 + #define _mm_mask_madd_epi16(a, b) simde_mm_mask_madd_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_madd_epi16 (simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_madd_epi16(k, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_madd_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_madd_epi16 + #define _mm_maskz_madd_epi16(a, b) simde_mm_maskz_madd_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_madd_epi16 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_madd_epi16(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_madd_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_madd_epi16 + #define _mm256_mask_madd_epi16(a, b) simde_mm256_mask_madd_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES 
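Editorial aside, not part of the patch: madd_epi16, which the masked wrappers in this new header delegate to, multiplies corresponding signed 16-bit elements and adds each horizontal pair into one signed 32-bit lane, exactly the loop in simde_mm512_madd_epi16 below (the maddubs variant added later in this patch uses unsigned-by-signed products and additionally saturates to 16 bits). A scalar sketch of the pairwise multiply-add, illustrative names only:

#include <stdint.h>
#include <stddef.h>

/* out[j] = a[2j]*b[2j] + a[2j+1]*b[2j+1] as 32-bit sums of 16-bit products
 * (editorial sketch of madd_epi16 semantics). */
void scalar_madd_epi16(int32_t out[4], const int16_t a[8], const int16_t b[8]) {
    for (size_t j = 0; j < 4; j++) {
        int32_t p0 = (int32_t) a[2 * j]     * b[2 * j];
        int32_t p1 = (int32_t) a[2 * j + 1] * b[2 * j + 1];
        /* Unsigned addition so the lone overflowing case (both pairs equal
         * to -32768 * -32768) wraps instead of being undefined. */
        out[j] = (int32_t) ((uint32_t) p0 + (uint32_t) p1);
    }
}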
+simde__m256i +simde_mm256_maskz_madd_epi16 (simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_madd_epi16(k, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_madd_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_madd_epi16 + #define _mm256_maskz_madd_epi16(a, b) simde_mm256_maskz_madd_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_madd_epi16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_madd_epi16(a, b); + #else + simde__m512i_private r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_madd_epi16(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) { + r_.i32[i / 2] = (a_.i16[i] * b_.i16[i]) + (a_.i16[i + 1] * b_.i16[i + 1]); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_madd_epi16 + #define _mm512_madd_epi16(a, b) simde_mm512_madd_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_madd_epi16 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_madd_epi16(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_madd_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_madd_epi16 + #define _mm512_mask_madd_epi16(a, b) simde_mm512_mask_madd_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_madd_epi16 (simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_madd_epi16(k, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_madd_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_madd_epi16 + #define _mm512_maskz_madd_epi16(a, b) simde_mm512_maskz_madd_epi16(a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_MADD_H) */ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/maddubs.h b/lib/mmseqs/lib/simde/simde/x86/avx512/maddubs.h new file mode 100644 index 0000000..310d0a0 --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/maddubs.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + * 2020 Ashleigh Newman-Jones + */ + +#if !defined(SIMDE_X86_AVX512_MADDUBS_H) +#define SIMDE_X86_AVX512_MADDUBS_H + +#include "types.h" +#include "mov.h" +#include "../avx2.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_maddubs_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_maddubs_epi16(src, k, a, b); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_maddubs_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_maddubs_epi16 + #define _mm_mask_maddubs_epi16(a, b) simde_mm_mask_maddubs_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_maddubs_epi16 (simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE ) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_maddubs_epi16(k, a, b); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_maddubs_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_maddubs_epi16 + #define _mm_maskz_maddubs_epi16(a, b) simde_mm_maskz_maddubs_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_maddubs_epi16 (simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_maddubs_epi16(src, k, a, b); + #else + return simde_mm256_mask_mov_epi16(src, k, simde_mm256_maddubs_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_maddubs_epi16 + #define _mm256_mask_maddubs_epi16(a, b) simde_mm256_mask_maddubs_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_maddubs_epi16 (simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_maddubs_epi16(k, a, b); + #else + return simde_mm256_maskz_mov_epi16(k, simde_mm256_maddubs_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_maddubs_epi16 + #define _mm256_maskz_maddubs_epi16(a, b) simde_mm256_maskz_maddubs_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maddubs_epi16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maddubs_epi16(a, b); + #else + simde__m512i_private r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = 
simde_mm256_maddubs_epi16(a_.m256i[i], b_.m256i[i]); + } + #else + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + const int idx = HEDLEY_STATIC_CAST(int, i) << 1; + int32_t ts = + (HEDLEY_STATIC_CAST(int16_t, a_.u8[ idx ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[ idx ])) + + (HEDLEY_STATIC_CAST(int16_t, a_.u8[idx + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[idx + 1])); + r_.i16[i] = (ts > INT16_MIN) ? ((ts < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ts) : INT16_MAX) : INT16_MIN; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maddubs_epi16 + #define _mm512_maddubs_epi16(a, b) simde_mm512_maddubs_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_maddubs_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_maddubs_epi16(src, k, a, b); + #else + return simde_mm512_mask_mov_epi16(src, k, simde_mm512_maddubs_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_maddubs_epi16 + #define _mm512_mask_maddubs_epi16(a, b) simde_mm512_mask_maddubs_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_maddubs_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_maddubs_epi16(k, a, b); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_maddubs_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_maddubs_epi16 + #define _mm512_maskz_maddubs_epi16(a, b) simde_mm512_maskz_maddubs_epi16(a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_MADDUBS_H) */ diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/mov_mask.h b/lib/mmseqs/lib/simde/simde/x86/avx512/mov_mask.h index 53612dc..f79b3bd 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/mov_mask.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/mov_mask.h @@ -37,6 +37,221 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_movepi8_mask (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movepi8_mask(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return HEDLEY_STATIC_CAST(simde__mmask16, simde_mm_movemask_epi8(a)); + #else + simde__m128i_private a_ = simde__m128i_to_private(a); + simde__mmask16 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r |= (a_.i8[i] < 0) ? (UINT64_C(1) << i) : 0; + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm_movepi8_mask + #define _mm_movepi8_mask(a) simde_mm_movepi8_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_movepi16_mask (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movepi16_mask(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* There is no 32-bit _mm_movemask_* function, so we use + * _mm_movemask_epi8 then extract the odd bits. 
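 * (Editorial worked example, not in upstream SIMDe: if 16-bit lanes 0 and 2
 * hold 0x8000 and every other lane is zero, _mm_movemask_epi8 returns
 * 0x0022; the shift-and-mask steps below then yield 0x0011, 0x0011, 0x0005
 * and 0x0005, i.e. the expected element mask 0b00000101.)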
*/ + uint_fast16_t r = HEDLEY_STATIC_CAST(uint_fast16_t, simde_mm_movemask_epi8(a)); + r = ( (r >> 1)) & UINT32_C(0x5555); + r = (r | (r >> 1)) & UINT32_C(0x3333); + r = (r | (r >> 2)) & UINT32_C(0x0f0f); + r = (r | (r >> 4)) & UINT32_C(0x00ff); + return HEDLEY_STATIC_CAST(simde__mmask8, r); + #else + simde__m128i_private a_ = simde__m128i_to_private(a); + simde__mmask8 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0; + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm_movepi16_mask + #define _mm_movepi16_mask(a) simde_mm_movepi16_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_movepi32_mask (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_movepi32_mask(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movemask_ps(simde_mm_castsi128_ps(a))); + #else + simde__m128i_private a_ = simde__m128i_to_private(a); + simde__mmask8 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0; + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_movepi32_mask + #define _mm_movepi32_mask(a) simde_mm_movepi32_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_movepi64_mask (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_movepi64_mask(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movemask_pd(simde_mm_castsi128_pd(a))); + #else + simde__m128i_private a_ = simde__m128i_to_private(a); + simde__mmask8 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r |= (a_.i64[i] < 0) ? (UINT32_C(1) << i) : 0; + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_movepi64_mask + #define _mm_movepi64_mask(a) simde_mm_movepi64_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_movepi8_mask (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_movepi8_mask(a); + #else + simde__m256i_private a_ = simde__m256i_to_private(a); + simde__mmask32 r = 0; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask32, simde_mm_movepi8_mask(a_.m128i[i])) << (i * 16); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r |= (a_.i8[i] < 0) ? 
(UINT64_C(1) << i) : 0; + } + #endif + + return HEDLEY_STATIC_CAST(simde__mmask32, r); + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_movepi8_mask + #define _mm256_movepi8_mask(a) simde_mm256_movepi8_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_movepi16_mask (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_movepi16_mask(a); + #else + simde__m256i_private a_ = simde__m256i_to_private(a); + simde__mmask16 r = 0; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask16, simde_mm_movepi16_mask(a_.m128i[i])) << (i * 8); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_movepi16_mask + #define _mm256_movepi16_mask(a) simde_mm256_movepi16_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_movepi32_mask (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm256_movepi32_mask(a); + #else + simde__m256i_private a_ = simde__m256i_to_private(a); + simde__mmask8 r = 0; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask16, simde_mm_movepi32_mask(a_.m128i[i])) << (i * 4); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) + #undef _mm256_movepi32_mask + #define _mm256_movepi32_mask(a) simde_mm256_movepi32_mask(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_movepi64_mask (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm256_movepi64_mask(a); + #else + simde__m256i_private a_ = simde__m256i_to_private(a); + simde__mmask8 r = 0; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movepi64_mask(a_.m128i[i])) << (i * 2); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r |= (a_.i64[i] < 0) ? 
(UINT32_C(1) << i) : 0; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) + #undef _mm256_movepi64_mask + #define _mm256_movepi64_mask(a) simde_mm256_movepi64_mask(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__mmask64 simde_mm512_movepi8_mask (simde__m512i a) { @@ -44,12 +259,11 @@ simde_mm512_movepi8_mask (simde__m512i a) { return _mm512_movepi8_mask(a); #else simde__m512i_private a_ = simde__m512i_to_private(a); - uint64_t r = 0; + simde__mmask64 r = 0; #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) { - const uint32_t t = HEDLEY_STATIC_CAST(uint32_t, simde_mm256_movemask_epi8(a_.m256i[i])); - r |= HEDLEY_STATIC_CAST(uint64_t, t) << (i * 32); + r |= HEDLEY_STATIC_CAST(simde__mmask64, simde_mm256_movepi8_mask(a_.m256i[i])) << (i * 32); } #else r = 0; @@ -77,10 +291,16 @@ simde_mm512_movepi16_mask (simde__m512i a) { simde__m512i_private a_ = simde__m512i_to_private(a); simde__mmask32 r = 0; - SIMDE_VECTORIZE_REDUCTION(|:r) - for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { - r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask32, simde_mm256_movepi16_mask(a_.m256i[i])) << (i * 16); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0; + } + #endif return r; #endif @@ -99,10 +319,16 @@ simde_mm512_movepi32_mask (simde__m512i a) { simde__m512i_private a_ = simde__m512i_to_private(a); simde__mmask16 r = 0; - SIMDE_VECTORIZE_REDUCTION(|:r) - for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { - r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask16, simde_mm256_movepi32_mask(a_.m256i[i])) << (i * 8); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0; + } + #endif return r; #endif @@ -121,10 +347,16 @@ simde_mm512_movepi64_mask (simde__m512i a) { simde__m512i_private a_ = simde__m512i_to_private(a); simde__mmask8 r = 0; - SIMDE_VECTORIZE_REDUCTION(|:r) - for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { - r |= (a_.i64[i] < 0) ? (UINT32_C(1) << i) : 0; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) { + r |= simde_mm256_movepi64_mask(a_.m256i[i]) << (i * 4); + } + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r |= (a_.i64[i] < 0) ? 
(UINT32_C(1) << i) : 0; + } + #endif return r; #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/or.h b/lib/mmseqs/lib/simde/simde/x86/avx512/or.h index b01ef48..a063c83 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/or.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/or.h @@ -51,7 +51,7 @@ simde_mm512_or_ps (simde__m512 a, simde__m512 b) { #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256[0] = simde_mm256_or_ps(a_.m256[0], b_.m256[0]); r_.m256[1] = simde_mm256_or_ps(a_.m256[1], b_.m256[1]); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.i32f = a_.i32f | b_.i32f; #else SIMDE_VECTORIZE @@ -82,7 +82,7 @@ simde_mm512_or_pd (simde__m512d a, simde__m512d b) { #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256d[0] = simde_mm256_or_pd(a_.m256d[0], b_.m256d[0]); r_.m256d[1] = simde_mm256_or_pd(a_.m256d[1], b_.m256d[1]); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.i32f = a_.i32f | b_.i32f; #else SIMDE_VECTORIZE @@ -170,12 +170,12 @@ simde_mm512_or_epi64 (simde__m512i a, simde__m512i b) { for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { r_.m256i[i] = simde_mm256_or_si256(a_.m256i[i], b_.m256i[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = a_.i64 | b_.i64; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) + r_.i32f = a_.i32f | b_.i32f; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = a_.i64[i] | b_.i64[i]; + for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { + r_.i32f[i] = a_.i32f[i] | b_.i32f[i]; } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/permutex2var.h b/lib/mmseqs/lib/simde/simde/x86/avx512/permutex2var.h index d9bdff2..6c26d25 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/permutex2var.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/permutex2var.h @@ -29,17 +29,1105 @@ #define SIMDE_X86_AVX512_PERMUTEX2VAR_H #include "types.h" +#include "and.h" +#include "andnot.h" +#include "blend.h" #include "mov.h" +#include "or.h" +#include "set1.h" +#include "slli.h" +#include "srli.h" +#include "test.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +/* The following generic code avoids many, nearly identical, repetitions of fairly complex code. + * If the compiler optimizes well, in particular extracting invariant code from loops + * and simplifying code involving constants passed as arguments, it should not be + * significantly slower than specific code. + * Note that when the original vector contains few elements, these implementations + * may not be faster than portable code. 
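+ * As the call sites further down suggest (e.g. simde_mm_permutex2var_epi16()
+ * passes (1, 0) and the 512-bit epi32 path passes (2, 2)), log2_index_size
+ * appears to select the index element width (0 -> 8-bit, 1 -> 16-bit,
+ * 2 -> 32-bit) and log2_data_length the number of 128-bit lanes per operand
+ * (0 -> __m128i, 1 -> __m256i, 2 -> __m512i). A worked example of the mask
+ * computed below: with (1, 0), idx_mask = (1 << (5 - 1 + 0)) - 1 = 0xF, so
+ * each 16-bit index addresses one of the 8 elements of a or the 8 of b.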
+ */ +#if defined(SIMDE_X86_SSSE3_NATIVE) || defined(SIMDE_ARM_NEON_A64V8_NATIVE) || defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_WASM_SIMD128_NATIVE) + #define SIMDE_X_PERMUTEX2VAR_USE_GENERIC +#endif + +#if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_permutex2var128 (const simde__m128i *a, const simde__m128i idx, const simde__m128i *b, const unsigned int log2_index_size, const unsigned int log2_data_length) { + const int idx_mask = (1 << (5 - log2_index_size + log2_data_length)) - 1; + + #if defined(SIMDE_X86_SSE3_NATIVE) + __m128i ra, rb, t, test, select, index; + const __m128i sixteen = _mm_set1_epi8(16); + + /* Avoid the mullo intrinsics which have high latency (and the 32-bit one requires SSE4.1) */ + switch (log2_index_size) { + default: /* Avoid uninitialized variable warning/error */ + case 0: + index = _mm_and_si128(idx, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, idx_mask))); + break; + case 1: + index = _mm_and_si128(idx, _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, idx_mask))); + index = _mm_slli_epi32(index, 1); + t = _mm_slli_epi32(index, 8); + index = _mm_or_si128(index, t); + index = _mm_add_epi16(index, _mm_set1_epi16(0x0100)); + break; + case 2: + index = _mm_and_si128(idx, _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, idx_mask))); + index = _mm_slli_epi32(index, 2); + t = _mm_slli_epi32(index, 8); + index = _mm_or_si128(index, t); + t = _mm_slli_epi32(index, 16); + index = _mm_or_si128(index, t); + index = _mm_add_epi32(index, _mm_set1_epi32(0x03020100)); + break; + } + + test = index; + index = _mm_and_si128(index, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << (4 + log2_data_length)) - 1))); + test = _mm_cmpgt_epi8(test, index); + + ra = _mm_shuffle_epi8(a[0], index); + rb = _mm_shuffle_epi8(b[0], index); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + SIMDE_VECTORIZE + for (int i = 1 ; i < (1 << log2_data_length) ; i++) { + select = _mm_cmplt_epi8(index, sixteen); + index = _mm_sub_epi8(index, sixteen); + ra = _mm_blendv_epi8(_mm_shuffle_epi8(a[i], index), ra, select); + rb = _mm_blendv_epi8(_mm_shuffle_epi8(b[i], index), rb, select); + } + + return _mm_blendv_epi8(ra, rb, test); + #else + SIMDE_VECTORIZE + for (int i = 1 ; i < (1 << log2_data_length) ; i++) { + select = _mm_cmplt_epi8(index, sixteen); + index = _mm_sub_epi8(index, sixteen); + ra = _mm_or_si128(_mm_andnot_si128(select, _mm_shuffle_epi8(a[i], index)), _mm_and_si128(select, ra)); + rb = _mm_or_si128(_mm_andnot_si128(select, _mm_shuffle_epi8(b[i], index)), _mm_and_si128(select, rb)); + } + + return _mm_or_si128(_mm_andnot_si128(test, ra), _mm_and_si128(test, rb)); + #endif + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16_t index, r; + uint16x8_t index16; + uint32x4_t index32; + uint8x16x2_t table2_a, table2_b; + uint8x16x4_t table4_a, table4_b; + + switch (log2_index_size) { + case 0: + index = vandq_u8(simde__m128i_to_neon_u8(idx), vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, idx_mask))); + break; + case 1: + index16 = vandq_u16(simde__m128i_to_neon_u16(idx), vdupq_n_u16(HEDLEY_STATIC_CAST(uint16_t, idx_mask))); + index16 = vmulq_n_u16(index16, 0x0202); + index16 = vaddq_u16(index16, vdupq_n_u16(0x0100)); + index = vreinterpretq_u8_u16(index16); + break; + case 2: + index32 = vandq_u32(simde__m128i_to_neon_u32(idx), vdupq_n_u32(HEDLEY_STATIC_CAST(uint32_t, idx_mask))); + index32 = vmulq_n_u32(index32, 0x04040404); + index32 = vaddq_u32(index32, vdupq_n_u32(0x03020100)); + index = vreinterpretq_u8_u32(index32); + break; + } + + uint8x16_t mask = 
vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, (1 << (4 + log2_data_length)) - 1)); + + switch (log2_data_length) { + case 0: + r = vqtbx1q_u8(vqtbl1q_u8(simde__m128i_to_neon_u8(b[0]), vandq_u8(index, mask)), simde__m128i_to_neon_u8(a[0]), index); + break; + case 1: + table2_a.val[0] = simde__m128i_to_neon_u8(a[0]); + table2_a.val[1] = simde__m128i_to_neon_u8(a[1]); + table2_b.val[0] = simde__m128i_to_neon_u8(b[0]); + table2_b.val[1] = simde__m128i_to_neon_u8(b[1]); + r = vqtbx2q_u8(vqtbl2q_u8(table2_b, vandq_u8(index, mask)), table2_a, index); + break; + case 2: + table4_a.val[0] = simde__m128i_to_neon_u8(a[0]); + table4_a.val[1] = simde__m128i_to_neon_u8(a[1]); + table4_a.val[2] = simde__m128i_to_neon_u8(a[2]); + table4_a.val[3] = simde__m128i_to_neon_u8(a[3]); + table4_b.val[0] = simde__m128i_to_neon_u8(b[0]); + table4_b.val[1] = simde__m128i_to_neon_u8(b[1]); + table4_b.val[2] = simde__m128i_to_neon_u8(b[2]); + table4_b.val[3] = simde__m128i_to_neon_u8(b[3]); + r = vqtbx4q_u8(vqtbl4q_u8(table4_b, vandq_u8(index, mask)), table4_a, index); + break; + } + + return simde__m128i_from_neon_u8(r); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r, ra, rb, t, index, s, thirty_two = vec_splats(HEDLEY_STATIC_CAST(uint8_t, 32)); + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) index16; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) temp32, index32; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) select, test; + + switch (log2_index_size) { + default: /* Avoid uninitialized variable warning/error */ + case 0: + index = vec_and(simde__m128i_to_altivec_u8(idx), vec_splats(HEDLEY_STATIC_CAST(uint8_t, idx_mask))); + break; + case 1: + index16 = simde__m128i_to_altivec_u16(idx); + index16 = vec_and(index16, vec_splats(HEDLEY_STATIC_CAST(uint16_t, idx_mask))); + index16 = vec_mladd(index16, vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0202)), vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0100))); + index = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index16); + break; + case 2: + index32 = simde__m128i_to_altivec_u32(idx); + index32 = vec_and(index32, vec_splats(HEDLEY_STATIC_CAST(uint32_t, idx_mask))); + + /* Multiply index32 by 0x04040404; unfortunately vec_mul isn't available so (mis)use 16-bit vec_mladd */ + temp32 = vec_sl(index32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 16))); + index32 = vec_add(index32, temp32); + index32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), + vec_mladd(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), index32), + vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0404)), + vec_splat_u16(0))); + + index32 = vec_add(index32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0x03020100))); + index = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index32); + break; + } + + if (log2_data_length == 0) { + r = vec_perm(simde__m128i_to_altivec_u8(a[0]), simde__m128i_to_altivec_u8(b[0]), HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index)); + } + else { + s = index; + index = vec_and(index, vec_splats(HEDLEY_STATIC_CAST(uint8_t, (1 << (4 + log2_data_length)) - 1))); + test = vec_cmpgt(s, index); + + ra = vec_perm(simde__m128i_to_altivec_u8(a[0]), simde__m128i_to_altivec_u8(a[1]), index); + rb = vec_perm(simde__m128i_to_altivec_u8(b[0]), simde__m128i_to_altivec_u8(b[1]), index); + + SIMDE_VECTORIZE + for (int i = 2 ; i < (1 << log2_data_length) ; i += 2) { + select = vec_cmplt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed 
char), index), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), thirty_two)); + index = vec_sub(index, thirty_two); + t = vec_perm(simde__m128i_to_altivec_u8(a[i]), simde__m128i_to_altivec_u8(a[i + 1]), index); + ra = vec_sel(t, ra, select); + t = vec_perm(simde__m128i_to_altivec_u8(b[i]), simde__m128i_to_altivec_u8(b[i + 1]), index); + rb = vec_sel(t, rb, select); + } + + r = vec_sel(ra, rb, test); + } + + return simde__m128i_from_altivec_u8(r); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + const v128_t sixteen = wasm_i8x16_splat(16); + + v128_t index = simde__m128i_to_wasm_v128(idx); + + switch (log2_index_size) { + case 0: + index = wasm_v128_and(index, wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, idx_mask))); + break; + case 1: + index = wasm_v128_and(index, wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, idx_mask))); + index = wasm_i16x8_mul(index, wasm_i16x8_splat(0x0202)); + index = wasm_i16x8_add(index, wasm_i16x8_splat(0x0100)); + break; + case 2: + index = wasm_v128_and(index, wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, idx_mask))); + index = wasm_i32x4_mul(index, wasm_i32x4_splat(0x04040404)); + index = wasm_i32x4_add(index, wasm_i32x4_splat(0x03020100)); + break; + } + + v128_t r = wasm_v8x16_swizzle(simde__m128i_to_wasm_v128(a[0]), index); + + SIMDE_VECTORIZE + for (int i = 1 ; i < (1 << log2_data_length) ; i++) { + index = wasm_i8x16_sub(index, sixteen); + r = wasm_v128_or(r, wasm_v8x16_swizzle(simde__m128i_to_wasm_v128(a[i]), index)); + } + + SIMDE_VECTORIZE + for (int i = 0 ; i < (1 << log2_data_length) ; i++) { + index = wasm_i8x16_sub(index, sixteen); + r = wasm_v128_or(r, wasm_v8x16_swizzle(simde__m128i_to_wasm_v128(b[i]), index)); + } + + return simde__m128i_from_wasm_v128(r); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_x_permutex2var (simde__m128i *r, const simde__m128i *a, const simde__m128i *idx, const simde__m128i *b, const unsigned int log2_index_size, const unsigned int log2_data_length) { + SIMDE_VECTORIZE + for (int i = 0 ; i < (1 << log2_data_length) ; i++) { + r[i] = simde_x_permutex2var128(a, idx[i], b, log2_index_size, log2_data_length); + } +} +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_permutex2var_epi16 (simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutex2var_epi16(a, idx, b); + #elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde__m128i r; + + simde_x_permutex2var(&r, &a, &idx, &b, 1, 0); + + return r; + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + idx_ = simde__m128i_to_private(idx), + b_ = simde__m128i_to_private(b), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = ((idx_.i16[i] & 8) ? 
b_ : a_).i16[idx_.i16[i] & 7]; + } + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutex2var_epi16 + #define _mm_permutex2var_epi16(a, idx, b) simde_mm_permutex2var_epi16(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_permutex2var_epi16 (simde__m128i a, simde__mmask8 k, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutex2var_epi16(a, k, idx, b); + #else + return simde_mm_mask_mov_epi16(a, k, simde_mm_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutex2var_epi16 +#define _mm_mask_permutex2var_epi16(a, k, idx, b) simde_mm_mask_permutex2var_epi16(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask2_permutex2var_epi16 (simde__m128i a, simde__m128i idx, simde__mmask8 k, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask2_permutex2var_epi16(a, idx, k, b); + #else + return simde_mm_mask_mov_epi16(idx, k, simde_mm_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask2_permutex2var_epi16 +#define _mm_mask2_permutex2var_epi16(a, idx, k, b) simde_mm_mask2_permutex2var_epi16(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_permutex2var_epi16 (simde__mmask8 k, simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutex2var_epi16(k, a, idx, b); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutex2var_epi16 +#define _mm_maskz_permutex2var_epi16(k, a, idx, b) simde_mm_maskz_permutex2var_epi16(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_permutex2var_epi32 (simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutex2var_epi32(a, idx, b); + #elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) /* This may not be faster than the portable version */ + simde__m128i r; + + simde_x_permutex2var(&r, &a, &idx, &b, 2, 0); + + return r; + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + idx_ = simde__m128i_to_private(idx), + b_ = simde__m128i_to_private(b), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = ((idx_.i32[i] & 4) ? 
b_ : a_).i32[idx_.i32[i] & 3]; + } + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutex2var_epi32 + #define _mm_permutex2var_epi32(a, idx, b) simde_mm_permutex2var_epi32(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_permutex2var_epi32 (simde__m128i a, simde__mmask8 k, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutex2var_epi32(a, k, idx, b); + #else + return simde_mm_mask_mov_epi32(a, k, simde_mm_permutex2var_epi32(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutex2var_epi32 +#define _mm_mask_permutex2var_epi32(a, k, idx, b) simde_mm_mask_permutex2var_epi32(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask2_permutex2var_epi32 (simde__m128i a, simde__m128i idx, simde__mmask8 k, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask2_permutex2var_epi32(a, idx, k, b); + #else + return simde_mm_mask_mov_epi32(idx, k, simde_mm_permutex2var_epi32(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask2_permutex2var_epi32 +#define _mm_mask2_permutex2var_epi32(a, idx, k, b) simde_mm_mask2_permutex2var_epi32(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_permutex2var_epi32 (simde__mmask8 k, simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutex2var_epi32(k, a, idx, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_permutex2var_epi32(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutex2var_epi32 +#define _mm_maskz_permutex2var_epi32(k, a, idx, b) simde_mm_maskz_permutex2var_epi32(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_permutex2var_epi64 (simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutex2var_epi64(a, idx, b); + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + idx_ = simde__m128i_to_private(idx), + b_ = simde__m128i_to_private(b), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = ((idx_.i64[i] & 2) ? 
b_ : a_).i64[idx_.i64[i] & 1]; + } + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutex2var_epi64 + #define _mm_permutex2var_epi64(a, idx, b) simde_mm_permutex2var_epi64(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_permutex2var_epi64 (simde__m128i a, simde__mmask8 k, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutex2var_epi64(a, k, idx, b); + #else + return simde_mm_mask_mov_epi64(a, k, simde_mm_permutex2var_epi64(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutex2var_epi64 +#define _mm_mask_permutex2var_epi64(a, k, idx, b) simde_mm_mask_permutex2var_epi64(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask2_permutex2var_epi64 (simde__m128i a, simde__m128i idx, simde__mmask8 k, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask2_permutex2var_epi64(a, idx, k, b); + #else + return simde_mm_mask_mov_epi64(idx, k, simde_mm_permutex2var_epi64(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask2_permutex2var_epi64 +#define _mm_mask2_permutex2var_epi64(a, idx, k, b) simde_mm_mask2_permutex2var_epi64(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_permutex2var_epi64 (simde__mmask8 k, simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutex2var_epi64(k, a, idx, b); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_permutex2var_epi64(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutex2var_epi64 +#define _mm_maskz_permutex2var_epi64(k, a, idx, b) simde_mm_maskz_permutex2var_epi64(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_permutex2var_epi8 (simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutex2var_epi8(a, idx, b); + #elif defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cvtepi32_epi8(_mm512_permutex2var_epi32(_mm512_cvtepu8_epi32(a), _mm512_cvtepu8_epi32(idx), _mm512_cvtepu8_epi32(b))); + #elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde__m128i r; + + simde_x_permutex2var(&r, &a, &idx, &b, 0, 0); + + return r; + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + idx_ = simde__m128i_to_private(idx), + b_ = simde__m128i_to_private(b), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = ((idx_.i8[i] & 0x10) ? 
b_ : a_).i8[idx_.i8[i] & 0x0F]; + } + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutex2var_epi8 + #define _mm_permutex2var_epi8(a, idx, b) simde_mm_permutex2var_epi8(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_permutex2var_epi8 (simde__m128i a, simde__mmask16 k, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutex2var_epi8(a, k, idx, b); + #else + return simde_mm_mask_mov_epi8(a, k, simde_mm_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutex2var_epi8 +#define _mm_mask_permutex2var_epi8(a, k, idx, b) simde_mm_mask_permutex2var_epi8(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask2_permutex2var_epi8 (simde__m128i a, simde__m128i idx, simde__mmask16 k, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask2_permutex2var_epi8(a, idx, k, b); + #else + return simde_mm_mask_mov_epi8(idx, k, simde_mm_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask2_permutex2var_epi8 +#define _mm_mask2_permutex2var_epi8(a, idx, k, b) simde_mm_mask2_permutex2var_epi8(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_permutex2var_epi8 (simde__mmask16 k, simde__m128i a, simde__m128i idx, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutex2var_epi8(k, a, idx, b); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutex2var_epi8 +#define _mm_maskz_permutex2var_epi8(k, a, idx, b) simde_mm_maskz_permutex2var_epi8(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_permutex2var_pd (simde__m128d a, simde__m128i idx, simde__m128d b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutex2var_pd(a, idx, b); + #else + return simde_mm_castsi128_pd(simde_mm_permutex2var_epi64(simde_mm_castpd_si128(a), idx, simde_mm_castpd_si128(b))); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutex2var_pd + #define _mm_permutex2var_pd(a, idx, b) simde_mm_permutex2var_pd(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_permutex2var_pd (simde__m128d a, simde__mmask8 k, simde__m128i idx, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutex2var_pd(a, k, idx, b); + #else + return simde_mm_mask_mov_pd(a, k, simde_mm_permutex2var_pd(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutex2var_pd +#define _mm_mask_permutex2var_pd(a, k, idx, b) simde_mm_mask_permutex2var_pd(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask2_permutex2var_pd (simde__m128d a, simde__m128i idx, simde__mmask8 k, 
simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask2_permutex2var_pd(a, idx, k, b); + #else + return simde_mm_mask_mov_pd(simde_mm_castsi128_pd(idx), k, simde_mm_permutex2var_pd(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask2_permutex2var_pd +#define _mm_mask2_permutex2var_pd(a, idx, k, b) simde_mm_mask2_permutex2var_pd(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_permutex2var_pd (simde__mmask8 k, simde__m128d a, simde__m128i idx, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutex2var_pd(k, a, idx, b); + #else + return simde_mm_maskz_mov_pd(k, simde_mm_permutex2var_pd(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutex2var_pd +#define _mm_maskz_permutex2var_pd(k, a, idx, b) simde_mm_maskz_permutex2var_pd(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_permutex2var_ps (simde__m128 a, simde__m128i idx, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutex2var_ps(a, idx, b); + #else + return simde_mm_castsi128_ps(simde_mm_permutex2var_epi32(simde_mm_castps_si128(a), idx, simde_mm_castps_si128(b))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutex2var_ps + #define _mm_permutex2var_ps(a, idx, b) simde_mm_permutex2var_ps(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_permutex2var_ps (simde__m128 a, simde__mmask8 k, simde__m128i idx, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutex2var_ps(a, k, idx, b); + #else + return simde_mm_mask_mov_ps(a, k, simde_mm_permutex2var_ps(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutex2var_ps +#define _mm_mask_permutex2var_ps(a, k, idx, b) simde_mm_mask_permutex2var_ps(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask2_permutex2var_ps (simde__m128 a, simde__m128i idx, simde__mmask8 k, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask2_permutex2var_ps(a, idx, k, b); + #else + return simde_mm_mask_mov_ps(simde_mm_castsi128_ps(idx), k, simde_mm_permutex2var_ps(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask2_permutex2var_ps +#define _mm_mask2_permutex2var_ps(a, idx, k, b) simde_mm_mask2_permutex2var_ps(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_maskz_permutex2var_ps (simde__mmask8 k, simde__m128 a, simde__m128i idx, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutex2var_ps(k, a, idx, b); + #else + return simde_mm_maskz_mov_ps(k, simde_mm_permutex2var_ps(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutex2var_ps +#define _mm_maskz_permutex2var_ps(k, a, idx, b) simde_mm_maskz_permutex2var_ps(k, a, idx, b) 
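+
+/* Summary of the pattern used by the 128-bit wrappers above (as the portable
+ * fallbacks suggest): an index selects b when its high select bit is set and a
+ * otherwise, with the remaining low bits picking the element, e.g. for epi16
+ * idx = 9 -> element 1 of b. The mask_/mask2_/maskz_ variants then merge the
+ * permute result with a, with idx, or with zero, respectively, under mask k. */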
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutex2var_epi16 (simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutex2var_epi16(a, idx, b); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m256i hilo, hilo2, hi, lo, idx2, ta, tb, select; + const __m256i ones = _mm256_set1_epi16(1); + + idx2 = _mm256_srli_epi32(idx, 1); + + ta = _mm256_permutevar8x32_epi32(a, idx2); + tb = _mm256_permutevar8x32_epi32(b, idx2); + select = _mm256_slli_epi32(idx2, 28); + hilo = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + idx2 = _mm256_srli_epi32(idx2, 16); + + ta = _mm256_permutevar8x32_epi32(a, idx2); + tb = _mm256_permutevar8x32_epi32(b, idx2); + select = _mm256_slli_epi32(idx2, 28); + hilo2 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + + lo = _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo, 0x55); + hi = _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo, 16), 0x55); + + select = _mm256_cmpeq_epi16(_mm256_and_si256(idx, ones), ones); + return _mm256_blendv_epi8(lo, hi, select); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + idx_ = simde__m256i_to_private(idx), + b_ = simde__m256i_to_private(b), + r_; + + #if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 1, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = ((idx_.i16[i] & 0x10) ? b_ : a_).i16[idx_.i16[i] & 0x0F]; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutex2var_epi16 + #define _mm256_permutex2var_epi16(a, idx, b) simde_mm256_permutex2var_epi16(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutex2var_epi16 (simde__m256i a, simde__mmask16 k, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutex2var_epi16(a, k, idx, b); + #else + return simde_mm256_mask_mov_epi16(a, k, simde_mm256_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutex2var_epi16 +#define _mm256_mask_permutex2var_epi16(a, k, idx, b) simde_mm256_mask_permutex2var_epi16(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask2_permutex2var_epi16 (simde__m256i a, simde__m256i idx, simde__mmask16 k, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask2_permutex2var_epi16(a, idx, k, b); + #else + return simde_mm256_mask_mov_epi16(idx, k, simde_mm256_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask2_permutex2var_epi16 +#define _mm256_mask2_permutex2var_epi16(a, idx, k, b) simde_mm256_mask2_permutex2var_epi16(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutex2var_epi16 (simde__mmask16 k, simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return 
_mm256_maskz_permutex2var_epi16(k, a, idx, b); + #else + return simde_mm256_maskz_mov_epi16(k, simde_mm256_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutex2var_epi16 +#define _mm256_maskz_permutex2var_epi16(k, a, idx, b) simde_mm256_maskz_permutex2var_epi16(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutex2var_epi32 (simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutex2var_epi32(a, idx, b); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m256i ta, tb, select; + ta = _mm256_permutevar8x32_epi32(a, idx); + tb = _mm256_permutevar8x32_epi32(b, idx); + select = _mm256_slli_epi32(idx, 28); + return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + idx_ = simde__m256i_to_private(idx), + b_ = simde__m256i_to_private(b), + r_; + + #if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 2, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = ((idx_.i32[i] & 8) ? b_ : a_).i32[idx_.i32[i] & 7]; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutex2var_epi32 + #define _mm256_permutex2var_epi32(a, idx, b) simde_mm256_permutex2var_epi32(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutex2var_epi32 (simde__m256i a, simde__mmask8 k, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutex2var_epi32(a, k, idx, b); + #else + return simde_mm256_mask_mov_epi32(a, k, simde_mm256_permutex2var_epi32(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutex2var_epi32 +#define _mm256_mask_permutex2var_epi32(a, k, idx, b) simde_mm256_mask_permutex2var_epi32(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask2_permutex2var_epi32 (simde__m256i a, simde__m256i idx, simde__mmask8 k, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask2_permutex2var_epi32(a, idx, k, b); + #else + return simde_mm256_mask_mov_epi32(idx, k, simde_mm256_permutex2var_epi32(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask2_permutex2var_epi32 +#define _mm256_mask2_permutex2var_epi32(a, idx, k, b) simde_mm256_mask2_permutex2var_epi32(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutex2var_epi32 (simde__mmask8 k, simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutex2var_epi32(k, a, idx, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_permutex2var_epi32(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef 
_mm256_maskz_permutex2var_epi32 +#define _mm256_maskz_permutex2var_epi32(k, a, idx, b) simde_mm256_maskz_permutex2var_epi32(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutex2var_epi64 (simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutex2var_epi64(a, idx, b); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + idx_ = simde__m256i_to_private(idx), + b_ = simde__m256i_to_private(b), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = ((idx_.i64[i] & 4) ? b_ : a_).i64[idx_.i64[i] & 3]; + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutex2var_epi64 + #define _mm256_permutex2var_epi64(a, idx, b) simde_mm256_permutex2var_epi64(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutex2var_epi64 (simde__m256i a, simde__mmask8 k, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutex2var_epi64(a, k, idx, b); + #else + return simde_mm256_mask_mov_epi64(a, k, simde_mm256_permutex2var_epi64(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutex2var_epi64 +#define _mm256_mask_permutex2var_epi64(a, k, idx, b) simde_mm256_mask_permutex2var_epi64(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask2_permutex2var_epi64 (simde__m256i a, simde__m256i idx, simde__mmask8 k, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask2_permutex2var_epi64(a, idx, k, b); + #else + return simde_mm256_mask_mov_epi64(idx, k, simde_mm256_permutex2var_epi64(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask2_permutex2var_epi64 +#define _mm256_mask2_permutex2var_epi64(a, idx, k, b) simde_mm256_mask2_permutex2var_epi64(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutex2var_epi64 (simde__mmask8 k, simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutex2var_epi64(k, a, idx, b); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_permutex2var_epi64(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutex2var_epi64 +#define _mm256_maskz_permutex2var_epi64(k, a, idx, b) simde_mm256_maskz_permutex2var_epi64(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutex2var_epi8 (simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutex2var_epi8(a, idx, b); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cvtepi16_epi8(_mm512_permutex2var_epi16(_mm512_cvtepu8_epi16(a), _mm512_cvtepu8_epi16(idx), _mm512_cvtepu8_epi16(b))); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m256i t0, t1, index, select0x10, select0x20, a01, b01; + const __m256i mask = _mm256_set1_epi8(0x3F); + const __m256i a0 = 
_mm256_permute4x64_epi64(a, (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + const __m256i a1 = _mm256_permute4x64_epi64(a, (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + const __m256i b0 = _mm256_permute4x64_epi64(b, (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + const __m256i b1 = _mm256_permute4x64_epi64(b, (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + + index = _mm256_and_si256(idx, mask); + t0 = _mm256_shuffle_epi8(a0, index); + t1 = _mm256_shuffle_epi8(a1, index); + select0x10 = _mm256_slli_epi64(index, 3); + a01 = _mm256_blendv_epi8(t0, t1, select0x10); + t0 = _mm256_shuffle_epi8(b0, index); + t1 = _mm256_shuffle_epi8(b1, index); + b01 = _mm256_blendv_epi8(t0, t1, select0x10); + select0x20 = _mm256_slli_epi64(index, 2); + return _mm256_blendv_epi8(a01, b01, select0x20); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + idx_ = simde__m256i_to_private(idx), + b_ = simde__m256i_to_private(b), + r_; + + #if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 0, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = ((idx_.i8[i] & 0x20) ? b_ : a_).i8[idx_.i8[i] & 0x1F]; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutex2var_epi8 + #define _mm256_permutex2var_epi8(a, idx, b) simde_mm256_permutex2var_epi8(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutex2var_epi8 (simde__m256i a, simde__mmask32 k, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutex2var_epi8(a, k, idx, b); + #else + return simde_mm256_mask_mov_epi8(a, k, simde_mm256_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutex2var_epi8 +#define _mm256_mask_permutex2var_epi8(a, k, idx, b) simde_mm256_mask_permutex2var_epi8(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask2_permutex2var_epi8 (simde__m256i a, simde__m256i idx, simde__mmask32 k, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask2_permutex2var_epi8(a, idx, k, b); + #else + return simde_mm256_mask_mov_epi8(idx, k, simde_mm256_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask2_permutex2var_epi8 +#define _mm256_mask2_permutex2var_epi8(a, idx, k, b) simde_mm256_mask2_permutex2var_epi8(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutex2var_epi8 (simde__mmask32 k, simde__m256i a, simde__m256i idx, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutex2var_epi8(k, a, idx, b); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutex2var_epi8 +#define _mm256_maskz_permutex2var_epi8(k, a, idx, b) simde_mm256_maskz_permutex2var_epi8(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_permutex2var_pd 
(simde__m256d a, simde__m256i idx, simde__m256d b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutex2var_pd(a, idx, b); + #else + return simde_mm256_castsi256_pd(simde_mm256_permutex2var_epi64(simde_mm256_castpd_si256(a), idx, simde_mm256_castpd_si256(b))); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutex2var_pd + #define _mm256_permutex2var_pd(a, idx, b) simde_mm256_permutex2var_pd(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_permutex2var_pd (simde__m256d a, simde__mmask8 k, simde__m256i idx, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutex2var_pd(a, k, idx, b); + #else + return simde_mm256_mask_mov_pd(a, k, simde_mm256_permutex2var_pd(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutex2var_pd +#define _mm256_mask_permutex2var_pd(a, k, idx, b) simde_mm256_mask_permutex2var_pd(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask2_permutex2var_pd (simde__m256d a, simde__m256i idx, simde__mmask8 k, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask2_permutex2var_pd(a, idx, k, b); + #else + return simde_mm256_mask_mov_pd(simde_mm256_castsi256_pd(idx), k, simde_mm256_permutex2var_pd(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask2_permutex2var_pd +#define _mm256_mask2_permutex2var_pd(a, idx, k, b) simde_mm256_mask2_permutex2var_pd(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_permutex2var_pd (simde__mmask8 k, simde__m256d a, simde__m256i idx, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutex2var_pd(k, a, idx, b); + #else + return simde_mm256_maskz_mov_pd(k, simde_mm256_permutex2var_pd(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutex2var_pd +#define _mm256_maskz_permutex2var_pd(k, a, idx, b) simde_mm256_maskz_permutex2var_pd(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_permutex2var_ps (simde__m256 a, simde__m256i idx, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutex2var_ps(a, idx, b); + #else + return simde_mm256_castsi256_ps(simde_mm256_permutex2var_epi32(simde_mm256_castps_si256(a), idx, simde_mm256_castps_si256(b))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutex2var_ps + #define _mm256_permutex2var_ps(a, idx, b) simde_mm256_permutex2var_ps(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_permutex2var_ps (simde__m256 a, simde__mmask8 k, simde__m256i idx, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutex2var_ps(a, k, idx, b); + #else + return simde_mm256_mask_mov_ps(a, k, simde_mm256_permutex2var_ps(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || 
defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutex2var_ps +#define _mm256_mask_permutex2var_ps(a, k, idx, b) simde_mm256_mask_permutex2var_ps(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask2_permutex2var_ps (simde__m256 a, simde__m256i idx, simde__mmask8 k, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask2_permutex2var_ps(a, idx, k, b); + #else + return simde_mm256_mask_mov_ps(simde_mm256_castsi256_ps(idx), k, simde_mm256_permutex2var_ps(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask2_permutex2var_ps +#define _mm256_mask2_permutex2var_ps(a, idx, k, b) simde_mm256_mask2_permutex2var_ps(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_permutex2var_ps (simde__mmask8 k, simde__m256 a, simde__m256i idx, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutex2var_ps(k, a, idx, b); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_permutex2var_ps(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutex2var_ps +#define _mm256_maskz_permutex2var_ps(k, a, idx, b) simde_mm256_maskz_permutex2var_ps(k, a, idx, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i -simde_mm512_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__m512i b) { +simde_mm512_permutex2var_epi16 (simde__m512i a, simde__m512i idx, simde__m512i b) { #if defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm512_permutex2var_epi32(a, idx, b); + return _mm512_permutex2var_epi16(a, idx, b); #else simde__m512i_private a_ = simde__m512i_to_private(a), @@ -47,15 +1135,160 @@ simde_mm512_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__m512i b b_ = simde__m512i_to_private(b), r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = ((idx_.i32[i] & 0x10) ? 
b_ : a_).i32[idx_.i32[i] & 0x0F]; - } + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i hilo, hilo1, hilo2, hi, lo, idx1, idx2, ta, tb, select; + const __m256i ones = _mm256_set1_epi16(1); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) { + idx1 = idx_.m256i[i]; + idx2 = _mm256_srli_epi32(idx1, 1); + + select = _mm256_slli_epi32(idx2, 27); + ta = _mm256_permutevar8x32_epi32(a_.m256i[0], idx2); + tb = _mm256_permutevar8x32_epi32(b_.m256i[0], idx2); + hilo = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + ta = _mm256_permutevar8x32_epi32(a_.m256i[1], idx2); + tb = _mm256_permutevar8x32_epi32(b_.m256i[1], idx2); + hilo1 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + select = _mm256_add_epi32(select, select); + hilo1 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(hilo), + _mm256_castsi256_ps(hilo1), + _mm256_castsi256_ps(select))); + + idx2 = _mm256_srli_epi32(idx2, 16); + + select = _mm256_slli_epi32(idx2, 27); + ta = _mm256_permutevar8x32_epi32(a_.m256i[0], idx2); + tb = _mm256_permutevar8x32_epi32(b_.m256i[0], idx2); + hilo = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + ta = _mm256_permutevar8x32_epi32(a_.m256i[1], idx2); + tb = _mm256_permutevar8x32_epi32(b_.m256i[1], idx2); + hilo2 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta), + _mm256_castsi256_ps(tb), + _mm256_castsi256_ps(select))); + select = _mm256_add_epi32(select, select); + hilo2 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(hilo), + _mm256_castsi256_ps(hilo2), + _mm256_castsi256_ps(select))); + + lo = _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo1, 0x55); + hi = _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo1, 16), 0x55); + + select = _mm256_cmpeq_epi16(_mm256_and_si256(idx1, ones), ones); + r_.m256i[i] = _mm256_blendv_epi8(lo, hi, select); + } + #elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 1, 2); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = ((idx_.i16[i] & 0x20) ? 
b_ : a_).i16[idx_.i16[i] & 0x1F]; + } + #endif return simde__m512i_from_private(r_); #endif } +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm512_permutex2var_epi16 + #define _mm512_permutex2var_epi16(a, idx, b) simde_mm512_permutex2var_epi16(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_permutex2var_epi16 (simde__m512i a, simde__mmask32 k, simde__m512i idx, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_permutex2var_epi16(a, k, idx, b); + #else + return simde_mm512_mask_mov_epi16(a, k, simde_mm512_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_permutex2var_epi16 +#define _mm512_mask_permutex2var_epi16(a, k, idx, b) simde_mm512_mask_permutex2var_epi16(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask2_permutex2var_epi16 (simde__m512i a, simde__m512i idx, simde__mmask32 k, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask2_permutex2var_epi16(a, idx, k, b); + #else + return simde_mm512_mask_mov_epi16(idx, k, simde_mm512_permutex2var_epi16(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask2_permutex2var_epi16 +#define _mm512_mask2_permutex2var_epi16(a, idx, k, b) simde_mm512_mask2_permutex2var_epi16(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_permutex2var_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i idx, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_permutex2var_epi16(k, a, idx, b); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_permutex2var_epi16(a, idx, b)); + #endif +} #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_permutex2var_epi16 +#define _mm512_maskz_permutex2var_epi16(k, a, idx, b) simde_mm512_maskz_permutex2var_epi16(k, a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_permutex2var_epi32(a, idx, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + idx_ = simde__m512i_to_private(idx), + b_ = simde__m512i_to_private(b), + r_; + + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i index, t0, t1, a01, b01, select; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) { + index = idx_.m256i[i]; + t0 = _mm256_permutevar8x32_epi32(a_.m256i[0], index); + t1 = _mm256_permutevar8x32_epi32(a_.m256i[1], index); + select = _mm256_slli_epi32(index, 28); + a01 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(t0), + _mm256_castsi256_ps(t1), + _mm256_castsi256_ps(select))); + t0 = _mm256_permutevar8x32_epi32(b_.m256i[0], index); + t1 = _mm256_permutevar8x32_epi32(b_.m256i[1], index); + b01 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(t0), + _mm256_castsi256_ps(t1), + _mm256_castsi256_ps(select))); + select = _mm256_slli_epi32(index, 27); + r_.m256i[i] = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(a01), + _mm256_castsi256_ps(b01), + _mm256_castsi256_ps(select))); + } + #elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 2, 2); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = 
((idx_.i32[i] & 0x10) ? b_ : a_).i32[idx_.i32[i] & 0x0F]; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_permutex2var_epi32 #define _mm512_permutex2var_epi32(a, idx, b) simde_mm512_permutex2var_epi32(a, idx, b) #endif @@ -63,13 +1296,13 @@ simde_mm512_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__m512i b SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask_permutex2var_epi32 (simde__m512i a, simde__mmask16 k, simde__m512i idx, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_permutex2var_epi32(a, k, idx, b); #else return simde_mm512_mask_mov_epi32(a, k, simde_mm512_permutex2var_epi32(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_permutex2var_epi32 #define _mm512_mask_permutex2var_epi32(a, k, idx, b) simde_mm512_mask_permutex2var_epi32(a, k, idx, b) #endif @@ -77,13 +1310,13 @@ simde_mm512_mask_permutex2var_epi32 (simde__m512i a, simde__mmask16 k, simde__m5 SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask2_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__mmask16 k, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask2_permutex2var_epi32(a, idx, k, b); #else return simde_mm512_mask_mov_epi32(idx, k, simde_mm512_permutex2var_epi32(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask2_permutex2var_epi32 #define _mm512_mask2_permutex2var_epi32(a, idx, k, b) simde_mm512_mask2_permutex2var_epi32(a, idx, k, b) #endif @@ -91,13 +1324,13 @@ simde_mm512_mask2_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__m SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_maskz_permutex2var_epi32 (simde__mmask16 k, simde__m512i a, simde__m512i idx, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_permutex2var_epi32(k, a, idx, b); #else return simde_mm512_maskz_mov_epi32(k, simde_mm512_permutex2var_epi32(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_permutex2var_epi32 #define _mm512_maskz_permutex2var_epi32(k, a, idx, b) simde_mm512_maskz_permutex2var_epi32(k, a, idx, b) #endif @@ -105,7 +1338,7 @@ simde_mm512_maskz_permutex2var_epi32 (simde__mmask16 k, simde__m512i a, simde__m SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_permutex2var_epi64(a, idx, b); #else simde__m512i_private @@ -116,13 +1349,13 @@ simde_mm512_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__m512i b SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = ((idx_.i64[i] & 0x08) ? b_ : a_).i64[idx_.i64[i] & 0x07]; + r_.i64[i] = ((idx_.i64[i] & 8) ? 
b_ : a_).i64[idx_.i64[i] & 7]; } return simde__m512i_from_private(r_); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_permutex2var_epi64 #define _mm512_permutex2var_epi64(a, idx, b) simde_mm512_permutex2var_epi64(a, idx, b) #endif @@ -130,13 +1363,13 @@ simde_mm512_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__m512i b SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask_permutex2var_epi64 (simde__m512i a, simde__mmask8 k, simde__m512i idx, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_permutex2var_epi64(a, k, idx, b); #else return simde_mm512_mask_mov_epi64(a, k, simde_mm512_permutex2var_epi64(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_permutex2var_epi64 #define _mm512_mask_permutex2var_epi64(a, k, idx, b) simde_mm512_mask_permutex2var_epi64(a, k, idx, b) #endif @@ -144,13 +1377,13 @@ simde_mm512_mask_permutex2var_epi64 (simde__m512i a, simde__mmask8 k, simde__m51 SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask2_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__mmask8 k, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask2_permutex2var_epi64(a, idx, k, b); #else return simde_mm512_mask_mov_epi64(idx, k, simde_mm512_permutex2var_epi64(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask2_permutex2var_epi64 #define _mm512_mask2_permutex2var_epi64(a, idx, k, b) simde_mm512_mask2_permutex2var_epi64(a, idx, k, b) #endif @@ -158,35 +1391,149 @@ simde_mm512_mask2_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__m SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_maskz_permutex2var_epi64 (simde__mmask8 k, simde__m512i a, simde__m512i idx, simde__m512i b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_permutex2var_epi64(k, a, idx, b); #else return simde_mm512_maskz_mov_epi64(k, simde_mm512_permutex2var_epi64(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_permutex2var_epi64 #define _mm512_maskz_permutex2var_epi64(k, a, idx, b) simde_mm512_maskz_permutex2var_epi64(k, a, idx, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_permutex2var_epi8 (simde__m512i a, simde__m512i idx, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_permutex2var_epi8(a, idx, b); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m512i hilo, hi, lo, hi2, lo2, idx2; + const __m512i ones = _mm512_set1_epi8(1); + const __m512i low_bytes = _mm512_set1_epi16(0x00FF); + + idx2 = _mm512_srli_epi16(idx, 1); + hilo = _mm512_permutex2var_epi16(a, idx2, b); + __mmask64 mask = _mm512_test_epi8_mask(idx, ones); + lo = _mm512_and_si512(hilo, low_bytes); + hi = _mm512_srli_epi16(hilo, 8); + + idx2 = _mm512_srli_epi16(idx, 9); + hilo = _mm512_permutex2var_epi16(a, idx2, b); + lo2 = _mm512_slli_epi16(hilo, 8); + hi2 = _mm512_andnot_si512(low_bytes, hilo); + + lo = _mm512_or_si512(lo, lo2); + hi = _mm512_or_si512(hi, hi2); + + return _mm512_mask_blend_epi8(mask, lo, hi); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + idx_ = 
simde__m512i_to_private(idx), + b_ = simde__m512i_to_private(b), + r_; + + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i t0, t1, index, select0x10, select0x20, select0x40, t01, t23, a0123, b0123; + const __m256i mask = _mm256_set1_epi8(0x7F); + const __m256i a0 = _mm256_permute4x64_epi64(a_.m256i[0], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + const __m256i a1 = _mm256_permute4x64_epi64(a_.m256i[0], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + const __m256i a2 = _mm256_permute4x64_epi64(a_.m256i[1], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + const __m256i a3 = _mm256_permute4x64_epi64(a_.m256i[1], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + const __m256i b0 = _mm256_permute4x64_epi64(b_.m256i[0], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + const __m256i b1 = _mm256_permute4x64_epi64(b_.m256i[0], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + const __m256i b2 = _mm256_permute4x64_epi64(b_.m256i[1], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + const __m256i b3 = _mm256_permute4x64_epi64(b_.m256i[1], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) { + index = _mm256_and_si256(idx_.m256i[i], mask); + t0 = _mm256_shuffle_epi8(a0, index); + t1 = _mm256_shuffle_epi8(a1, index); + select0x10 = _mm256_slli_epi64(index, 3); + t01 = _mm256_blendv_epi8(t0, t1, select0x10); + t0 = _mm256_shuffle_epi8(a2, index); + t1 = _mm256_shuffle_epi8(a3, index); + t23 = _mm256_blendv_epi8(t0, t1, select0x10); + select0x20 = _mm256_slli_epi64(index, 2); + a0123 = _mm256_blendv_epi8(t01, t23, select0x20); + t0 = _mm256_shuffle_epi8(b0, index); + t1 = _mm256_shuffle_epi8(b1, index); + t01 = _mm256_blendv_epi8(t0, t1, select0x10); + t0 = _mm256_shuffle_epi8(b2, index); + t1 = _mm256_shuffle_epi8(b3, index); + t23 = _mm256_blendv_epi8(t0, t1, select0x10); + b0123 = _mm256_blendv_epi8(t01, t23, select0x20); + select0x40 = _mm256_slli_epi64(index, 1); + r_.m256i[i] = _mm256_blendv_epi8(a0123, b0123, select0x40); + } + #elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) + simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 0, 2); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = ((idx_.i8[i] & 0x40) ? 
b_ : a_).i8[idx_.i8[i] & 0x3F]; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_permutex2var_epi8 + #define _mm512_permutex2var_epi8(a, idx, b) simde_mm512_permutex2var_epi8(a, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_permutex2var_epi8 (simde__m512i a, simde__mmask64 k, simde__m512i idx, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_mask_permutex2var_epi8(a, k, idx, b); + #else + return simde_mm512_mask_mov_epi8(a, k, simde_mm512_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_permutex2var_epi8 +#define _mm512_mask_permutex2var_epi8(a, k, idx, b) simde_mm512_mask_permutex2var_epi8(a, k, idx, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask2_permutex2var_epi8 (simde__m512i a, simde__m512i idx, simde__mmask64 k, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_mask2_permutex2var_epi8(a, idx, k, b); + #else + return simde_mm512_mask_mov_epi8(idx, k, simde_mm512_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask2_permutex2var_epi8 +#define _mm512_mask2_permutex2var_epi8(a, idx, k, b) simde_mm512_mask2_permutex2var_epi8(a, idx, k, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_permutex2var_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i idx, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_maskz_permutex2var_epi8(k, a, idx, b); + #else + return simde_mm512_maskz_mov_epi8(k, simde_mm512_permutex2var_epi8(a, idx, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_permutex2var_epi8 +#define _mm512_maskz_permutex2var_epi8(k, a, idx, b) simde_mm512_maskz_permutex2var_epi8(k, a, idx, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_permutex2var_pd (simde__m512d a, simde__m512i idx, simde__m512d b) { #if defined(SIMDE_X86_AVX512BW_NATIVE) return _mm512_permutex2var_pd(a, idx, b); #else - simde__m512i_private idx_ = simde__m512i_to_private(idx); - simde__m512d_private - a_ = simde__m512d_to_private(a), - b_ = simde__m512d_to_private(b), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = ((idx_.i64[i] & 0x08) ? 
b_ : a_).f64[idx_.i64[i] & 0x07]; - } - - return simde__m512d_from_private(r_); + return simde_mm512_castsi512_pd(simde_mm512_permutex2var_epi64(simde_mm512_castpd_si512(a), idx, simde_mm512_castpd_si512(b))); #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) @@ -197,13 +1544,13 @@ simde_mm512_permutex2var_pd (simde__m512d a, simde__m512i idx, simde__m512d b) { SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_mask_permutex2var_pd (simde__m512d a, simde__mmask8 k, simde__m512i idx, simde__m512d b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_permutex2var_pd(a, k, idx, b); #else return simde_mm512_mask_mov_pd(a, k, simde_mm512_permutex2var_pd(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_permutex2var_pd #define _mm512_mask_permutex2var_pd(a, k, idx, b) simde_mm512_mask_permutex2var_pd(a, k, idx, b) #endif @@ -211,13 +1558,13 @@ simde_mm512_mask_permutex2var_pd (simde__m512d a, simde__mmask8 k, simde__m512i SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_mask2_permutex2var_pd (simde__m512d a, simde__m512i idx, simde__mmask8 k, simde__m512d b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask2_permutex2var_pd(a, idx, k, b); #else return simde_mm512_mask_mov_pd(simde_mm512_castsi512_pd(idx), k, simde_mm512_permutex2var_pd(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask2_permutex2var_pd #define _mm512_mask2_permutex2var_pd(a, idx, k, b) simde_mm512_mask2_permutex2var_pd(a, idx, k, b) #endif @@ -225,13 +1572,13 @@ simde_mm512_mask2_permutex2var_pd (simde__m512d a, simde__m512i idx, simde__mmas SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_maskz_permutex2var_pd (simde__mmask8 k, simde__m512d a, simde__m512i idx, simde__m512d b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_permutex2var_pd(k, a, idx, b); #else return simde_mm512_maskz_mov_pd(k, simde_mm512_permutex2var_pd(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_permutex2var_pd #define _mm512_maskz_permutex2var_pd(k, a, idx, b) simde_mm512_maskz_permutex2var_pd(k, a, idx, b) #endif @@ -239,24 +1586,13 @@ simde_mm512_maskz_permutex2var_pd (simde__mmask8 k, simde__m512d a, simde__m512i SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_permutex2var_ps (simde__m512 a, simde__m512i idx, simde__m512 b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_permutex2var_ps(a, idx, b); #else - simde__m512i_private idx_ = simde__m512i_to_private(idx); - simde__m512_private - a_ = simde__m512_to_private(a), - b_ = simde__m512_to_private(b), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = ((idx_.i32[i] & 0x10) ? 
b_ : a_).f32[idx_.i32[i] & 0x0F]; - } - - return simde__m512_from_private(r_); + return simde_mm512_castsi512_ps(simde_mm512_permutex2var_epi32(simde_mm512_castps_si512(a), idx, simde_mm512_castps_si512(b))); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_permutex2var_ps #define _mm512_permutex2var_ps(a, idx, b) simde_mm512_permutex2var_ps(a, idx, b) #endif @@ -264,13 +1600,13 @@ simde_mm512_permutex2var_ps (simde__m512 a, simde__m512i idx, simde__m512 b) { SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_mask_permutex2var_ps (simde__m512 a, simde__mmask16 k, simde__m512i idx, simde__m512 b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_permutex2var_ps(a, k, idx, b); #else return simde_mm512_mask_mov_ps(a, k, simde_mm512_permutex2var_ps(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_permutex2var_ps #define _mm512_mask_permutex2var_ps(a, k, idx, b) simde_mm512_mask_permutex2var_ps(a, k, idx, b) #endif @@ -278,13 +1614,13 @@ simde_mm512_mask_permutex2var_ps (simde__m512 a, simde__mmask16 k, simde__m512i SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_mask2_permutex2var_ps (simde__m512 a, simde__m512i idx, simde__mmask16 k, simde__m512 b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask2_permutex2var_ps(a, idx, k, b); #else return simde_mm512_mask_mov_ps(simde_mm512_castsi512_ps(idx), k, simde_mm512_permutex2var_ps(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask2_permutex2var_ps #define _mm512_mask2_permutex2var_ps(a, idx, k, b) simde_mm512_mask2_permutex2var_ps(a, idx, k, b) #endif @@ -292,13 +1628,13 @@ simde_mm512_mask2_permutex2var_ps (simde__m512 a, simde__m512i idx, simde__mmask SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_maskz_permutex2var_ps (simde__mmask16 k, simde__m512 a, simde__m512i idx, simde__m512 b) { - #if defined(SIMDE_X86_AVX512BW_NATIVE) + #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_permutex2var_ps(k, a, idx, b); #else return simde_mm512_maskz_mov_ps(k, simde_mm512_permutex2var_ps(a, idx, b)); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_permutex2var_ps #define _mm512_maskz_permutex2var_ps(k, a, idx, b) simde_mm512_maskz_permutex2var_ps(k, a, idx, b) #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/permutexvar.h b/lib/mmseqs/lib/simde/simde/x86/avx512/permutexvar.h index fcf91b9..2615d7f 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/permutexvar.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/permutexvar.h @@ -29,12 +29,729 @@ #define SIMDE_X86_AVX512_PERMUTEXVAR_H #include "types.h" +#include "and.h" +#include "andnot.h" +#include "blend.h" #include "mov.h" +#include "or.h" +#include "set1.h" +#include "slli.h" +#include "srli.h" +#include "test.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_permutexvar_epi16 (simde__m128i idx, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutexvar_epi16(idx, a); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + simde__m128i mask16 = simde_mm_set1_epi16(0x0007); + simde__m128i 
shift16 = simde_mm_set1_epi16(0x0202); + simde__m128i byte_index16 = simde_mm_set1_epi16(0x0100); + simde__m128i index16 = simde_mm_and_si128(idx, mask16); + index16 = simde_mm_mullo_epi16(index16, shift16); + index16 = simde_mm_add_epi16(index16, byte_index16); + return simde_mm_shuffle_epi8(a, index16); + #else + simde__m128i_private + idx_ = simde__m128i_to_private(idx), + a_ = simde__m128i_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint16x8_t mask16 = vdupq_n_u16(0x0007); + uint16x8_t byte_index16 = vdupq_n_u16(0x0100); + uint16x8_t index16 = vandq_u16(idx_.neon_u16, mask16); + index16 = vmulq_n_u16(index16, 0x0202); + index16 = vaddq_u16(index16, byte_index16); + r_.neon_u8 = vqtbl1q_u8(a_.neon_u8, vreinterpretq_u8_u16(index16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) index16; + index16 = vec_and(idx_.altivec_u16, vec_splat_u16(7)); + index16 = vec_mladd(index16, vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0202)), vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0100))); + r_.altivec_u8 = vec_perm(a_.altivec_u8, a_.altivec_u8, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index16)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + const v128_t mask16 = wasm_i16x8_splat(0x0007); + const v128_t shift16 = wasm_i16x8_splat(0x0202); + const v128_t byte_index16 = wasm_i16x8_splat(0x0100); + v128_t index16 = wasm_v128_and(idx_.wasm_v128, mask16); + index16 = wasm_i16x8_mul(index16, shift16); + index16 = wasm_i16x8_add(index16, byte_index16); + r_.wasm_v128 = wasm_v8x16_swizzle(a_.wasm_v128, index16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = a_.i16[idx_.i16[i] & 0x07]; + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutexvar_epi16 + #define _mm_permutexvar_epi16(idx, a) simde_mm_permutexvar_epi16(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_permutexvar_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i idx, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutexvar_epi16(src, k, idx, a); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_permutexvar_epi16(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutexvar_epi16 + #define _mm_mask_permutexvar_epi16(src, k, idx, a) simde_mm_mask_permutexvar_epi16(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_permutexvar_epi16 (simde__mmask8 k, simde__m128i idx, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutexvar_epi16(k, idx, a); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_permutexvar_epi16(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutexvar_epi16 + #define _mm_maskz_permutexvar_epi16(k, idx, a) simde_mm_maskz_permutexvar_epi16(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_permutexvar_epi8 (simde__m128i idx, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_permutexvar_epi8(idx, a); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + 
simde__m128i mask = simde_mm_set1_epi8(0x0F); + simde__m128i index = simde_mm_and_si128(idx, mask); + return simde_mm_shuffle_epi8(a, index); + #else + simde__m128i_private + idx_ = simde__m128i_to_private(idx), + a_ = simde__m128i_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16_t mask = vdupq_n_u8(0x0F); + uint8x16_t index = vandq_u8(idx_.neon_u8, mask); + r_.neon_u8 = vqtbl1q_u8(a_.neon_u8, index); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = vec_perm(a_.altivec_u8, a_.altivec_u8, idx_.altivec_u8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + const v128_t mask = wasm_i8x16_splat(0x0F); + v128_t index = wasm_v128_and(idx_.wasm_v128, mask); + r_.wasm_v128 = wasm_v8x16_swizzle(a_.wasm_v128, index); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = a_.i8[idx_.i8[i] & 0x0F]; + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_permutexvar_epi8 + #define _mm_permutexvar_epi8(idx, a) simde_mm_permutexvar_epi8(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_permutexvar_epi8 (simde__m128i src, simde__mmask16 k, simde__m128i idx, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_permutexvar_epi8(src, k, idx, a); + #else + return simde_mm_mask_mov_epi8(src, k, simde_mm_permutexvar_epi8(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_permutexvar_epi8 + #define _mm_mask_permutexvar_epi8(src, k, idx, a) simde_mm_mask_permutexvar_epi8(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_permutexvar_epi8 (simde__mmask16 k, simde__m128i idx, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_permutexvar_epi8(k, idx, a); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_permutexvar_epi8(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_permutexvar_epi8 + #define _mm_maskz_permutexvar_epi8(k, idx, a) simde_mm_maskz_permutexvar_epi8(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutexvar_epi16 (simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutexvar_epi16(idx, a); + #elif defined(SIMDE_X86_AVX2_NATIVE) + simde__m256i mask16 = simde_mm256_set1_epi16(0x001F); + simde__m256i shift16 = simde_mm256_set1_epi16(0x0202); + simde__m256i byte_index16 = simde_mm256_set1_epi16(0x0100); + simde__m256i index16 = simde_mm256_and_si256(idx, mask16); + index16 = simde_mm256_mullo_epi16(index16, shift16); + simde__m256i lo = simde_mm256_permute4x64_epi64(a, (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + simde__m256i hi = simde_mm256_permute4x64_epi64(a, (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + simde__m256i select = simde_mm256_slli_epi64(index16, 3); + index16 = simde_mm256_add_epi16(index16, byte_index16); + lo = simde_mm256_shuffle_epi8(lo, index16); + hi = simde_mm256_shuffle_epi8(hi, index16); + return simde_mm256_blendv_epi8(lo, hi, select); + #else + simde__m256i_private + idx_ = simde__m256i_to_private(idx), + a_ = simde__m256i_to_private(a), + r_; + + #if 
defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16x2_t table = { { a_.m128i_private[0].neon_u8, + a_.m128i_private[1].neon_u8 } }; + uint16x8_t mask16 = vdupq_n_u16(0x000F); + uint16x8_t byte_index16 = vdupq_n_u16(0x0100); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + uint16x8_t index16 = vandq_u16(idx_.m128i_private[i].neon_u16, mask16); + index16 = vmulq_n_u16(index16, 0x0202); + index16 = vaddq_u16(index16, byte_index16); + r_.m128i_private[i].neon_u8 = vqtbl2q_u8(table, vreinterpretq_u8_u16(index16)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) index16, mask16, shift16, byte_index16; + mask16 = vec_splat_u16(0x000F); + shift16 = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0202)); + byte_index16 = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0100)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index16 = vec_and(idx_.m128i_private[i].altivec_u16, mask16); + index16 = vec_mladd(index16, shift16, byte_index16); + r_.m128i_private[i].altivec_u8 = vec_perm(a_.m128i_private[0].altivec_u8, + a_.m128i_private[1].altivec_u8, + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index16)); + } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t index, index16, r, t; + const v128_t mask16 = wasm_i16x8_splat(0x000F); + const v128_t shift16 = wasm_i16x8_splat(0x0202); + const v128_t byte_index16 = wasm_i16x8_splat(0x0100); + const v128_t sixteen = wasm_i8x16_splat(16); + const v128_t a0 = a_.m128i_private[0].wasm_v128; + const v128_t a1 = a_.m128i_private[1].wasm_v128; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index16 = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask16); + index16 = wasm_i16x8_mul(index16, shift16); + index = wasm_i16x8_add(index16, byte_index16); + r = wasm_v8x16_swizzle(a0, index); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a1, index); + r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = a_.i16[idx_.i16[i] & 0x0F]; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutexvar_epi16 + #define _mm256_permutexvar_epi16(idx, a) simde_mm256_permutexvar_epi16(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutexvar_epi16 (simde__m256i src, simde__mmask16 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutexvar_epi16(src, k, idx, a); + #else + return simde_mm256_mask_mov_epi16(src, k, simde_mm256_permutexvar_epi16(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutexvar_epi16 + #define _mm256_mask_permutexvar_epi16(src, k, idx, a) simde_mm256_mask_permutexvar_epi16(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutexvar_epi16 (simde__mmask16 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutexvar_epi16(k, idx, a); + #else + return simde_mm256_maskz_mov_epi16(k, 
simde_mm256_permutexvar_epi16(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutexvar_epi16 + #define _mm256_maskz_permutexvar_epi16(k, idx, a) simde_mm256_maskz_permutexvar_epi16(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutexvar_epi32 (simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutexvar_epi32(idx, a); + #elif defined(SIMDE_X86_AVX2_NATIVE) + return simde_mm256_permutevar8x32_epi32(a, idx); + #else + simde__m256i_private + idx_ = simde__m256i_to_private(idx), + a_ = simde__m256i_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16x2_t table = { { a_.m128i_private[0].neon_u8, + a_.m128i_private[1].neon_u8 } }; + uint32x4_t mask32 = vdupq_n_u32(0x00000007); + uint32x4_t byte_index32 = vdupq_n_u32(0x03020100); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + uint32x4_t index32 = vandq_u32(idx_.m128i_private[i].neon_u32, mask32); + index32 = vmulq_n_u32(index32, 0x04040404); + index32 = vaddq_u32(index32, byte_index32); + r_.m128i_private[i].neon_u8 = vqtbl2q_u8(table, vreinterpretq_u8_u32(index32)); + } + #else + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = a_.i32[idx_.i32[i] & 0x07]; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutexvar_epi32 + #define _mm256_permutexvar_epi32(idx, a) simde_mm256_permutexvar_epi32(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutexvar_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutexvar_epi32(src, k, idx, a); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_permutexvar_epi32(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutexvar_epi32 + #define _mm256_mask_permutexvar_epi32(src, k, idx, a) simde_mm256_mask_permutexvar_epi32(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutexvar_epi32 (simde__mmask8 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutexvar_epi32(k, idx, a); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_permutexvar_epi32(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutexvar_epi32 + #define _mm256_maskz_permutexvar_epi32(k, idx, a) simde_mm256_maskz_permutexvar_epi32(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutexvar_epi64 (simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutexvar_epi64(idx, a); + #else + simde__m256i_private + idx_ = simde__m256i_to_private(idx), + a_ = simde__m256i_to_private(a), + r_; + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) 
; i++) { + r_.i64[i] = a_.i64[idx_.i64[i] & 3]; + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutexvar_epi64 + #define _mm256_permutexvar_epi64(idx, a) simde_mm256_permutexvar_epi64(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutexvar_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutexvar_epi64(src, k, idx, a); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_permutexvar_epi64(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutexvar_epi64 + #define _mm256_mask_permutexvar_epi64(src, k, idx, a) simde_mm256_mask_permutexvar_epi64(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutexvar_epi64 (simde__mmask8 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutexvar_epi64(k, idx, a); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_permutexvar_epi64(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutexvar_epi64 + #define _mm256_maskz_permutexvar_epi64(k, idx, a) simde_mm256_maskz_permutexvar_epi64(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_permutexvar_epi8 (simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutexvar_epi8(idx, a); + #elif defined(SIMDE_X86_AVX2_NATIVE) + simde__m256i mask = simde_mm256_set1_epi8(0x0F); + simde__m256i lo = simde_mm256_permute4x64_epi64(a, (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0)); + simde__m256i hi = simde_mm256_permute4x64_epi64(a, (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0)); + simde__m256i index = simde_mm256_and_si256(idx, mask); + simde__m256i select = simde_mm256_slli_epi64(idx, 3); + lo = simde_mm256_shuffle_epi8(lo, index); + hi = simde_mm256_shuffle_epi8(hi, index); + return simde_mm256_blendv_epi8(lo, hi, select); + #else + simde__m256i_private + idx_ = simde__m256i_to_private(idx), + a_ = simde__m256i_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16x2_t table = { { a_.m128i_private[0].neon_u8, + a_.m128i_private[1].neon_u8 } }; + uint8x16_t mask = vdupq_n_u8(0x1F); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].neon_u8 = vqtbl2q_u8(table, vandq_u8(idx_.m128i_private[i].neon_u8, mask)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_u8 = vec_perm(a_.m128i_private[0].altivec_u8, a_.m128i_private[1].altivec_u8, idx_.m128i_private[i].altivec_u8); + } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t index, r, t; + const v128_t mask = wasm_i8x16_splat(0x1F); + const v128_t sixteen = wasm_i8x16_splat(16); + const v128_t a0 = a_.m128i_private[0].wasm_v128; + const v128_t a1 = a_.m128i_private[1].wasm_v128; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index 
= wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); + r = wasm_v8x16_swizzle(a0, index); + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a1, index); + r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = a_.i8[idx_.i8[i] & 0x1F]; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutexvar_epi8 + #define _mm256_permutexvar_epi8(idx, a) simde_mm256_permutexvar_epi8(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_permutexvar_epi8 (simde__m256i src, simde__mmask32 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutexvar_epi8(src, k, idx, a); + #else + return simde_mm256_mask_mov_epi8(src, k, simde_mm256_permutexvar_epi8(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutexvar_epi8 + #define _mm256_mask_permutexvar_epi8(src, k, idx, a) simde_mm256_mask_permutexvar_epi8(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_permutexvar_epi8 (simde__mmask32 k, simde__m256i idx, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutexvar_epi8(k, idx, a); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_permutexvar_epi8(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutexvar_epi8 + #define _mm256_maskz_permutexvar_epi8(k, idx, a) simde_mm256_maskz_permutexvar_epi8(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_permutexvar_pd (simde__m256i idx, simde__m256d a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutexvar_pd(idx, a); + #else + return simde_mm256_castsi256_pd(simde_mm256_permutexvar_epi64(idx, simde_mm256_castpd_si256(a))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutexvar_pd + #define _mm256_permutexvar_pd(idx, a) simde_mm256_permutexvar_pd(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_permutexvar_pd (simde__m256d src, simde__mmask8 k, simde__m256i idx, simde__m256d a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutexvar_pd(src, k, idx, a); + #else + return simde_mm256_mask_mov_pd(src, k, simde_mm256_permutexvar_pd(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutexvar_pd + #define _mm256_mask_permutexvar_pd(src, k, idx, a) simde_mm256_mask_permutexvar_pd(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_permutexvar_pd (simde__mmask8 k, simde__m256i idx, simde__m256d a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutexvar_pd(k, idx, a); + #else + return simde_mm256_maskz_mov_pd(k, simde_mm256_permutexvar_pd(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || 
defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutexvar_pd + #define _mm256_maskz_permutexvar_pd(k, idx, a) simde_mm256_maskz_permutexvar_pd(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_permutexvar_ps (simde__m256i idx, simde__m256 a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_permutexvar_ps(idx, a); + #elif defined(SIMDE_X86_AVX2_NATIVE) + return simde_mm256_permutevar8x32_ps(a, idx); + #else + return simde_mm256_castsi256_ps(simde_mm256_permutexvar_epi32(idx, simde_mm256_castps_si256(a))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_permutexvar_ps + #define _mm256_permutexvar_ps(idx, a) simde_mm256_permutexvar_ps(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_permutexvar_ps (simde__m256 src, simde__mmask8 k, simde__m256i idx, simde__m256 a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_permutexvar_ps(src, k, idx, a); + #else + return simde_mm256_mask_mov_ps(src, k, simde_mm256_permutexvar_ps(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_permutexvar_ps + #define _mm256_mask_permutexvar_ps(src, k, idx, a) simde_mm256_mask_permutexvar_ps(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_permutexvar_ps (simde__mmask8 k, simde__m256i idx, simde__m256 a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_permutexvar_ps(k, idx, a); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_permutexvar_ps(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_permutexvar_ps + #define _mm256_maskz_permutexvar_ps(k, idx, a) simde_mm256_maskz_permutexvar_ps(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_permutexvar_epi16 (simde__m512i idx, simde__m512i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_permutexvar_epi16(idx, a); + #else + simde__m512i_private + idx_ = simde__m512i_to_private(idx), + a_ = simde__m512i_to_private(a), + r_; + + #if defined(SIMDE_X86_AVX2_NATIVE) + simde__m256i t0, t1, index, select, a01, a23; + simde__m256i mask = simde_mm256_set1_epi16(0x001F); + simde__m256i shift = simde_mm256_set1_epi16(0x0202); + simde__m256i byte_index = simde_mm256_set1_epi16(0x0100); + simde__m256i a0 = simde_mm256_broadcastsi128_si256(a_.m128i[0]); + simde__m256i a1 = simde_mm256_broadcastsi128_si256(a_.m128i[1]); + simde__m256i a2 = simde_mm256_broadcastsi128_si256(a_.m128i[2]); + simde__m256i a3 = simde_mm256_broadcastsi128_si256(a_.m128i[3]); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) { + index = idx_.m256i[i]; + index = simde_mm256_and_si256(index, mask); + index = simde_mm256_mullo_epi16(index, shift); + index = simde_mm256_add_epi16(index, byte_index); + t0 = simde_mm256_shuffle_epi8(a0, index); + t1 = simde_mm256_shuffle_epi8(a1, index); + select = simde_mm256_slli_epi64(index, 3); + a01 = simde_mm256_blendv_epi8(t0, t1, select); + t0 = simde_mm256_shuffle_epi8(a2, index); + t1 = simde_mm256_shuffle_epi8(a3, index); + a23 = simde_mm256_blendv_epi8(t0, t1, select); + select = simde_mm256_slli_epi64(index, 2); + r_.m256i[i] = 
simde_mm256_blendv_epi8(a01, a23, select); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16x4_t table = { { a_.m128i_private[0].neon_u8, + a_.m128i_private[1].neon_u8, + a_.m128i_private[2].neon_u8, + a_.m128i_private[3].neon_u8 } }; + uint16x8_t mask16 = vdupq_n_u16(0x001F); + uint16x8_t byte_index16 = vdupq_n_u16(0x0100); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + uint16x8_t index16 = vandq_u16(idx_.m128i_private[i].neon_u16, mask16); + index16 = vmulq_n_u16(index16, 0x0202); + index16 = vaddq_u16(index16, byte_index16); + r_.m128i_private[i].neon_u8 = vqtbl4q_u8(table, vreinterpretq_u8_u16(index16)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) index16, mask16, shift16, byte_index16; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) index, test, r01, r23; + mask16 = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x001F)); + shift16 = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0202)); + byte_index16 = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0100)); + test = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 0x20)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index16 = vec_and(idx_.m128i_private[i].altivec_u16, mask16); + index16 = vec_mladd(index16, shift16, byte_index16); + index = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index16); + r01 = vec_perm(a_.m128i_private[0].altivec_u8, a_.m128i_private[1].altivec_u8, index); + r23 = vec_perm(a_.m128i_private[2].altivec_u8, a_.m128i_private[3].altivec_u8, index); + r_.m128i_private[i].altivec_u8 = vec_sel(r01, r23, vec_cmpeq(vec_and(index, test), test)); + } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t index, r, t; + const v128_t mask = wasm_i16x8_splat(0x001F); + const v128_t shift = wasm_i16x8_splat(0x0202); + const v128_t byte_index = wasm_i16x8_splat(0x0100); + const v128_t sixteen = wasm_i8x16_splat(16); + const v128_t a0 = a_.m128i_private[0].wasm_v128; + const v128_t a1 = a_.m128i_private[1].wasm_v128; + const v128_t a2 = a_.m128i_private[2].wasm_v128; + const v128_t a3 = a_.m128i_private[3].wasm_v128; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); + index = wasm_i16x8_mul(index, shift); + index = wasm_i16x8_add(index, byte_index); + r = wasm_v8x16_swizzle(a0, index); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a1, index); + r = wasm_v128_or(r, t); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a2, index); + r = wasm_v128_or(r, t); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a3, index); + r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = a_.i16[idx_.i16[i] & 0x1F]; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_permutexvar_epi16 + #define _mm512_permutexvar_epi16(idx, a) simde_mm512_permutexvar_epi16(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_permutexvar_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i idx, simde__m512i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_permutexvar_epi16(src, k, idx, a); + #else + return 
simde_mm512_mask_mov_epi16(src, k, simde_mm512_permutexvar_epi16(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_permutexvar_epi16 + #define _mm512_mask_permutexvar_epi16(src, k, idx, a) simde_mm512_mask_permutexvar_epi16(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_permutexvar_epi16 (simde__mmask32 k, simde__m512i idx, simde__m512i a) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_permutexvar_epi16(k, idx, a); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_permutexvar_epi16(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_permutexvar_epi16 + #define _mm512_maskz_permutexvar_epi16(k, idx, a) simde_mm512_maskz_permutexvar_epi16(k, idx, a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_permutexvar_epi32 (simde__m512i idx, simde__m512i a) { @@ -46,12 +763,100 @@ simde_mm512_permutexvar_epi32 (simde__m512i idx, simde__m512i a) { a_ = simde__m512i_to_private(a), r_; - #if !defined(__INTEL_COMPILER) + #if defined(SIMDE_X86_AVX2_NATIVE) + simde__m256i index, r0, r1, select; SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) { + index = idx_.m256i[i]; + r0 = simde_mm256_permutevar8x32_epi32(a_.m256i[0], index); + r1 = simde_mm256_permutevar8x32_epi32(a_.m256i[1], index); + select = simde_mm256_slli_epi32(index, 28); + r_.m256i[i] = simde_mm256_castps_si256(simde_mm256_blendv_ps(simde_mm256_castsi256_ps(r0), + simde_mm256_castsi256_ps(r1), + simde_mm256_castsi256_ps(select))); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16x4_t table = { { a_.m128i_private[0].neon_u8, + a_.m128i_private[1].neon_u8, + a_.m128i_private[2].neon_u8, + a_.m128i_private[3].neon_u8 } }; + uint32x4_t mask32 = vdupq_n_u32(0x0000000F); + uint32x4_t byte_index32 = vdupq_n_u32(0x03020100); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + uint32x4_t index32 = vandq_u32(idx_.m128i_private[i].neon_u32, mask32); + index32 = vmulq_n_u32(index32, 0x04040404); + index32 = vaddq_u32(index32, byte_index32); + r_.m128i_private[i].neon_u8 = vqtbl4q_u8(table, vreinterpretq_u8_u32(index32)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) index32, mask32, byte_index32, temp32, sixteen; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) zero, shift; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) index, test, r01, r23; + mask32 = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0x0000000F)); + byte_index32 = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0x03020100)); + zero = vec_splat_u16(0); + shift = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0404)); + sixteen = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 16)); + test = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 0x20)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index32 = vec_and(idx_.m128i_private[i].altivec_u32, mask32); + + /* Multiply index32 by 0x04040404; unfortunately vec_mul isn't available so (mis)use 16-bit vec_mladd */ + temp32 = vec_sl(index32, sixteen); + index32 = vec_add(index32, temp32); + index32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), + vec_mladd(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), index32), + shift, + zero)); + + index32 = vec_add(index32, byte_index32); + index = 
HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index32); + r01 = vec_perm(a_.m128i_private[0].altivec_u8, a_.m128i_private[1].altivec_u8, index); + r23 = vec_perm(a_.m128i_private[2].altivec_u8, a_.m128i_private[3].altivec_u8, index); + r_.m128i_private[i].altivec_u8 = vec_sel(r01, r23, vec_cmpeq(vec_and(index, test), test)); + } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t index, r, t; + const v128_t mask = wasm_i32x4_splat(0x0000000F); + const v128_t shift = wasm_i32x4_splat(0x04040404); + const v128_t byte_index = wasm_i32x4_splat(0x03020100); + const v128_t sixteen = wasm_i8x16_splat(16); + const v128_t a0 = a_.m128i_private[0].wasm_v128; + const v128_t a1 = a_.m128i_private[1].wasm_v128; + const v128_t a2 = a_.m128i_private[2].wasm_v128; + const v128_t a3 = a_.m128i_private[3].wasm_v128; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); + index = wasm_i32x4_mul(index, shift); + index = wasm_i32x4_add(index, byte_index); + r = wasm_v8x16_swizzle(a0, index); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a1, index); + r = wasm_v128_or(r, t); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a2, index); + r = wasm_v128_or(r, t); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a3, index); + r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); + } + #else + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = a_.i32[idx_.i32[i] & 0x0F]; + } #endif - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = a_.i32[idx_.i32[i] & 0x0F]; - } return simde__m512i_from_private(r_); #endif @@ -144,24 +949,154 @@ simde_mm512_maskz_permutexvar_epi64 (simde__mmask8 k, simde__m512i idx, simde__m #endif SIMDE_FUNCTION_ATTRIBUTES -simde__m512d -simde_mm512_permutexvar_pd (simde__m512i idx, simde__m512d a) { - #if defined(SIMDE_X86_AVX512F_NATIVE) - return _mm512_permutexvar_pd(idx, a); +simde__m512i +simde_mm512_permutexvar_epi8 (simde__m512i idx, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_permutexvar_epi8(idx, a); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + simde__m512i hilo, hi, lo, hi2, lo2, idx2; + simde__m512i ones = simde_mm512_set1_epi8(1); + simde__m512i low_bytes = simde_mm512_set1_epi16(0x00FF); + + idx2 = simde_mm512_srli_epi16(idx, 1); + hilo = simde_mm512_permutexvar_epi16(idx2, a); + simde__mmask64 mask = simde_mm512_test_epi8_mask(idx, ones); + lo = simde_mm512_and_si512(hilo, low_bytes); + hi = simde_mm512_srli_epi16(hilo, 8); + + idx2 = simde_mm512_srli_epi16(idx, 9); + hilo = simde_mm512_permutexvar_epi16(idx2, a); + lo2 = simde_mm512_slli_epi16(hilo, 8); + hi2 = simde_mm512_andnot_si512(low_bytes, hilo); + + lo = simde_mm512_or_si512(lo, lo2); + hi = simde_mm512_or_si512(hi, hi2); + + return simde_mm512_mask_blend_epi8(mask, lo, hi); #else - simde__m512i_private idx_ = simde__m512i_to_private(idx); - simde__m512d_private - a_ = simde__m512d_to_private(a), + simde__m512i_private + idx_ = simde__m512i_to_private(idx), + a_ = simde__m512i_to_private(a), r_; - #if !defined(__INTEL_COMPILER) + #if defined(SIMDE_X86_AVX2_NATIVE) + simde__m256i t0, t1, index, select, a01, a23; + simde__m256i mask = simde_mm256_set1_epi8(0x3F); + simde__m256i a0 = simde_mm256_broadcastsi128_si256(a_.m128i[0]); + 
simde__m256i a1 = simde_mm256_broadcastsi128_si256(a_.m128i[1]); + simde__m256i a2 = simde_mm256_broadcastsi128_si256(a_.m128i[2]); + simde__m256i a3 = simde_mm256_broadcastsi128_si256(a_.m128i[3]); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) { + index = idx_.m256i[i]; + index = simde_mm256_and_si256(index, mask); + select = simde_mm256_slli_epi64(index, 3); + t0 = simde_mm256_shuffle_epi8(a0, index); + t1 = simde_mm256_shuffle_epi8(a1, index); + a01 = simde_mm256_blendv_epi8(t0, t1, select); + t0 = simde_mm256_shuffle_epi8(a2, index); + t1 = simde_mm256_shuffle_epi8(a3, index); + a23 = simde_mm256_blendv_epi8(t0, t1, select); + select = simde_mm256_slli_epi64(index, 2); + r_.m256i[i] = simde_mm256_blendv_epi8(a01, a23, select); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x16x4_t table = { { a_.m128i_private[0].neon_u8, + a_.m128i_private[1].neon_u8, + a_.m128i_private[2].neon_u8, + a_.m128i_private[3].neon_u8 } }; + uint8x16_t mask = vdupq_n_u8(0x3F); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].neon_u8 = vqtbl4q_u8(table, vandq_u8(idx_.m128i_private[i].neon_u8, mask)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) test, r01, r23; + test = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 0x20)); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r01 = vec_perm(a_.m128i_private[0].altivec_u8, a_.m128i_private[1].altivec_u8, idx_.m128i_private[i].altivec_u8); + r23 = vec_perm(a_.m128i_private[2].altivec_u8, a_.m128i_private[3].altivec_u8, idx_.m128i_private[i].altivec_u8); + r_.m128i_private[i].altivec_u8 = vec_sel(r01, r23, vec_cmpeq(vec_and(idx_.m128i_private[i].altivec_u8, test), test)); + } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t index, r, t; + const v128_t mask = wasm_i8x16_splat(0x3F); + const v128_t sixteen = wasm_i8x16_splat(16); + const v128_t a0 = a_.m128i_private[0].wasm_v128; + const v128_t a1 = a_.m128i_private[1].wasm_v128; + const v128_t a2 = a_.m128i_private[2].wasm_v128; + const v128_t a3 = a_.m128i_private[3].wasm_v128; + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); + r = wasm_v8x16_swizzle(a0, index); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a1, index); + r = wasm_v128_or(r, t); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a2, index); + r = wasm_v128_or(r, t); + + index = wasm_i8x16_sub(index, sixteen); + t = wasm_v8x16_swizzle(a3, index); + r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = a_.i8[idx_.i8[i] & 0x3F]; + } #endif - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = a_.f64[idx_.i64[i] & 7]; - } - return simde__m512d_from_private(r_); + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_permutexvar_epi8 + #define _mm512_permutexvar_epi8(idx, a) simde_mm512_permutexvar_epi8(idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_permutexvar_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i idx, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return 
_mm512_mask_permutexvar_epi8(src, k, idx, a); + #else + return simde_mm512_mask_mov_epi8(src, k, simde_mm512_permutexvar_epi8(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_permutexvar_epi8 + #define _mm512_mask_permutexvar_epi8(src, k, idx, a) simde_mm512_mask_permutexvar_epi8(src, k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_permutexvar_epi8 (simde__mmask64 k, simde__m512i idx, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_maskz_permutexvar_epi8(k, idx, a); + #else + return simde_mm512_maskz_mov_epi8(k, simde_mm512_permutexvar_epi8(idx, a)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_permutexvar_epi8 + #define _mm512_maskz_permutexvar_epi8(k, idx, a) simde_mm512_maskz_permutexvar_epi8(k, idx, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_permutexvar_pd (simde__m512i idx, simde__m512d a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_permutexvar_pd(idx, a); + #else + return simde_mm512_castsi512_pd(simde_mm512_permutexvar_epi64(idx, simde_mm512_castpd_si512(a))); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) @@ -203,19 +1138,7 @@ simde_mm512_permutexvar_ps (simde__m512i idx, simde__m512 a) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_permutexvar_ps(idx, a); #else - simde__m512i_private idx_ = simde__m512i_to_private(idx); - simde__m512_private - a_ = simde__m512_to_private(a), - r_; - - #if !defined(__INTEL_COMPILER) - SIMDE_VECTORIZE - #endif - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = a_.f32[idx_.i32[i] & 0x0F]; - } - - return simde__m512_from_private(r_); + return simde_mm512_castsi512_ps(simde_mm512_permutexvar_epi32(idx, simde_mm512_castps_si512(a))); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/set.h b/lib/mmseqs/lib/simde/simde/x86/avx512/set.h index 61985bc..59d6039 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/set.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/set.h @@ -388,7 +388,7 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_x_mm512_set_m128i (simde__m128i a, simde__m128i b, simde__m128i c, simde__m128i d) { #if defined(SIMDE_X86_AVX512F_NATIVE) - SIMDE_ALIGN(64) simde__m128i v[] = { d, c, b, a }; + SIMDE_ALIGN_LIKE_16(simde__m128i) simde__m128i v[] = { d, c, b, a }; return simde_mm512_load_si512(HEDLEY_STATIC_CAST(__m512i *, HEDLEY_STATIC_CAST(void *, v))); #else simde__m512i_private r_; @@ -406,7 +406,7 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_x_mm512_set_m256i (simde__m256i a, simde__m256i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) - SIMDE_ALIGN(64) simde__m256i v[] = { b, a }; + SIMDE_ALIGN_LIKE_32(simde__m256i) simde__m256i v[] = { b, a }; return simde_mm512_load_si512(HEDLEY_STATIC_CAST(__m512i *, HEDLEY_STATIC_CAST(void *, v))); #else simde__m512i_private r_; diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/set1.h b/lib/mmseqs/lib/simde/simde/x86/avx512/set1.h index c4301e4..82c9c8c 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/set1.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/set1.h @@ -66,7 +66,8 @@ simde_mm512_mask_set1_epi8(simde__m512i src, simde__mmask64 k, int8_t a) { #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_set1_epi8(src, k, a) simde_mm512_mask_set1_epi8(src, k, a) + #undef _mm512_mask_set1_epi8 + #define _mm512_mask_set1_epi8(src, k, a) simde_mm512_mask_set1_epi8(src, 
k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -79,7 +80,8 @@ simde_mm512_maskz_set1_epi8(simde__mmask64 k, int8_t a) { #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_set1_epi8(k, a) simde_mm512_maskz_set1_epi8(k, a) + #undef _mm512_maskz_set1_epi8 + #define _mm512_maskz_set1_epi8(k, a) simde_mm512_maskz_set1_epi8(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -113,7 +115,8 @@ simde_mm512_mask_set1_epi16(simde__m512i src, simde__mmask32 k, int16_t a) { #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_set1_epi16(src, k, a) simde_mm512_mask_set1_epi16(src, k, a) + #undef _mm512_mask_set1_epi16 + #define _mm512_mask_set1_epi16(src, k, a) simde_mm512_mask_set1_epi16(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -126,7 +129,8 @@ simde_mm512_maskz_set1_epi16(simde__mmask32 k, int16_t a) { #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_set1_epi16(k, a) simde_mm512_maskz_set1_epi16(k, a) + #undef _mm512_maskz_set1_epi16 + #define _mm512_maskz_set1_epi16(k, a) simde_mm512_maskz_set1_epi16(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -160,7 +164,8 @@ simde_mm512_mask_set1_epi32(simde__m512i src, simde__mmask16 k, int32_t a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_set1_epi32(src, k, a) simde_mm512_mask_set1_epi32(src, k, a) + #undef _mm512_mask_set1_epi32 + #define _mm512_mask_set1_epi32(src, k, a) simde_mm512_mask_set1_epi32(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -173,7 +178,8 @@ simde_mm512_maskz_set1_epi32(simde__mmask16 k, int32_t a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_set1_epi32(k, a) simde_mm512_maskz_set1_epi32(k, a) + #undef _mm512_maskz_set1_epi32 + #define _mm512_maskz_set1_epi32(k, a) simde_mm512_maskz_set1_epi32(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -207,7 +213,8 @@ simde_mm512_mask_set1_epi64(simde__m512i src, simde__mmask8 k, int64_t a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_mask_set1_epi64(src, k, a) simde_mm512_mask_set1_epi64(src, k, a) + #undef _mm512_mask_set1_epi64 + #define _mm512_mask_set1_epi64(src, k, a) simde_mm512_mask_set1_epi64(src, k, a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -220,7 +227,8 @@ simde_mm512_maskz_set1_epi64(simde__mmask8 k, int64_t a) { #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) -#define _mm512_maskz_set1_epi64(k, a) simde_mm512_maskz_set1_epi64(k, a) + #undef _mm512_maskz_set1_epi64 + #define _mm512_maskz_set1_epi64(k, a) simde_mm512_maskz_set1_epi64(k, a) #endif SIMDE_FUNCTION_ATTRIBUTES diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/shuffle.h b/lib/mmseqs/lib/simde/simde/x86/avx512/shuffle.h index 54914b2..b4f23b5 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/shuffle.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/shuffle.h @@ -94,6 +94,82 @@ simde_mm512_maskz_shuffle_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b #define _mm512_maskz_shuffle_epi8(k, a, b) simde_mm512_maskz_shuffle_epi8(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_shuffle_i32x4 (simde__m256i a, simde__m256i b, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + r_.m128i[0] = a_.m128i[ imm8 & 1]; + r_.m128i[1] = b_.m128i[(imm8 >> 1) & 1]; + + return simde__m256i_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define 
simde_mm256_shuffle_i32x4(a, b, imm8) _mm256_shuffle_i32x4(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_shuffle_i32x4 + #define _mm256_shuffle_i32x4(a, b, imm8) simde_mm256_shuffle_i32x4(a, b, imm8) +#endif + +#define simde_mm256_maskz_shuffle_i32x4(k, a, b, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_shuffle_i32x4(a, b, imm8)) +#define simde_mm256_mask_shuffle_i32x4(src, k, a, b, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_shuffle_i32x4(a, b, imm8)) + +#define simde_mm256_shuffle_f32x4(a, b, imm8) simde_mm256_castsi256_ps(simde_mm256_shuffle_i32x4(simde_mm256_castps_si256(a), simde_mm256_castps_si256(b), imm8)) +#define simde_mm256_maskz_shuffle_f32x4(k, a, b, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_shuffle_f32x4(a, b, imm8)) +#define simde_mm256_mask_shuffle_f32x4(src, k, a, b, imm8) simde_mm256_mask_mov_ps(src, k, simde_mm256_shuffle_f32x4(a, b, imm8)) + +#define simde_mm256_shuffle_i64x2(a, b, imm8) simde_mm256_shuffle_i32x4(a, b, imm8) +#define simde_mm256_maskz_shuffle_i64x2(k, a, b, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_shuffle_i64x2(a, b, imm8)) +#define simde_mm256_mask_shuffle_i64x2(src, k, a, b, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_shuffle_i64x2(a, b, imm8)) + +#define simde_mm256_shuffle_f64x2(a, b, imm8) simde_mm256_castsi256_pd(simde_mm256_shuffle_i64x2(simde_mm256_castpd_si256(a), simde_mm256_castpd_si256(b), imm8)) +#define simde_mm256_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_shuffle_f64x2(a, b, imm8)) +#define simde_mm256_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm256_mask_mov_pd(src, k, simde_mm256_shuffle_f64x2(a, b, imm8)) + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_shuffle_i32x4 (simde__m512i a, simde__m512i b, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + r_.m128i[0] = a_.m128i[ imm8 & 3]; + r_.m128i[1] = a_.m128i[(imm8 >> 2) & 3]; + r_.m128i[2] = b_.m128i[(imm8 >> 4) & 3]; + r_.m128i[3] = b_.m128i[(imm8 >> 6) & 3]; + + return simde__m512i_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_shuffle_i32x4(a, b, imm8) _mm512_shuffle_i32x4(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_shuffle_i32x4 + #define _mm512_shuffle_i32x4(a, b, imm8) simde_mm512_shuffle_i32x4(a, b, imm8) +#endif + +#define simde_mm512_maskz_shuffle_i32x4(k, a, b, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_shuffle_i32x4(a, b, imm8)) +#define simde_mm512_mask_shuffle_i32x4(src, k, a, b, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_shuffle_i32x4(a, b, imm8)) + +#define simde_mm512_shuffle_f32x4(a, b, imm8) simde_mm512_castsi512_ps(simde_mm512_shuffle_i32x4(simde_mm512_castps_si512(a), simde_mm512_castps_si512(b), imm8)) +#define simde_mm512_maskz_shuffle_f32x4(k, a, b, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_shuffle_f32x4(a, b, imm8)) +#define simde_mm512_mask_shuffle_f32x4(src, k, a, b, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_shuffle_f32x4(a, b, imm8)) + +#define simde_mm512_shuffle_i64x2(a, b, imm8) simde_mm512_shuffle_i32x4(a, b, imm8) +#define simde_mm512_maskz_shuffle_i64x2(k, a, b, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_shuffle_i64x2(a, b, imm8)) +#define simde_mm512_mask_shuffle_i64x2(src, k, a, b, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_shuffle_i64x2(a, b, 
imm8)) + +#define simde_mm512_shuffle_f64x2(a, b, imm8) simde_mm512_castsi512_pd(simde_mm512_shuffle_i64x2(simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b), imm8)) +#define simde_mm512_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_shuffle_f64x2(a, b, imm8)) +#define simde_mm512_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_shuffle_f64x2(a, b, imm8)) + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/sll.h b/lib/mmseqs/lib/simde/simde/x86/avx512/sll.h index f1eef7d..8cc9446 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/sll.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/sll.h @@ -77,6 +77,34 @@ simde_mm512_sll_epi16 (simde__m512i a, simde__m128i count) { #define _mm512_sll_epi16(a, count) simde_mm512_sll_epi16(a, count) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_sll_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m128i count) { + #if defined(SIMDE_X86_AVX51BW_NATIVE) + return _mm512_mask_sll_epi16(src, k, a, count); + #else + return simde_mm512_mask_mov_epi16(src, k, simde_mm512_sll_epi16(a, count)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_sll_epi16 + #define _mm512_mask_sll_epi16(src, k, a, count) simde_mm512_mask_sll_epi16(src, k, a, count) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_sll_epi16 (simde__mmask32 k, simde__m512i a, simde__m128i count) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_sll_epi16(k, a, count); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_sll_epi16(a, count)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_sll_epi16 + #define _mm512_maskz_sll_epi16(src, k, a, count) simde_mm512_maskz_sll_epi16(src, k, a, count) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_sll_epi32 (simde__m512i a, simde__m128i count) { diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/slli.h b/lib/mmseqs/lib/simde/simde/x86/avx512/slli.h index e752e5e..a51b421 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/slli.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/slli.h @@ -40,7 +40,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde__m512i -simde_mm512_slli_epi16 (simde__m512i a, const int imm8) +simde_mm512_slli_epi16 (simde__m512i a, const unsigned int imm8) SIMDE_REQUIRE_RANGE(imm8, 0, 255) { #if defined(SIMDE_X86_AVX512BW_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4))) simde__m512i r; diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/srl.h b/lib/mmseqs/lib/simde/simde/x86/avx512/srl.h index d5c6803..31e3fa1 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/srl.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/srl.h @@ -31,6 +31,7 @@ #include "types.h" #include "../avx2.h" #include "mov.h" +#include "setzero.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -54,14 +55,15 @@ simde_mm512_srl_epi16 (simde__m512i a, simde__m128i count) { simde__m128i_private count_ = simde__m128i_to_private(count); - uint64_t shift = HEDLEY_STATIC_CAST(uint64_t , (count_.i64[0] > 16 ? 
16 : count_.i64[0])); + if (HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]) > 15) + return simde_mm512_setzero_si512(); #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u16 = a_.u16 >> HEDLEY_STATIC_CAST(int16_t, shift); + r_.u16 = a_.u16 >> count_.i64[0]; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.u16[i] = a_.u16[i] >> (shift); + r_.u16[i] = a_.u16[i] >> count_.i64[0]; } #endif #endif @@ -92,16 +94,15 @@ simde_mm512_srl_epi32 (simde__m512i a, simde__m128i count) { simde__m128i_private count_ = simde__m128i_to_private(count); - uint64_t shift = HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]); - if (shift > 31) + if (HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]) > 31) return simde_mm512_setzero_si512(); #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.i32 = a_.i32 >> HEDLEY_STATIC_CAST(int32_t, shift); + r_.u32 = a_.u32 >> count_.i64[0]; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i32[i] >> (shift)); + r_.u32[i] = a_.u32[i] >> count_.i64[0]; } #endif #endif @@ -160,16 +161,15 @@ simde_mm512_srl_epi64 (simde__m512i a, simde__m128i count) { simde__m128i_private count_ = simde__m128i_to_private(count); - uint64_t shift = HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]); - if (shift > 63) + if (HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]) > 63) return simde_mm512_setzero_si512(); #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.i64 = a_.i64 >> HEDLEY_STATIC_CAST(int64_t, shift); + r_.u64 = a_.u64 >> count_.i64[0]; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i64[i] >> (shift)); + r_.u64[i] = a_.u64[i] >> count_.i64[0]; } #endif #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/srli.h b/lib/mmseqs/lib/simde/simde/x86/avx512/srli.h index c699fde..588e8c9 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/srli.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/srli.h @@ -39,7 +39,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde__m512i -simde_mm512_srli_epi16 (simde__m512i a, const int imm8) +simde_mm512_srli_epi16 (simde__m512i a, const unsigned int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { #if defined(SIMDE_X86_AVX512BW_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4))) simde__m512i r; diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/srlv.h b/lib/mmseqs/lib/simde/simde/x86/avx512/srlv.h index a7b7cf7..203342f 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/srlv.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/srlv.h @@ -29,6 +29,7 @@ #define SIMDE_X86_AVX512_SRLV_H #include "types.h" +#include "../avx2.h" #include "mov.h" HEDLEY_DIAGNOSTIC_PUSH @@ -36,32 +37,245 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES -simde__m512i -simde_mm512_srlv_epi16 (simde__m512i a, simde__m512i b) { - simde__m512i_private - a_ = simde__m512i_to_private(a), - b_ = simde__m512i_to_private(b), - r_; +simde__m128i +simde_mm_srlv_epi16 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX256VL_NATIVE) && defined(SIMDE_X86_AVX256BW_NATIVE) + return _mm_srlv_epi16(a, b); + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = (b_.u16[i] < 16) ? (a_.u16[i] >> b_.u16[i]) : 0; + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_srlv_epi16 + #define _mm_srlv_epi16(a, b) simde_mm_srlv_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_srlv_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_srlv_epi16(src, k, a, b); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_srlv_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_srlv_epi16 + #define _mm_mask_srlv_epi16(src, k, a, b) simde_mm_mask_srlv_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_srlv_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_maskz_srlv_epi16(k, a, b); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_srlv_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_srlv_epi16 + #define _mm_maskz_srlv_epi16(k, a, b) simde_mm_maskz_srlv_epi16(k, a, b) +#endif - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_srlv_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_srlv_epi32(src, k, a, b); #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = (b_.u16[i] < 16) ? 
(a_.u16[i] >> b_.u16[i]) : 0; - } + return simde_mm_mask_mov_epi32(src, k, simde_mm_srlv_epi32(a, b)); #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_srlv_epi32 + #define _mm_mask_srlv_epi32(src, k, a, b) simde_mm_mask_srlv_epi32(src, k, a, b) +#endif - return simde__m512i_from_private(r_); +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_srlv_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_srlv_epi32(k, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_srlv_epi32(a, b)); + #endif } -#if defined(SIMDE_X86_AVX512BW_NATIVE) - #define simde_mm512_srlv_epi16(a, b) _mm512_srlv_epi16(a, b) +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_srlv_epi32 + #define _mm_maskz_srlv_epi32(k, a, b) simde_mm_maskz_srlv_epi32(k, a, b) #endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_srlv_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_srlv_epi64(src, k, a, b); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_srlv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_srlv_epi64 + #define _mm_mask_srlv_epi64(src, k, a, b) simde_mm_mask_srlv_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_srlv_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_srlv_epi64(k, a, b); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_srlv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_srlv_epi64 + #define _mm_maskz_srlv_epi64(k, a, b) simde_mm_maskz_srlv_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_srlv_epi16 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX256VL_NATIVE) && defined(SIMDE_X86_AVX256BW_NATIVE) + return _mm256_srlv_epi16(a, b); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b), + r_; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_srlv_epi16(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = (b_.u16[i] < 16) ? 
(a_.u16[i] >> b_.u16[i]) : 0; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_srlv_epi16 + #define _mm256_srlv_epi16(a, b) simde_mm256_srlv_epi16(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_srlv_epi16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_srlv_epi16(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_srlv_epi16(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = (b_.u16[i] < 16) ? (a_.u16[i] >> b_.u16[i]) : 0; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm512_srlv_epi16 #define _mm512_srlv_epi16(a, b) simde_mm512_srlv_epi16(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_srlv_epi32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_srlv_epi32(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_srlv_epi32(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 >> b_.u32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (b_.u32[i] < 32) ? (a_.u32[i] >> b_.u32[i]) : 0; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_srlv_epi32 + #define _mm512_srlv_epi32(a, b) simde_mm512_srlv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_srlv_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_srlv_epi64(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_srlv_epi64(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 >> b_.u64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (b_.u64[i] < 64) ? 
(a_.u64[i] >> b_.u64[i]) : 0; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_srlv_epi64 + #define _mm512_srlv_epi64(a, b) simde_mm512_srlv_epi64(a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/store.h b/lib/mmseqs/lib/simde/simde/x86/avx512/store.h index 76d7cdd..1f1538b 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/store.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/store.h @@ -39,7 +39,7 @@ simde_mm512_store_ps (void * mem_addr, simde__m512 a) { #if defined(SIMDE_X86_AVX512F_NATIVE) _mm512_store_ps(mem_addr, a); #else - simde_memcpy(SIMDE_ASSUME_ALIGNED_AS(simde__m512, mem_addr), &a, sizeof(a)); + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512), &a, sizeof(a)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) @@ -53,7 +53,7 @@ simde_mm512_store_pd (void * mem_addr, simde__m512d a) { #if defined(SIMDE_X86_AVX512F_NATIVE) _mm512_store_pd(mem_addr, a); #else - simde_memcpy(SIMDE_ASSUME_ALIGNED_AS(simde__m512d, mem_addr), &a, sizeof(a)); + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512d), &a, sizeof(a)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) @@ -67,7 +67,7 @@ simde_mm512_store_si512 (void * mem_addr, simde__m512i a) { #if defined(SIMDE_X86_AVX512F_NATIVE) _mm512_store_si512(HEDLEY_REINTERPRET_CAST(void*, mem_addr), a); #else - simde_memcpy(SIMDE_ASSUME_ALIGNED_AS(simde__m512i, mem_addr), &a, sizeof(a)); + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512i), &a, sizeof(a)); #endif } #define simde_mm512_store_epi8(mem_addr, a) simde_mm512_store_si512(mem_addr, a) diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/subs.h b/lib/mmseqs/lib/simde/simde/x86/avx512/subs.h index 0e1ec7e..114ecf1 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/subs.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/subs.h @@ -55,10 +55,7 @@ simde_mm512_subs_epi8 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - const int16_t tmp = - HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) - - HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); - r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? tmp : INT8_MIN) : INT8_MAX)); + r_.i8[i] = simde_math_subs_i8(a_.i8[i], b_.i8[i]); } #endif @@ -117,10 +114,7 @@ simde_mm512_subs_epi16 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - const int32_t tmp = - HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) - - HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? tmp : INT16_MIN) : INT16_MAX)); + r_.i16[i] = simde_math_subs_i16(a_.i16[i], b_.i16[i]); } #endif @@ -151,7 +145,7 @@ simde_mm512_subs_epu8 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? (a_.u8[i] - b_.u8[i]) : UINT8_C(0); + r_.u8[i] = simde_math_subs_u8(a_.u8[i], b_.u8[i]); } #endif @@ -210,7 +204,7 @@ simde_mm512_subs_epu16 (simde__m512i a, simde__m512i b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = (a_.u16[i] > b_.u16[i]) ? 
(a_.u16[i] - b_.u16[i]) : UINT16_C(0); + r_.u16[i] = simde_math_subs_u16(a_.u16[i], b_.u16[i]); } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/test.h b/lib/mmseqs/lib/simde/simde/x86/avx512/test.h index 99ee329..df1fa12 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/test.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/test.h @@ -23,23 +23,47 @@ * Copyright: * 2020 Evan Nemerson * 2020 Hidayat Khan + * 2020 Christopher Moore */ #if !defined(SIMDE_X86_AVX512_TEST_H) #define SIMDE_X86_AVX512_TEST_H #include "types.h" -#include "mov.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_test_epi16_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_test_epi16_mask(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + simde__mmask32 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask32, !!(a_.i16[i] & b_.i16[i]) << i); + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_test_epi16_mask + #define _mm512_test_epi16_mask(a, b) simde_mm512_test_epi16_mask(a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__mmask16 -simde_mm512_mask_test_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512i b) { +simde_mm512_test_epi32_mask (simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) - return _mm512_mask_test_epi32_mask(k1, a, b); + return _mm512_test_epi32_mask(a, b); #else simde__m512i_private a_ = simde__m512i_to_private(a), @@ -51,19 +75,19 @@ simde_mm512_mask_test_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512 r |= HEDLEY_STATIC_CAST(simde__mmask16, !!(a_.i32[i] & b_.i32[i]) << i); } - return r & k1; + return r; #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) - #undef _mm512_mask_test_epi32_mask - #define _mm512_mask_test_epi32_mask(k1, a, b) simde_mm512_mask_test_epi32_mask(k1, a, b) + #undef _mm512_test_epi32_mask +#define _mm512_test_epi32_mask(a, b) simde_mm512_test_epi32_mask(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__mmask8 -simde_mm512_mask_test_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i b) { +simde_mm512_test_epi64_mask (simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) - return _mm512_mask_test_epi64_mask(k1, a, b); + return _mm512_test_epi64_mask(a, b); #else simde__m512i_private a_ = simde__m512i_to_private(a), @@ -72,10 +96,76 @@ simde_mm512_mask_test_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { - r |= !!(a_.i64[i] & b_.i64[i]) << i; + r |= HEDLEY_STATIC_CAST(simde__mmask8, !!(a_.i64[i] & b_.i64[i]) << i); } - return r & k1; + return r; + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_test_epi64_mask + #define _mm512_test_epi64_mask(a, b) simde_mm512_test_epi64_mask(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_test_epi8_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_test_epi8_mask(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + simde__mmask64 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r |= 
HEDLEY_STATIC_CAST(simde__mmask64, HEDLEY_STATIC_CAST(uint64_t, !!(a_.i8[i] & b_.i8[i])) << i); + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_test_epi8_mask + #define _mm512_test_epi8_mask(a, b) simde_mm512_test_epi8_mask(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_mask_test_epi16_mask (simde__mmask32 k1, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_test_epi16_mask(k1, a, b); + #else + return simde_mm512_test_epi16_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_test_epi16_mask + #define _mm512_mask_test_epi16_mask(k1, a, b) simde_mm512_mask_test_epi16_mask(k1, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_mask_test_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_test_epi32_mask(k1, a, b); + #else + return simde_mm512_test_epi32_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_test_epi32_mask + #define _mm512_mask_test_epi32_mask(k1, a, b) simde_mm512_mask_test_epi32_mask(k1, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_mask_test_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_test_epi64_mask(k1, a, b); + #else + return simde_mm512_test_epi64_mask(a, b) & k1; #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) @@ -83,6 +173,20 @@ simde_mm512_mask_test_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i #define _mm512_mask_test_epi64_mask(k1, a, b) simde_mm512_mask_test_epi64_mask(k1, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_mask_test_epi8_mask (simde__mmask64 k1, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_test_epi8_mask(k1, a, b); + #else + return simde_mm512_test_epi8_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_test_epi8_mask + #define _mm512_mask_test_epi8_mask(k1, a, b) simde_mm512_mask_test_epi8_mask(k1, a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/types.h b/lib/mmseqs/lib/simde/simde/x86/avx512/types.h index c0faef8..7df5204 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/types.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/types.h @@ -53,9 +53,9 @@ SIMDE_BEGIN_DECLS_ # undef SIMDE_X86_AVX512F_NATIVE # pragma message("Native AVX-512 support requires MSVC 2017 or later. 
See comment above (in code) for details.") # endif -# define SIMDE_AVX512_ALIGN SIMDE_ALIGN(32) +# define SIMDE_AVX512_ALIGN SIMDE_ALIGN_TO_32 # else -# define SIMDE_AVX512_ALIGN SIMDE_ALIGN(64) +# define SIMDE_AVX512_ALIGN SIMDE_ALIGN_TO_64 # endif typedef union { @@ -103,17 +103,17 @@ typedef union { #if defined(SIMDE_X86_AVX512F_NATIVE) SIMDE_AVX512_ALIGN __m512 n; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; #endif #endif } simde__m512_private; @@ -163,17 +163,17 @@ typedef union { #if defined(SIMDE_X86_AVX512F_NATIVE) SIMDE_AVX512_ALIGN __m512d n; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) 
altivec_u64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; #endif #endif } simde__m512d_private; @@ -223,17 +223,17 @@ typedef union { #if defined(SIMDE_X86_AVX512F_NATIVE) SIMDE_AVX512_ALIGN __m512i n; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; #endif #endif } simde__m512i_private; diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/unpackhi.h b/lib/mmseqs/lib/simde/simde/x86/avx512/unpackhi.h index 926c7eb..0ad1488 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/unpackhi.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/unpackhi.h @@ -70,6 +70,34 @@ simde_mm512_unpackhi_epi8 (simde__m512i a, simde__m512i b) { #define _mm512_unpackhi_epi8(a, b) simde_mm512_unpackhi_epi8(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_unpackhi_epi8(simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_unpackhi_epi8(src, k, a, b); + #else + return simde_mm512_mask_mov_epi8(src, k, simde_mm512_unpackhi_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpackhi_epi8 + #define _mm512_mask_unpackhi_epi8(src, k, a, b) simde_mm512_mask_unpackhi_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_unpackhi_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_unpackhi_epi8(k, a, b); + #else + return simde_mm512_maskz_mov_epi8(k, simde_mm512_unpackhi_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpackhi_epi8 + #define _mm512_maskz_unpackhi_epi8(k, a, b) simde_mm512_maskz_unpackhi_epi8(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_unpackhi_epi16 (simde__m512i a, simde__m512i b) { @@ -98,6 +126,34 @@ simde_mm512_unpackhi_epi16 (simde__m512i a, simde__m512i 
b) { #define _mm512_unpackhi_epi16(a, b) simde_mm512_unpackhi_epi16(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_unpackhi_epi16(simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_unpackhi_epi16(src, k, a, b); + #else + return simde_mm512_mask_mov_epi16(src, k, simde_mm512_unpackhi_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpackhi_epi16 + #define _mm512_mask_unpackhi_epi16(src, k, a, b) simde_mm512_mask_unpackhi_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_unpackhi_epi16(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_unpackhi_epi16(k, a, b); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_unpackhi_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpackhi_epi16 + #define _mm512_maskz_unpackhi_epi16(k, a, b) simde_mm512_maskz_unpackhi_epi16(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_unpackhi_epi32 (simde__m512i a, simde__m512i b) { diff --git a/lib/mmseqs/lib/simde/simde/x86/avx512/xor.h b/lib/mmseqs/lib/simde/simde/x86/avx512/xor.h index bf56d72..94f50d7 100644 --- a/lib/mmseqs/lib/simde/simde/x86/avx512/xor.h +++ b/lib/mmseqs/lib/simde/simde/x86/avx512/xor.h @@ -53,7 +53,7 @@ simde_mm512_xor_ps (simde__m512 a, simde__m512 b) { for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) { r_.m256[i] = simde_mm256_xor_ps(a_.m256[i], b_.m256[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.i32f = a_.i32f ^ b_.i32f; #else SIMDE_VECTORIZE @@ -84,7 +84,7 @@ simde_mm512_xor_pd (simde__m512d a, simde__m512d b) { #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256d[0] = simde_mm256_xor_pd(a_.m256d[0], b_.m256d[0]); r_.m256d[1] = simde_mm256_xor_pd(a_.m256d[1], b_.m256d[1]); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) r_.i32f = a_.i32f ^ b_.i32f; #else SIMDE_VECTORIZE @@ -176,12 +176,12 @@ simde_mm512_xor_epi64 (simde__m512i a, simde__m512i b) { for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { r_.m256i[i] = simde_mm256_xor_si256(a_.m256i[i], b_.m256i[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = a_.i64 ^ b_.i64; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) + r_.i32f = a_.i32f ^ b_.i32f; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = a_.i64[i] ^ b_.i64[i]; + for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { + r_.i32f[i] = a_.i32f[i] ^ b_.i32f[i]; } #endif diff --git a/lib/mmseqs/lib/simde/simde/x86/clmul.h b/lib/mmseqs/lib/simde/simde/x86/clmul.h new file mode 100644 index 0000000..e2bf77f --- /dev/null +++ b/lib/mmseqs/lib/simde/simde/x86/clmul.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is 
+ * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + * 2016 Thomas Pornin + */ + +/* The portable version is based on the implementation in BearSSL, + * which is MIT licensed, constant-time / branch-free, and documented + * at https://www.bearssl.org/constanttime.html (specifically, we use + * the implementation from ghash_ctmul64.c). */ + +#if !defined(SIMDE_X86_CLMUL_H) +#define SIMDE_X86_CLMUL_H + +#include "avx512/set.h" +#include "avx512/setzero.h" + +#if !defined(SIMDE_X86_PCLMUL_NATIVE) && defined(SIMDE_ENABLE_NATIVE_ALIASES) +# define SIMDE_X86_PCLMUL_ENABLE_NATIVE_ALIASES +#endif + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_x_clmul_u64(uint64_t x, uint64_t y) { + uint64_t x0, x1, x2, x3; + uint64_t y0, y1, y2, y3; + uint64_t z0, z1, z2, z3; + + x0 = x & UINT64_C(0x1111111111111111); + x1 = x & UINT64_C(0x2222222222222222); + x2 = x & UINT64_C(0x4444444444444444); + x3 = x & UINT64_C(0x8888888888888888); + y0 = y & UINT64_C(0x1111111111111111); + y1 = y & UINT64_C(0x2222222222222222); + y2 = y & UINT64_C(0x4444444444444444); + y3 = y & UINT64_C(0x8888888888888888); + + z0 = (x0 * y0) ^ (x1 * y3) ^ (x2 * y2) ^ (x3 * y1); + z1 = (x0 * y1) ^ (x1 * y0) ^ (x2 * y3) ^ (x3 * y2); + z2 = (x0 * y2) ^ (x1 * y1) ^ (x2 * y0) ^ (x3 * y3); + z3 = (x0 * y3) ^ (x1 * y2) ^ (x2 * y1) ^ (x3 * y0); + + z0 &= UINT64_C(0x1111111111111111); + z1 &= UINT64_C(0x2222222222222222); + z2 &= UINT64_C(0x4444444444444444); + z3 &= UINT64_C(0x8888888888888888); + + return z0 | z1 | z2 | z3; +} + +static uint64_t +simde_x_bitreverse_u64(uint64_t v) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint8x8_t bytes = vreinterpret_u8_u64(vmov_n_u64(v)); + bytes = vrbit_u8(bytes); + bytes = vrev64_u8(bytes); + return vget_lane_u64(vreinterpret_u64_u8(bytes), 0); + #elif defined(SIMDE_X86_GFNI_NATIVE) + /* I don't think there is (or likely will ever be) a CPU with GFNI + * but not pclmulq, but this may be useful for things other than + * _mm_clmulepi64_si128. 
*/ + __m128i vec = _mm_cvtsi64_si128(HEDLEY_STATIC_CAST(int64_t, v)); + + /* Reverse bits within each byte */ + vec = _mm_gf2p8affine_epi64_epi8(vec, _mm_cvtsi64_si128(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); + + /* Reverse bytes */ + #if defined(SIMDE_X86_SSSE3_NATIVE) + vec = _mm_shuffle_epi8(vec, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7)); + #else + vec = _mm_or_si128(_mm_slli_epi16(vec, 8), _mm_srli_epi16(vec, 8)); + vec = _mm_shufflelo_epi16(vec, _MM_SHUFFLE(0, 1, 2, 3)); + vec = _mm_shufflehi_epi16(vec, _MM_SHUFFLE(0, 1, 2, 3)); + #endif + + return HEDLEY_STATIC_CAST(uint64_t, _mm_cvtsi128_si64(vec)); + #elif HEDLEY_HAS_BUILTIN(__builtin_bitreverse64) + return __builtin_bitreverse64(v); + #else + v = ((v >> 1) & UINT64_C(0x5555555555555555)) | ((v & UINT64_C(0x5555555555555555)) << 1); + v = ((v >> 2) & UINT64_C(0x3333333333333333)) | ((v & UINT64_C(0x3333333333333333)) << 2); + v = ((v >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((v & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4); + v = ((v >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((v & UINT64_C(0x00FF00FF00FF00FF)) << 8); + v = ((v >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((v & UINT64_C(0x0000FFFF0000FFFF)) << 16); + return (v >> 32) | (v << 32); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_clmulepi64_si128 (simde__m128i a, simde__m128i b, const int imm8) + SIMDE_REQUIRE_CONSTANT(imm8) { + simde__m128i_private + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_AES) + uint64x1_t A = ((imm8) & 0x01) ? vget_high_u64(a_.neon_u64) : vget_low_u64(a_.neon_u64); + uint64x1_t B = ((imm8) & 0x10) ? vget_high_u64(b_.neon_u64) : vget_low_u64(b_.neon_u64); + #if defined(SIMDE_BUG_CLANG_48257) + HEDLEY_DIAGNOSTIC_PUSH + SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ + #endif + poly64_t A_ = vget_lane_p64(vreinterpret_p64_u64(A), 0); + poly64_t B_ = vget_lane_p64(vreinterpret_p64_u64(B), 0); + #if defined(SIMDE_BUG_CLANG_48257) + HEDLEY_DIAGNOSTIC_POP + #endif + poly128_t R = vmull_p64(A_, B_); + r_.neon_u64 = vreinterpretq_u64_p128(R); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + #if defined(SIMDE_SHUFFLE_VECTOR_) + switch (imm8 & 0x11) { + case 0x00: + b_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, b_.u64, b_.u64, 0, 0); + a_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.u64, a_.u64, 0, 0); + break; + case 0x01: + b_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, b_.u64, b_.u64, 0, 0); + a_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.u64, a_.u64, 1, 1); + break; + case 0x10: + b_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, b_.u64, b_.u64, 1, 1); + a_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.u64, a_.u64, 0, 0); + break; + case 0x11: + b_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, b_.u64, b_.u64, 1, 1); + a_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.u64, a_.u64, 1, 1); + break; + } + #else + { + const uint64_t A = a_.u64[(imm8 ) & 1]; + const uint64_t B = b_.u64[(imm8 >> 4) & 1]; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + a_.u64[i] = A; + b_.u64[i] = B; + } + } + #endif + + simde__m128i_private reversed_; + { + #if defined(SIMDE_SHUFFLE_VECTOR_) + reversed_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.u64, b_.u64, 1, 3); + #else + reversed_.u64[0] = a_.u64[1]; + reversed_.u64[1] = b_.u64[1]; + #endif + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(reversed_.u64) / sizeof(reversed_.u64[0])) ; i++) { + reversed_.u64[i] = simde_x_bitreverse_u64(reversed_.u64[i]); + } + } + + #if defined(SIMDE_SHUFFLE_VECTOR_) + 
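        /* The shuffles below place the selected operand half in lane 0 and its bit-reversed copy in lane 1 of a_ and b_, so the clmul loop that follows produces the low 64 bits of the product in r_.u64[0] and, after a final bit reversal and right shift, the high 64 bits in r_.u64[1]. */ +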
a_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.u64, reversed_.u64, 0, 2); + b_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 16, b_.u64, reversed_.u64, 1, 3); + #else + a_.u64[1] = reversed_.u64[0]; + b_.u64[1] = reversed_.u64[1]; + #endif + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(reversed_.u64) / sizeof(reversed_.u64[0])) ; i++) { + r_.u64[i] = simde_x_clmul_u64(a_.u64[i], b_.u64[i]); + } + + r_.u64[1] = simde_x_bitreverse_u64(r_.u64[1]) >> 1; + #else + r_.u64[0] = simde_x_clmul_u64( a_.u64[imm8 & 1], b_.u64[(imm8 >> 4) & 1]); + r_.u64[1] = simde_x_bitreverse_u64(simde_x_clmul_u64(simde_x_bitreverse_u64(a_.u64[imm8 & 1]), simde_x_bitreverse_u64(b_.u64[(imm8 >> 4) & 1]))) >> 1; + #endif + + return simde__m128i_from_private(r_); +} +#if defined(SIMDE_X86_PCLMUL_NATIVE) + #define simde_mm_clmulepi64_si128(a, b, imm8) _mm_clmulepi64_si128(a, b, imm8) +#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_AES) + #define simde_mm_clmulepi64_si128(a, b, imm8) \ + simde__m128i_from_neon_u64( \ + vreinterpretq_u64_p128( \ + vmull_p64( \ + vgetq_lane_p64(vreinterpretq_p64_u64(simde__m128i_to_neon_u64(a)), (imm8 ) & 1), \ + vgetq_lane_p64(vreinterpretq_p64_u64(simde__m128i_to_neon_u64(b)), (imm8 >> 4) & 1) \ + ) \ + ) \ + ) +#endif +#if defined(SIMDE_X86_PCLMUL_ENABLE_NATIVE_ALIASES) + #undef _mm_clmulepi64_si128 + #define _mm_clmulepi64_si128(a, b, imm8) simde_mm_clmulepi64_si128(a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_clmulepi64_epi128 (simde__m256i a, simde__m256i b, const int imm8) + SIMDE_REQUIRE_CONSTANT(imm8) { + simde__m256i_private + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b), + r_; + + #if defined(SIMDE_X86_PCLMUL_NATIVE) + switch (imm8 & 0x11) { + case 0x00: + r_.m128i[0] = _mm_clmulepi64_si128(a_.m128i[0], b_.m128i[0], 0x00); + r_.m128i[1] = _mm_clmulepi64_si128(a_.m128i[1], b_.m128i[1], 0x00); + break; + case 0x01: + r_.m128i[0] = _mm_clmulepi64_si128(a_.m128i[0], b_.m128i[0], 0x01); + r_.m128i[1] = _mm_clmulepi64_si128(a_.m128i[1], b_.m128i[1], 0x01); + break; + case 0x10: + r_.m128i[0] = _mm_clmulepi64_si128(a_.m128i[0], b_.m128i[0], 0x10); + r_.m128i[1] = _mm_clmulepi64_si128(a_.m128i[1], b_.m128i[1], 0x10); + break; + case 0x11: + r_.m128i[0] = _mm_clmulepi64_si128(a_.m128i[0], b_.m128i[0], 0x11); + r_.m128i[1] = _mm_clmulepi64_si128(a_.m128i[1], b_.m128i[1], 0x11); + break; + } + #else + simde__m128i_private a_lo_, b_lo_, r_lo_, a_hi_, b_hi_, r_hi_; + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(HEDLEY_IBM_VERSION) + switch (imm8 & 0x01) { + case 0x00: + a_lo_.u64 = __builtin_shufflevector(a_.u64, a_.u64, 0, 2); + break; + case 0x01: + a_lo_.u64 = __builtin_shufflevector(a_.u64, a_.u64, 1, 3); + break; + } + switch (imm8 & 0x10) { + case 0x00: + b_lo_.u64 = __builtin_shufflevector(b_.u64, b_.u64, 0, 2); + break; + case 0x10: + b_lo_.u64 = __builtin_shufflevector(b_.u64, b_.u64, 1, 3); + break; + } + #else + a_lo_.u64[0] = a_.u64[((imm8 >> 0) & 1) + 0]; + a_lo_.u64[1] = a_.u64[((imm8 >> 0) & 1) + 2]; + b_lo_.u64[0] = b_.u64[((imm8 >> 4) & 1) + 0]; + b_lo_.u64[1] = b_.u64[((imm8 >> 4) & 1) + 2]; + #endif + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_hi_.u64) / sizeof(r_hi_.u64[0])) ; i++) { + a_hi_.u64[i] = simde_x_bitreverse_u64(a_lo_.u64[i]); + b_hi_.u64[i] = simde_x_bitreverse_u64(b_lo_.u64[i]); + + r_lo_.u64[i] = simde_x_clmul_u64(a_lo_.u64[i], b_lo_.u64[i]); + r_hi_.u64[i] = simde_x_clmul_u64(a_hi_.u64[i], b_hi_.u64[i]); + + r_hi_.u64[i] = simde_x_bitreverse_u64(r_hi_.u64[i]) >> 1; + } 
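+    /* The loop above recovers the upper 64 bits of each carry-less product via the reversal identity bitreverse(clmul(bitreverse(a), bitreverse(b))) >> 1; the shuffles below interleave r_lo_ and r_hi_ so that every 128-bit lane of the result holds { low half, high half }. */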
+ + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(HEDLEY_IBM_VERSION) + r_.u64 = __builtin_shufflevector(r_lo_.u64, r_hi_.u64, 0, 2, 1, 3); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + r_ = simde__m256i_to_private(simde_mm256_set_m128i(simde__m128i_from_private(r_hi_), simde__m128i_from_private(r_lo_))); + r_.u64 = SIMDE_SHUFFLE_VECTOR_(64, 32, r_.u64, r_.u64, 0, 2, 1, 3); + #else + r_.u64[0] = r_lo_.u64[0]; + r_.u64[1] = r_hi_.u64[0]; + r_.u64[2] = r_lo_.u64[1]; + r_.u64[3] = r_hi_.u64[1]; + #endif + #endif + + return simde__m256i_from_private(r_); +} +#if defined(SIMDE_X86_VPCLMULQDQ_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) + #define simde_mm256_clmulepi64_epi128(a, b, imm8) _mm256_clmulepi64_epi128(a, b, imm8) +#endif +#if defined(SIMDE_X86_VPCLMULQDQ_ENABLE_NATIVE_ALIASES) + #undef _mm256_clmulepi64_epi128 + #define _mm256_clmulepi64_epi128(a, b, imm8) simde_mm256_clmulepi64_epi128(a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_clmulepi64_epi128 (simde__m512i a, simde__m512i b, const int imm8) + SIMDE_REQUIRE_CONSTANT(imm8) { + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + #if defined(HEDLEY_MSVC_VERSION) + r_ = simde__m512i_to_private(simde_mm512_setzero_si512()); + #endif + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + switch (imm8 & 0x11) { + case 0x00: + r_.m256i[0] = simde_mm256_clmulepi64_epi128(a_.m256i[0], b_.m256i[0], 0x00); + r_.m256i[1] = simde_mm256_clmulepi64_epi128(a_.m256i[1], b_.m256i[1], 0x00); + break; + case 0x01: + r_.m256i[0] = simde_mm256_clmulepi64_epi128(a_.m256i[0], b_.m256i[0], 0x01); + r_.m256i[1] = simde_mm256_clmulepi64_epi128(a_.m256i[1], b_.m256i[1], 0x01); + break; + case 0x10: + r_.m256i[0] = simde_mm256_clmulepi64_epi128(a_.m256i[0], b_.m256i[0], 0x10); + r_.m256i[1] = simde_mm256_clmulepi64_epi128(a_.m256i[1], b_.m256i[1], 0x10); + break; + case 0x11: + r_.m256i[0] = simde_mm256_clmulepi64_epi128(a_.m256i[0], b_.m256i[0], 0x11); + r_.m256i[1] = simde_mm256_clmulepi64_epi128(a_.m256i[1], b_.m256i[1], 0x11); + break; + } + #else + simde__m256i_private a_lo_, b_lo_, r_lo_, a_hi_, b_hi_, r_hi_; + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(HEDLEY_IBM_VERSION) + switch (imm8 & 0x01) { + case 0x00: + a_lo_.u64 = __builtin_shufflevector(a_.u64, a_.u64, 0, 2, 4, 6); + break; + case 0x01: + a_lo_.u64 = __builtin_shufflevector(a_.u64, a_.u64, 1, 3, 5, 7); + break; + } + switch (imm8 & 0x10) { + case 0x00: + b_lo_.u64 = __builtin_shufflevector(b_.u64, b_.u64, 0, 2, 4, 6); + break; + case 0x10: + b_lo_.u64 = __builtin_shufflevector(b_.u64, b_.u64, 1, 3, 5, 7); + break; + } + #else + a_lo_.u64[0] = a_.u64[((imm8 >> 0) & 1) + 0]; + a_lo_.u64[1] = a_.u64[((imm8 >> 0) & 1) + 2]; + a_lo_.u64[2] = a_.u64[((imm8 >> 0) & 1) + 4]; + a_lo_.u64[3] = a_.u64[((imm8 >> 0) & 1) + 6]; + b_lo_.u64[0] = b_.u64[((imm8 >> 4) & 1) + 0]; + b_lo_.u64[1] = b_.u64[((imm8 >> 4) & 1) + 2]; + b_lo_.u64[2] = b_.u64[((imm8 >> 4) & 1) + 4]; + b_lo_.u64[3] = b_.u64[((imm8 >> 4) & 1) + 6]; + #endif + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_hi_.u64) / sizeof(r_hi_.u64[0])) ; i++) { + a_hi_.u64[i] = simde_x_bitreverse_u64(a_lo_.u64[i]); + b_hi_.u64[i] = simde_x_bitreverse_u64(b_lo_.u64[i]); + + r_lo_.u64[i] = simde_x_clmul_u64(a_lo_.u64[i], b_lo_.u64[i]); + r_hi_.u64[i] = simde_x_clmul_u64(a_hi_.u64[i], b_hi_.u64[i]); + + r_hi_.u64[i] = simde_x_bitreverse_u64(r_hi_.u64[i]) >> 1; + } + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(HEDLEY_IBM_VERSION) + r_.u64 = 
__builtin_shufflevector(r_lo_.u64, r_hi_.u64, 0, 4, 1, 5, 2, 6, 3, 7); + #else + r_.u64[0] = r_lo_.u64[0]; + r_.u64[1] = r_hi_.u64[0]; + r_.u64[2] = r_lo_.u64[1]; + r_.u64[3] = r_hi_.u64[1]; + r_.u64[4] = r_lo_.u64[2]; + r_.u64[5] = r_hi_.u64[2]; + r_.u64[6] = r_lo_.u64[3]; + r_.u64[7] = r_hi_.u64[3]; + #endif + #endif + + return simde__m512i_from_private(r_); +} +#if defined(SIMDE_X86_VPCLMULQDQ_NATIVE) + #define simde_mm512_clmulepi64_epi128(a, b, imm8) _mm512_clmulepi64_epi128(a, b, imm8) +#endif +#if defined(SIMDE_X86_VPCLMULQDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_clmulepi64_epi128 + #define _mm512_clmulepi64_epi128(a, b, imm8) simde_mm512_clmulepi64_epi128(a, b, imm8) +#endif + +SIMDE_END_DECLS_ + +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_CLMUL_H) */ diff --git a/lib/mmseqs/lib/simde/simde/x86/fma.h b/lib/mmseqs/lib/simde/simde/x86/fma.h index 5f461f0..e43a45d 100644 --- a/lib/mmseqs/lib/simde/simde/x86/fma.h +++ b/lib/mmseqs/lib/simde/simde/x86/fma.h @@ -110,7 +110,7 @@ simde_mm_fmadd_ps (simde__m128 a, simde__m128 b, simde__m128 c) { #elif defined(simde_math_fmaf) && (defined(__FP_FAST_FMAF) || defined(FP_FAST_FMAF)) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_fma(a_.f32[i], b_.f32[i], c_.f32[i]); + r_.f32[i] = simde_math_fmaf(a_.f32[i], b_.f32[i], c_.f32[i]); } #else SIMDE_VECTORIZE diff --git a/lib/mmseqs/lib/simde/simde/x86/gfni.h b/lib/mmseqs/lib/simde/simde/x86/gfni.h index c5a5ca8..b4d120b 100644 --- a/lib/mmseqs/lib/simde/simde/x86/gfni.h +++ b/lib/mmseqs/lib/simde/simde/x86/gfni.h @@ -339,7 +339,7 @@ simde_mm_gf2p8affine_epi64_epi8 (simde__m128i x, simde__m128i A, int b) SIMDE_REQUIRE_CONSTANT_RANGE(b, 0, 255) { return simde_mm_xor_si128(simde_x_mm_gf2p8matrix_multiply_epi64_epi8(x, A), simde_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, b))); } -#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) +#if defined(SIMDE_X86_GFNI_NATIVE) #define simde_mm_gf2p8affine_epi64_epi8(x, A, b) _mm_gf2p8affine_epi64_epi8(x, A, b) #endif #if defined(SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES) @@ -353,7 +353,7 @@ simde_mm256_gf2p8affine_epi64_epi8 (simde__m256i x, simde__m256i A, int b) SIMDE_REQUIRE_CONSTANT_RANGE(b, 0, 255) { return simde_mm256_xor_si256(simde_x_mm256_gf2p8matrix_multiply_epi64_epi8(x, A), simde_mm256_set1_epi8(HEDLEY_STATIC_CAST(int8_t, b))); } -#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) +#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) #define simde_mm256_gf2p8affine_epi64_epi8(x, A, b) _mm256_gf2p8affine_epi64_epi8(x, A, b) #endif #if defined(SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES) @@ -441,7 +441,7 @@ simde_mm_gf2p8affineinv_epi64_epi8 (simde__m128i x, simde__m128i A, int b) SIMDE_REQUIRE_CONSTANT_RANGE(b, 0, 255) { return simde_mm_xor_si128(simde_x_mm_gf2p8matrix_multiply_inverse_epi64_epi8(x, A), simde_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, b))); } -#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) +#if defined(SIMDE_X86_GFNI_NATIVE) #define simde_mm_gf2p8affineinv_epi64_epi8(x, A, b) _mm_gf2p8affineinv_epi64_epi8(x, A, b) #endif #if defined(SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES) @@ -455,7 +455,7 @@ simde_mm256_gf2p8affineinv_epi64_epi8 (simde__m256i x, simde__m256i A, int b) SIMDE_REQUIRE_CONSTANT_RANGE(b, 0, 255) { return simde_mm256_xor_si256(simde_x_mm256_gf2p8matrix_multiply_inverse_epi64_epi8(x, A), simde_mm256_set1_epi8(HEDLEY_STATIC_CAST(int8_t, b))); } -#if defined(SIMDE_X86_GFNI_NATIVE) && 
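/*
 * The fma.h hunk above switches the scalar f32 fallback from simde_math_fma
 * to simde_math_fmaf.  A small check of why a fused float multiply-add
 * matters (illustrative only; assumes float expressions are evaluated in
 * float, i.e. FLT_EVAL_METHOD == 0): the fused form rounds once, while a
 * separate multiply and add round twice.
 */
#include <math.h>
#include <stdio.h>

int main(void) {
  float a = 1.0f + 0x1p-23f;   /* just above 1 */
  float b = 1.0f - 0x1p-23f;   /* just below 1 */
  float c = -1.0f;
  printf("fused:    %a\n", fmaf(a, b, c));  /* exact result, -0x1p-46 */
  printf("separate: %a\n", a * b + c);      /* a*b rounds to 1.0f, so 0 */
  return 0;
}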
defined(SIMDE_X86_AVX512VL_NATIVE) +#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) #define simde_mm256_gf2p8affineinv_epi64_epi8(x, A, b) _mm256_gf2p8affineinv_epi64_epi8(x, A, b) #endif #if defined(SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES) @@ -596,7 +596,7 @@ simde__m128i simde_mm_gf2p8mul_epi8 (simde__m128i a, simde__m128i b) { return simde__m128i_from_private(r_); #endif } -#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) +#if defined(SIMDE_X86_GFNI_NATIVE) #define simde_mm_gf2p8mul_epi8(a, b) _mm_gf2p8mul_epi8(a, b) #endif #if defined(SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES) @@ -649,7 +649,7 @@ simde_mm256_gf2p8mul_epi8 (simde__m256i a, simde__m256i b) { return simde__m256i_from_private(r_); #endif } -#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) +#if defined(SIMDE_X86_GFNI_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) #define simde_mm256_gf2p8mul_epi8(a, b) _mm256_gf2p8mul_epi8(a, b) #endif #if defined(SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES) diff --git a/lib/mmseqs/lib/simde/simde/x86/mmx.h b/lib/mmseqs/lib/simde/simde/x86/mmx.h index c003778..10ae3e6 100644 --- a/lib/mmseqs/lib/simde/simde/x86/mmx.h +++ b/lib/mmseqs/lib/simde/simde/x86/mmx.h @@ -42,6 +42,8 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS #include #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #include +#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + #include #endif #include @@ -51,29 +53,29 @@ SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(8) int8_t i8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) int16_t i16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) int32_t i32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) int64_t i64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) uint8_t u8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) uint16_t u16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) uint32_t u32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) uint64_t u64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) simde_float32 f32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) int_fast32_t i32f SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(8) uint_fast32_t u32f SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; - #else - SIMDE_ALIGN(8) int8_t i8[8]; - SIMDE_ALIGN(8) int16_t i16[4]; - SIMDE_ALIGN(8) int32_t i32[2]; - SIMDE_ALIGN(8) int64_t i64[1]; - SIMDE_ALIGN(8) uint8_t u8[8]; - SIMDE_ALIGN(8) uint16_t u16[4]; - SIMDE_ALIGN(8) uint32_t u32[2]; - SIMDE_ALIGN(8) uint64_t u64[1]; - SIMDE_ALIGN(8) simde_float32 f32[2]; - SIMDE_ALIGN(8) int_fast32_t i32f[8 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(8) uint_fast32_t u32f[8 / sizeof(uint_fast32_t)]; + SIMDE_ALIGN_TO_8 int8_t i8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 int16_t i16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 int32_t i32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 int64_t i64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 uint8_t u8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 uint16_t u16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 uint32_t u32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 uint64_t u64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 simde_float32 f32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 int_fast32_t i32f SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_8 uint_fast32_t u32f SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_8 int8_t i8[8]; + SIMDE_ALIGN_TO_8 int16_t i16[4]; + SIMDE_ALIGN_TO_8 int32_t i32[2]; + SIMDE_ALIGN_TO_8 int64_t i64[1]; + SIMDE_ALIGN_TO_8 uint8_t u8[8]; + 
SIMDE_ALIGN_TO_8 uint16_t u16[4]; + SIMDE_ALIGN_TO_8 uint32_t u32[2]; + SIMDE_ALIGN_TO_8 uint64_t u64[1]; + SIMDE_ALIGN_TO_8 simde_float32 f32[2]; + SIMDE_ALIGN_TO_8 int_fast32_t i32f[8 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_8 uint_fast32_t u32f[8 / sizeof(uint_fast32_t)]; #endif #if defined(SIMDE_X86_MMX_USE_NATIVE_TYPE) @@ -90,14 +92,26 @@ typedef union { uint64x1_t neon_u64; float32x2_t neon_f32; #endif + #if defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + int8x8_t mmi_i8; + int16x4_t mmi_i16; + int32x2_t mmi_i32; + int64_t mmi_i64; + uint8x8_t mmi_u8; + uint16x4_t mmi_u16; + uint32x2_t mmi_u32; + uint64_t mmi_u64; + #endif } simde__m64_private; #if defined(SIMDE_X86_MMX_USE_NATIVE_TYPE) typedef __m64 simde__m64; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef int32x2_t simde__m64; +#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + typedef int32x2_t simde__m64; #elif defined(SIMDE_VECTOR_SUBSCRIPT) - typedef int32_t simde__m64 SIMDE_ALIGN(8) SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; + typedef int32_t simde__m64 SIMDE_ALIGN_TO_8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; #else typedef simde__m64_private simde__m64; #endif @@ -158,6 +172,17 @@ simde__m64_to_private(simde__m64 v) { SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, float32x2_t, neon, f32) #endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */ +#if defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int8x8_t, mmi, i8) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int16x4_t, mmi, i16) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int32x2_t, mmi, i32) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int64_t, mmi, i64) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint8x8_t, mmi, u8) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint16x4_t, mmi, u16) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint32x2_t, mmi, u32) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint64_t, mmi, u64) +#endif /* defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) */ + SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_add_pi8 (simde__m64 a, simde__m64 b) { @@ -170,6 +195,8 @@ simde_mm_add_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vadd_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = paddb_s(a_.mmi_i8, b_.mmi_i8); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i8 = a_.i8 + b_.i8; #else @@ -200,6 +227,8 @@ simde_mm_add_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vadd_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = paddh_s(a_.mmi_i16, b_.mmi_i16); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i16 = a_.i16 + b_.i16; #else @@ -230,6 +259,8 @@ simde_mm_add_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vadd_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = paddw_s(a_.mmi_i32, b_.mmi_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 + b_.i32; #else @@ -261,6 +292,8 @@ simde_mm_adds_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vqadd_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = paddsb(a_.mmi_i8, b_.mmi_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -295,6 +328,8 @@ simde_mm_adds_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vqadd_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_u8 = 
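/*
 * Reference for what the paddsb mapping above (and the unsigned and 16-bit
 * variants that follow) is expected to compute: addition that saturates at
 * the lane's numeric range instead of wrapping.  Shown for signed bytes;
 * the other widths follow the same pattern (names here are illustrative).
 */
#include <stdint.h>

static int8_t adds_i8_ref(int8_t a, int8_t b) {
  int16_t sum = (int16_t) a + (int16_t) b;   /* widen so the sum cannot wrap */
  if (sum > INT8_MAX) sum = INT8_MAX;
  if (sum < INT8_MIN) sum = INT8_MIN;
  return (int8_t) sum;
}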
paddusb(a_.mmi_u8, b_.mmi_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -327,6 +362,8 @@ simde_mm_adds_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vqadd_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = paddsh(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -361,6 +398,8 @@ simde_mm_adds_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vqadd_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_u16 = paddush(a_.mmi_u16, b_.mmi_u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -420,6 +459,8 @@ simde_mm_andnot_si64 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbic_s32(b_.neon_i32, a_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = pandn_sw(a_.mmi_i32, b_.mmi_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = ~a_.i32f & b_.i32f; #else @@ -447,6 +488,8 @@ simde_mm_cmpeq_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vceq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = pcmpeqb_s(a_.mmi_i8, b_.mmi_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -475,6 +518,8 @@ simde_mm_cmpeq_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vceq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = pcmpeqh_s(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -503,6 +548,8 @@ simde_mm_cmpeq_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vceq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = pcmpeqw_s(a_.mmi_i32, b_.mmi_i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -531,6 +578,8 @@ simde_mm_cmpgt_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vcgt_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = pcmpgtb_s(a_.mmi_i8, b_.mmi_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -559,6 +608,8 @@ simde_mm_cmpgt_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vcgt_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = pcmpgth_s(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -587,6 +638,8 @@ simde_mm_cmpgt_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgt_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = pcmpgtw_s(a_.mmi_i32, b_.mmi_i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -729,6 +782,8 @@ simde_mm_madd_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) int32x4_t i1 = vmull_s16(a_.neon_i16, b_.neon_i16); r_.neon_i32 = vpadd_s32(vget_low_s32(i1), vget_high_s32(i1)); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = 
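/*
 * Mask convention assumed by the pcmpeqb_s/pcmpgtb_s mappings above (and
 * their 16- and 32-bit variants): a true comparison yields a lane of all
 * ones, a false comparison yields all zeros, matching the MMX
 * pcmpeq/pcmpgt instructions.  Scalar sketch for one signed byte lane:
 */
#include <stdint.h>

static int8_t cmpgt_lane_i8_ref(int8_t a, int8_t b) {
  return (a > b) ? (int8_t) -1 : 0;   /* -1 is the all-ones bit pattern */
}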
pmaddhw(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i += 2) { @@ -760,6 +815,8 @@ simde_mm_mulhi_pi16 (simde__m64 a, simde__m64 b) { const uint32x4_t t2 = vshrq_n_u32(vreinterpretq_u32_s32(t1), 16); const uint16x4_t t3 = vmovn_u32(t2); r_.neon_u16 = t3; + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = pmulhh(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -790,6 +847,8 @@ simde_mm_mullo_pi16 (simde__m64 a, simde__m64 b) { const int32x4_t t1 = vmull_s16(a_.neon_i16, b_.neon_i16); const uint16x4_t t2 = vmovn_u32(vreinterpretq_u32_s32(t1)); r_.neon_u16 = t2; + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = pmullh(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -845,6 +904,8 @@ simde_mm_packs_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vqmovn_s16(vcombine_s16(a_.neon_i16, b_.neon_i16)); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = packsshb(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -890,6 +951,8 @@ simde_mm_packs_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vqmovn_s32(vcombine_s32(a_.neon_i32, b_.neon_i32)); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = packsswh(a_.mmi_i32, b_.mmi_i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (8 / sizeof(a_.i32[0])) ; i++) { @@ -950,6 +1013,8 @@ simde_mm_packs_pu16 (simde__m64 a, simde__m64 b) { const int16x8_t values = vorrq_s16(le_max, gt_max); r_.neon_u8 = vmovn_u16(vreinterpretq_u16_s16(values)); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_u8 = packushb(a_.mmi_u16, b_.mmi_u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -1278,7 +1343,7 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_x_mm_load_si64 (const void* mem_addr) { simde__m64 r; - simde_memcpy(&r, SIMDE_ASSUME_ALIGNED_AS(simde__m64, mem_addr), sizeof(r)); + simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m64), sizeof(r)); return r; } @@ -1293,7 +1358,7 @@ simde_x_mm_loadu_si64 (const void* mem_addr) { SIMDE_FUNCTION_ATTRIBUTES void simde_x_mm_store_si64 (void* mem_addr, simde__m64 value) { - simde_memcpy(SIMDE_ASSUME_ALIGNED_AS(simde__m64, mem_addr), &value, sizeof(value)); + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m64), &value, sizeof(value)); } SIMDE_FUNCTION_ATTRIBUTES @@ -1325,6 +1390,11 @@ simde_mm_sll_pi16 (simde__m64 a, simde__m64 count) { #endif r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16(HEDLEY_STATIC_CAST(int16_t, vget_lane_u64(count_.neon_u64, 0)))); HEDLEY_DIAGNOSTIC_POP + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT) + if (HEDLEY_UNLIKELY(count_.u64[0] > 15)) + return simde_mm_setzero_si64(); + + r_.i16 = a_.i16 << HEDLEY_STATIC_CAST(int16_t, count_.u64[0]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.i16 = a_.i16 << count_.u64[0]; #else @@ -1397,10 +1467,18 @@ simde_mm_slli_pi16 (simde__m64 a, int count) { simde__m64_private r_; simde__m64_private a_ = simde__m64_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT) + if (HEDLEY_UNLIKELY(count > 15)) + return simde_mm_setzero_si64(); + + r_.i16 = a_.i16 << 
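/*
 * What the pmaddhw mapping above (and the scalar loop next to it) computes
 * for _mm_madd_pi16: adjacent signed 16-bit products summed into 32-bit
 * lanes.  A standalone reference in array form (illustrative names):
 */
#include <stdint.h>

static void madd_pi16_ref(int32_t r[2], const int16_t a[4], const int16_t b[4]) {
  for (int i = 0; i < 2; i++)
    r[i] = (int32_t) a[2 * i] * b[2 * i] + (int32_t) a[2 * i + 1] * b[2 * i + 1];
}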
HEDLEY_STATIC_CAST(int16_t, count); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.i16 = a_.i16 << count; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16((int16_t) count)); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = psllh_s(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -1430,6 +1508,8 @@ simde_mm_slli_pi32 (simde__m64 a, int count) { r_.i32 = a_.i32 << count; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vshl_s32(a_.neon_i32, vmov_n_s32((int32_t) count)); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = psllw_s(a_.mmi_i32, b_.mmi_i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -1514,7 +1594,12 @@ simde_mm_srl_pi16 (simde__m64 a, simde__m64 count) { simde__m64_private a_ = simde__m64_to_private(a); simde__m64_private count_ = simde__m64_to_private(count); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT) + if (HEDLEY_UNLIKELY(count_.u64[0] > 15)) + return simde_mm_setzero_si64(); + + r_.i16 = a_.i16 >> HEDLEY_STATIC_CAST(int16_t, count_.u64[0]); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.u16 = a_.u16 >> count_.u64[0]; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vshl_u16(a_.neon_u16, vmov_n_s16(-((int16_t) vget_lane_u64(count_.neon_u64, 0)))); @@ -1587,6 +1672,8 @@ simde_mm_srli_pi16 (simde__m64 a, int count) { r_.u16 = a_.u16 >> count; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vshl_u16(a_.neon_u16, vmov_n_s16(-((int16_t) count))); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = psrlh_s(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -1616,6 +1703,8 @@ simde_mm_srli_pi32 (simde__m64 a, int count) { r_.u32 = a_.u32 >> count; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vshl_u32(a_.neon_u32, vmov_n_s32(-((int32_t) count))); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = psrlw_s(a_.mmi_i32, b_.mmi_i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -1702,7 +1791,9 @@ simde_mm_srai_pi16 (simde__m64 a, int count) { #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.i16 = a_.i16 >> (count & 0xff); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16(-HEDLEY_STATIC_CAST(int16_t, count)); + r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16(-HEDLEY_STATIC_CAST(int16_t, count))); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = psrah_s(a_.mmi_i16, count); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -1732,6 +1823,8 @@ simde_mm_srai_pi32 (simde__m64 a, int count) { r_.i32 = a_.i32 >> (count & 0xff); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vshl_s32(a_.neon_i32, vmov_n_s32(-HEDLEY_STATIC_CAST(int32_t, count))); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = psraw_s(a_.mmi_i32, count); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -1822,6 +1915,8 @@ simde_mm_sub_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vsub_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = psubb_s(a_.mmi_i8, b_.mmi_i8); #elif 
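/*
 * Shift semantics the SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT workarounds
 * above preserve: for the MMX shift forms, a count larger than the element
 * width clears every lane to zero, which a plain C << on the vector type
 * does not guarantee once the count reaches the element width.  Scalar
 * sketch for one 16-bit lane:
 */
#include <stdint.h>

static uint16_t sll_u16_ref(uint16_t v, uint64_t count) {
  return (count > 15) ? 0 : (uint16_t) (v << count);
}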
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i8 = a_.i8 - b_.i8; #else @@ -1852,6 +1947,8 @@ simde_mm_sub_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vsub_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = psubh_s(a_.mmi_i16, b_.mmi_i16); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i16 = a_.i16 - b_.i16; #else @@ -1882,6 +1979,8 @@ simde_mm_sub_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vsub_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = psubw_s(a_.mmi_i32, b_.mmi_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 - b_.i32; #else @@ -1912,6 +2011,8 @@ simde_mm_subs_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vqsub_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = psubsb(a_.mmi_i8, b_.mmi_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -1946,6 +2047,8 @@ simde_mm_subs_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vqsub_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_u8 = psubusb(a_.mmi_u8, b_.mmi_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -1981,6 +2084,8 @@ simde_mm_subs_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vqsub_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = psubsh(a_.mmi_i16, b_.mmi_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -2015,6 +2120,8 @@ simde_mm_subs_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vqsub_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_u16 = psubush(a_.mmi_u16, b_.mmi_u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -2052,6 +2159,8 @@ simde_mm_unpackhi_pi8 (simde__m64 a, simde__m64 b) { r_.neon_i8 = vzip2_s8(a_.neon_i8, b_.neon_i8); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.i8, b_.i8, 4, 12, 5, 13, 6, 14, 7, 15); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = punpckhbh_s(a_.mmi_i8, b_.mmi_i8); #else r_.i8[0] = a_.i8[4]; r_.i8[1] = b_.i8[4]; @@ -2084,6 +2193,8 @@ simde_mm_unpackhi_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i16 = vzip2_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = punpckhhw_s(a_.mmi_i16, b_.mmi_i16); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.i16, b_.i16, 2, 6, 3, 7); #else @@ -2114,6 +2225,8 @@ simde_mm_unpackhi_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i32 = vzip2_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = punpckhwd_s(a_.mmi_i32, b_.mmi_i32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.i32, b_.i32, 1, 3); #else @@ -2142,6 +2255,8 @@ simde_mm_unpacklo_pi8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i8 = vzip1_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i8 = punpcklbh_s(a_.mmi_i8, b_.mmi_i8); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i8 = 
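/*
 * Interleave pattern implemented by the punpckhbh_s mapping and the
 * SIMDE_SHUFFLE_VECTOR_ indices above: _mm_unpackhi_pi8 interleaves the
 * upper four bytes of each operand.  Reference in array form
 * (illustrative names):
 */
#include <stdint.h>

static void unpackhi_pi8_ref(int8_t r[8], const int8_t a[8], const int8_t b[8]) {
  for (int i = 0; i < 4; i++) {
    r[2 * i]     = a[4 + i];
    r[2 * i + 1] = b[4 + i];
  }
}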
SIMDE_SHUFFLE_VECTOR_(8, 8, a_.i8, b_.i8, 0, 8, 1, 9, 2, 10, 3, 11); #else @@ -2176,6 +2291,8 @@ simde_mm_unpacklo_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i16 = vzip1_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i16 = punpcklhw_s(a_.mmi_i16, b_.mmi_i16); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.i16, b_.i16, 0, 4, 1, 5); #else @@ -2206,6 +2323,8 @@ simde_mm_unpacklo_pi32 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i32 = vzip1_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) + r_.mmi_i32 = punpcklwd_s(a_.mmi_i32, b_.mmi_i32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.i32, b_.i32, 0, 2); #else diff --git a/lib/mmseqs/lib/simde/simde/x86/sse.h b/lib/mmseqs/lib/simde/simde/x86/sse.h index 47c1fbc..d72d9d4 100644 --- a/lib/mmseqs/lib/simde/simde/x86/sse.h +++ b/lib/mmseqs/lib/simde/simde/x86/sse.h @@ -42,71 +42,71 @@ SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #endif - SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else - SIMDE_ALIGN(16) int8_t i8[16]; - SIMDE_ALIGN(16) int16_t i16[8]; - SIMDE_ALIGN(16) int32_t i32[4]; - SIMDE_ALIGN(16) int64_t i64[2]; - SIMDE_ALIGN(16) uint8_t u8[16]; - SIMDE_ALIGN(16) uint16_t u16[8]; - SIMDE_ALIGN(16) uint32_t u32[4]; - SIMDE_ALIGN(16) uint64_t u64[2]; + SIMDE_ALIGN_TO_16 int8_t i8[16]; + SIMDE_ALIGN_TO_16 int16_t i16[8]; + SIMDE_ALIGN_TO_16 int32_t i32[4]; + SIMDE_ALIGN_TO_16 int64_t i64[2]; + SIMDE_ALIGN_TO_16 uint8_t u8[16]; + SIMDE_ALIGN_TO_16 uint16_t u16[8]; + SIMDE_ALIGN_TO_16 uint32_t u32[4]; + SIMDE_ALIGN_TO_16 uint64_t u64[2]; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(16) simde_int128 
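/*
 * The SIMDE_ALIGN(16) to SIMDE_ALIGN_TO_16 renaming above keeps the same
 * intent: every member of the private union is 16-byte aligned so the whole
 * object can back aligned vector loads and stores.  A minimal C11 sketch of
 * the assumed expansion (the type name below is illustrative):
 */
#include <stdalign.h>
#include <stdint.h>

typedef union {
  alignas(16) int8_t   i8[16];
  alignas(16) int16_t  i16[8];
  alignas(16) int32_t  i32[4];
  alignas(16) float    f32[4];
} m128_private_sketch;

_Static_assert(_Alignof(m128_private_sketch) >= 16,
               "union must be at least 16-byte aligned");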
i128[1]; - SIMDE_ALIGN(16) simde_uint128 u128[1]; + SIMDE_ALIGN_TO_16 simde_int128 i128[1]; + SIMDE_ALIGN_TO_16 simde_uint128 u128[1]; #endif - SIMDE_ALIGN(16) simde_float32 f32[4]; - SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; + SIMDE_ALIGN_TO_16 simde_float32 f32[4]; + SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; #endif - SIMDE_ALIGN(16) simde__m64_private m64_private[2]; - SIMDE_ALIGN(16) simde__m64 m64[2]; + SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; + SIMDE_ALIGN_TO_16 simde__m64 m64[2]; #if defined(SIMDE_X86_SSE_NATIVE) - SIMDE_ALIGN(16) __m128 n; + SIMDE_ALIGN_TO_16 __m128 n; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN(16) int8x16_t neon_i8; - SIMDE_ALIGN(16) int16x8_t neon_i16; - SIMDE_ALIGN(16) int32x4_t neon_i32; - SIMDE_ALIGN(16) int64x2_t neon_i64; - SIMDE_ALIGN(16) uint8x16_t neon_u8; - SIMDE_ALIGN(16) uint16x8_t neon_u16; - SIMDE_ALIGN(16) uint32x4_t neon_u32; - SIMDE_ALIGN(16) uint64x2_t neon_u64; - SIMDE_ALIGN(16) float32x4_t neon_f32; + SIMDE_ALIGN_TO_16 int8x16_t neon_i8; + SIMDE_ALIGN_TO_16 int16x8_t neon_i16; + SIMDE_ALIGN_TO_16 int32x4_t neon_i32; + SIMDE_ALIGN_TO_16 int64x2_t neon_i64; + SIMDE_ALIGN_TO_16 uint8x16_t neon_u8; + SIMDE_ALIGN_TO_16 uint16x8_t neon_u16; + SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; + SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; + SIMDE_ALIGN_TO_16 float32x4_t neon_f32; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - SIMDE_ALIGN(16) float64x2_t neon_f64; + SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) - SIMDE_ALIGN(16) v128_t wasm_v128; + SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; #endif #endif } simde__m128_private; @@ -120,7 +120,7 @@ typedef union { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128; #elif defined(SIMDE_VECTOR_SUBSCRIPT) - typedef simde_float32 simde__m128 
SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else typedef simde__m128_private simde__m128; #endif @@ -198,6 +198,8 @@ simde__m128_to_private(simde__m128 v) { SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64) #endif +#elif defined(SIMDE_WASM_SIMD128_NATIVE) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128); #endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */ enum { @@ -383,12 +385,15 @@ simde_mm_setcsr (uint32_t a) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 -simde_mm_round_ps (simde__m128 a, int rounding) - SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) { +simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding) + SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) + SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) { simde__m128_private r_, a_ = simde__m128_to_private(a); + (void) lax_rounding; + /* For architectures which lack a current direction SIMD instruction. * * Note that NEON actually has a current rounding mode instruction, @@ -406,7 +411,7 @@ simde_mm_round_ps (simde__m128 a, int rounding) case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_f32 = vrndiq_f32(a_.neon_f32); #elif defined(simde_math_nearbyintf) SIMDE_VECTORIZE @@ -419,14 +424,14 @@ simde_mm_round_ps (simde__m128 a, int rounding) break; case SIMDE_MM_FROUND_TO_NEAREST_INT: - #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 - r_.neon_f32 = vrndaq_f32(a_.neon_f32); - #elif defined(simde_math_roundf) + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32)); + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + r_.neon_f32 = vrndnq_f32(a_.neon_f32); + #elif defined(simde_math_roundevenf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_roundf(a_.f32[i]); + r_.f32[i] = simde_math_roundevenf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); @@ -436,7 +441,7 @@ simde_mm_round_ps (simde__m128 a, int rounding) case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32)); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndmq_f32(a_.neon_f32); #elif defined(simde_math_floorf) SIMDE_VECTORIZE @@ -451,7 +456,7 @@ simde_mm_round_ps (simde__m128 a, int rounding) case SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32)); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndpq_f32(a_.neon_f32); #elif defined(simde_math_ceilf) SIMDE_VECTORIZE @@ -466,7 +471,7 @@ simde_mm_round_ps (simde__m128 a, int rounding) case SIMDE_MM_FROUND_TO_ZERO: 
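/*
 * The SIMDE_MM_FROUND_TO_NEAREST_INT case above now uses vrndnq_f32 and
 * simde_math_roundevenf, i.e. ties round to even, which is what the SSE4.1
 * rounding instruction does; roundf would round ties away from zero.  The
 * difference only shows on exact .5 inputs (nearbyintf follows the default
 * round-to-nearest-even mode, so it stands in for roundevenf here):
 */
#include <math.h>
#include <stdio.h>

int main(void) {
  printf("roundf(2.5f)     = %.1f\n", roundf(2.5f));     /* 3.0, ties away from zero */
  printf("nearbyintf(2.5f) = %.1f\n", nearbyintf(2.5f)); /* 2.0, ties to even */
  return 0;
}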
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32)); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndq_f32(a_.neon_f32); #elif defined(simde_math_truncf) SIMDE_VECTORIZE @@ -485,10 +490,12 @@ simde_mm_round_ps (simde__m128 a, int rounding) return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) - #define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding) + #define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding)) +#else + #define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) - #define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding) + #define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -500,7 +507,7 @@ simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_flo simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN(16) simde_float32 data[4] = { e0, e1, e2, e3 }; + SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 }; r_.neon_f32 = vld1q_f32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3); @@ -777,7 +784,10 @@ simde_mm_or_ps (simde__m128 a, simde__m128 b) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_not_ps(simde__m128 a) { - #if defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + __m128i ai = _mm_castps_si128(a); + return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55)); + #elif defined(SIMDE_X86_SSE2_NATIVE) /* Note: we use ints instead of floats because we don't want cmpeq * to return false for (NaN, NaN) */ __m128i ai = _mm_castps_si128(a); @@ -1264,6 +1274,9 @@ simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) { Linux Compiler Reference, Version 16.1.1) shows that it should be present. Both GCC and clang support it. 
*/ r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); #else @@ -1398,6 +1411,11 @@ simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) { uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vandq_u32(ceqaa, ceqbb); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), + vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1431,6 +1449,15 @@ simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) { uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), + vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), + vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); + r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1687,7 +1714,6 @@ simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) { #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; - #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; @@ -1714,7 +1740,8 @@ simde_mm_cvt_ps2pi (simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); - #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0 + #elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) + a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else a_ = simde__m128_to_private(a); @@ -1763,11 +1790,17 @@ int32_t simde_mm_cvt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_ss2si(a); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399) return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0); #else simde__m128_private a_ = 
simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); - return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && + (a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN; + #else + return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); + #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) @@ -1783,8 +1816,8 @@ simde_mm_cvtpi16_ps (simde__m64 a) { simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */ - r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0)))); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16); #else @@ -1921,12 +1954,18 @@ simde_mm_cvtps_pi32 (simde__m128 a) { simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i])); + simde_float32 v = simde_math_roundf(a_.f32[i]); + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #else + r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); + #endif } #endif @@ -2038,11 +2077,8 @@ simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else + r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); - SIMDE_VECTORIZE - for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = a_.i32[i]; - } #endif return simde__m128_from_private(r_); @@ -2137,14 +2173,18 @@ simde_mm_cvtt_ps2pi (simde__m128 a) { simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); - #elif defined(SIMDE_CONVERT_VECTOR_) - SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]); + simde_float32 v = a_.f32[i]; + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? 
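/*
 * Out-of-range behaviour the !SIMDE_FAST_CONVERSION_RANGE branches above
 * emulate: the x86 float-to-int32 conversions return the "integer
 * indefinite" value INT32_MIN when the rounded input cannot be represented,
 * including NaN.  Scalar sketch mirroring the range test used above:
 */
#include <math.h>
#include <stdint.h>

static int32_t cvt_f32_to_i32_ref(float v) {
  v = nearbyintf(v);   /* round in the current mode, as cvtss2si does */
  if (!(v > (float) INT32_MIN && v < (float) INT32_MAX))
    return INT32_MIN;  /* NaN and out-of-range inputs */
  return (int32_t) v;
}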
+ SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #else + r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); + #endif } #endif @@ -2165,10 +2205,16 @@ simde_mm_cvtt_ss2si (simde__m128 a) { #else simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0)); #else - return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); + simde_float32 v = a_.f32[0]; + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #else + return SIMDE_CONVERT_FTOI(int32_t, v); + #endif #endif #endif } @@ -2248,7 +2294,9 @@ simde_mm_div_ps (simde__m128 a, simde__m128 b) { float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32)); r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else @@ -2305,8 +2353,7 @@ simde_mm_extract_pi16 (simde__m64 a, const int imm8) return a_.i16[imm8]; } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) -# if HEDLEY_HAS_WARNING("-Wvector-conversion") - /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ +# if defined(SIMDE_BUG_CLANG_44589) # define simde_mm_extract_pi16(a, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ @@ -2339,8 +2386,7 @@ simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) return simde__m64_from_private(r_); } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) -# if HEDLEY_HAS_WARNING("-Wvector-conversion") - /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ +# if defined(SIMDE_BUG_CLANG_44589) # define ssimde_mm_insert_pi16(a, i, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ @@ -2362,8 +2408,6 @@ simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps(mem_addr); #else @@ -2376,7 +2420,7 @@ simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_ld(0, mem_addr); #else - r_ = *SIMDE_ALIGN_CAST(simde__m128_private const*, mem_addr); + simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_)); #endif return simde__m128_from_private(r_); @@ -2388,7 +2432,7 @@ simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 -simde_mm_load_ps1 (simde_float32 const* mem_addr) { +simde_mm_load1_ps (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps1(mem_addr); #else @@ -2403,10 +2447,10 @@ simde_mm_load_ps1 (simde_float32 const* mem_addr) { return simde__m128_from_private(r_); #endif } -#define simde_mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr) +#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) -# define _mm_load_ps1(mem_addr) simde_mm_load_ps1(mem_addr) -# define _mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr) +# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) +# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2457,7 +2501,11 @@ simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) { #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) -# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr)) + #if HEDLEY_HAS_WARNING("-Wold-style-cast") + #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr))) + #else + #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr)) + #endif #endif /* The SSE documentation says that there are no alignment requirements @@ -2495,14 +2543,16 @@ simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) { #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) -# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr)) + #if HEDLEY_HAS_WARNING("-Wold-style-cast") + #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr))) + #else + #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr)) + #endif #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadr_ps(mem_addr); #else @@ -2510,21 +2560,19 @@ simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { r_, v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr)); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_f32 = vrev64q_f32(v_.neon_f32); - r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && 0 - /* TODO: XLC documentation has it, but it doesn't seem to work. - * More investigation is necessary. 
*/ - r_.altivec_f32 = vec_reve(a_.altivec_f32); - #elif defined(SIMDE_SHUFFLE_VECTOR_) - r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); - #else - r_.f32[0] = v_.f32[3]; - r_.f32[1] = v_.f32[2]; - r_.f32[2] = v_.f32[1]; - r_.f32[3] = v_.f32[0]; - #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vrev64q_f32(v_.neon_f32); + r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) + r_.altivec_f32 = vec_reve(v_.altivec_f32); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); + #else + r_.f32[0] = v_.f32[3]; + r_.f32[1] = v_.f32[2]; + r_.f32[2] = v_.f32[1]; + r_.f32[3] = v_.f32[0]; + #endif return simde__m128_from_private(r_); #endif @@ -2545,11 +2593,10 @@ simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_load(mem_addr); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) + r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #else - r_.f32[0] = mem_addr[0]; - r_.f32[1] = mem_addr[1]; - r_.f32[2] = mem_addr[2]; - r_.f32[3] = mem_addr[3]; + simde_memcpy(&r_, mem_addr, sizeof(r_)); #endif return simde__m128_from_private(r_); @@ -2621,12 +2668,18 @@ simde_mm_max_ps (simde__m128 a, simde__m128 b) { a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS) r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -2855,6 +2908,9 @@ simde_mm_movehl_ps (simde__m128 a, simde__m128 b) { float32x2_t a32 = vget_high_f32(a_.neon_f32); float32x2_t b32 = vget_high_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(b32, a32); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), + vec_mergel(b_.altivec_i64, a_.altivec_i64)); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3); #else @@ -2888,6 +2944,9 @@ simde_mm_movelh_ps (simde__m128 a, simde__m128 b) { r_.neon_f32 = vcombine_f32(a10, b10); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), + vec_mergeh(a_.altivec_i64, b_.altivec_i64)); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; @@ -2988,6 +3047,8 @@ simde_mm_mul_ps (simde__m128 a, simde__m128 b) { 
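/*
 * Ordering that the non-SIMDE_FAST_NANS paths above reproduce: like the x86
 * MAXPS instruction, _mm_max_ps returns the second operand whenever the
 * greater-than comparison fails, which includes NaN operands, so the
 * operation is not commutative.  Scalar sketch for one lane:
 */
static float maxps_lane_ref(float a, float b) {
  return (a > b) ? a : b;   /* any NaN makes the compare false, so b is returned */
}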
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 * b_.f32; + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -3059,6 +3120,50 @@ simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) { # define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #endif +#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION) + #define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0) + #define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1) + #define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2) + #define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3) + #define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4) + #define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5) + #define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6) + #define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7) +#else + #define SIMDE_MM_HINT_NTA 0 + #define SIMDE_MM_HINT_T0 1 + #define SIMDE_MM_HINT_T1 2 + #define SIMDE_MM_HINT_T2 3 + #define SIMDE_MM_HINT_ENTA 4 + #define SIMDE_MM_HINT_ET0 5 + #define SIMDE_MM_HINT_ET1 6 + #define SIMDE_MM_HINT_ET2 7 +#endif + +#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) + HEDLEY_DIAGNOSTIC_PUSH + #if HEDLEY_HAS_WARNING("-Wreserved-id-macro") + _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") + #endif + #undef _MM_HINT_NTA + #define _MM_HINT_NTA SIMDE_MM_HINT_NTA + #undef _MM_HINT_T0 + #define _MM_HINT_T0 SIMDE_MM_HINT_T0 + #undef _MM_HINT_T1 + #define _MM_HINT_T1 SIMDE_MM_HINT_T1 + #undef _MM_HINT_T2 + #define _MM_HINT_T2 SIMDE_MM_HINT_T2 + #undef _MM_HINT_ETNA + #define _MM_HINT_ETNA SIMDE_MM_HINT_ETNA + #undef _MM_HINT_ET0 + #define _MM_HINT_ET0 SIMDE_MM_HINT_ET0 + #undef _MM_HINT_ET1 + #define _MM_HINT_ET1 SIMDE_MM_HINT_ET1 + #undef _MM_HINT_ET1 + #define _MM_HINT_ET2 SIMDE_MM_HINT_ET2 + HEDLEY_DIAGNOSTIC_POP +#endif + SIMDE_FUNCTION_ATTRIBUTES void simde_mm_prefetch (char const* p, int i) { @@ -3071,10 +3176,20 @@ simde_mm_prefetch (char const* p, int i) { (void) i; } #if defined(SIMDE_X86_SSE_NATIVE) -# define simde_mm_prefetch(p, i) _mm_prefetch(p, i) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */ + #define simde_mm_prefetch(p, i) \ + (__extension__({ \ + HEDLEY_DIAGNOSTIC_PUSH \ + HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ + _mm_prefetch((p), (i)); \ + HEDLEY_DIAGNOSTIC_POP \ + })) + #else + #define simde_mm_prefetch(p, i) _mm_prefetch(p, i) + #endif #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) -# define _mm_prefetch(p, i) simde_mm_prefetch(p, i) + #define _mm_prefetch(p, i) simde_mm_prefetch(p, i) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -3094,6 +3209,8 @@ simde_x_mm_negate_ps(simde__m128 a) { r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_VECTOR_NEGATE) r_.f32 = -a_.f32; #else @@ -3195,6 +3312,8 @@ simde_mm_rsqrt_ps (simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrsqrteq_f32(a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_rsqrte(a_.altivec_f32); #elif defined(SIMDE_IEEE754_STORAGE) /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf Pages 100 - 103 */ @@ -3502,6 
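/*
 * The SIMDE_IEEE754_STORAGE fallback referenced above derives a reciprocal
 * square root estimate by manipulating the float's bit pattern and then
 * refining it.  A sketch of that style of estimate using the classic
 * constant and one Newton-Raphson step; the constants and the number of
 * refinement steps in the library itself may differ.
 */
#include <stdint.h>
#include <string.h>

static float rsqrt_estimate(float x) {
  float half = 0.5f * x;
  uint32_t bits;
  memcpy(&bits, &x, sizeof(bits));           /* reinterpret without aliasing UB */
  bits = UINT32_C(0x5f3759df) - (bits >> 1); /* crude exponent/mantissa guess */
  float y;
  memcpy(&y, &bits, sizeof(y));
  return y * (1.5f - half * y * y);          /* one Newton-Raphson refinement */
}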
+3621,22 @@ HEDLEY_DIAGNOSTIC_POP #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_mm_shuffle_ps(a, b, imm8) \ + __extension__({ \ + float32x4_t ret; \ + ret = vmovq_n_f32( \ + vgetq_lane_f32(a, (imm8) & (0x3))); \ + ret = vsetq_lane_f32( \ + vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \ + ret, 1); \ + ret = vsetq_lane_f32( \ + vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \ + ret, 2); \ + ret = vsetq_lane_f32( \ + vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \ + ret, 3); \ + }) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \ simde__m128_from_private((simde__m128_private) { .f32 = \ @@ -3554,6 +3689,8 @@ simde_mm_sqrt_ps (simde__m128 a) { r_.neon_f32 = vmulq_f32(a_.neon_f32, est); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_sqrt(a_.altivec_f32); #elif defined(simde_math_sqrt) SIMDE_VECTORIZE for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) { @@ -3605,8 +3742,6 @@ simde_mm_sqrt_ss (simde__m128 a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps(mem_addr, a); #else @@ -3614,17 +3749,12 @@ simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); - #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - vec_vsx_st(a_.altivec_f32, 0, mem_addr); - #elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(a_.altivec_f32, 0, mem_addr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr, a_.wasm_v128); #else - SIMDE_VECTORIZE_ALIGNED(mem_addr:16) - for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { - mem_addr[i] = a_.f32[i]; - } + simde_memcpy(mem_addr, &a_, sizeof(a)); #endif #endif } @@ -3634,29 +3764,36 @@ simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { SIMDE_FUNCTION_ATTRIBUTES void -simde_mm_store_ps1 (simde_float32 mem_addr[4], simde__m128 a) { - simde_assert_aligned(16, mem_addr); - -#if defined(SIMDE_X86_SSE_NATIVE) - _mm_store_ps1(mem_addr, a); -#else - simde__m128_private a_ = simde__m128_to_private(a); +simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) { + simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 0); - mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 0); - mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 0); - mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0); + #if defined(SIMDE_X86_SSE_NATIVE) + _mm_store_ps1(mem_addr_, a); #else - SIMDE_VECTORIZE_ALIGNED(mem_addr:16) - for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { - mem_addr[i] = a_.f32[0]; - } + simde__m128_private a_ = simde__m128_to_private(a); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + simde__m128_private tmp_; + tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 
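/*
 * Selector decoding used by the NEON simde_mm_shuffle_ps macro above: the
 * four 2-bit fields of imm8 pick source lanes, the low two from a and the
 * high two from b.  Reference in array form (illustrative names):
 */
static void shuffle_ps_ref(float r[4], const float a[4], const float b[4], unsigned imm8) {
  r[0] = a[imm8 & 3];
  r[1] = a[(imm8 >> 2) & 3];
  r[2] = b[(imm8 >> 4) & 3];
  r[3] = b[(imm8 >> 6) & 3];
}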
0); + simde_mm_store_ps(mem_addr_, tmp_.f32); + #else + SIMDE_VECTORIZE_ALIGNED(mem_addr_:16) + for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { + mem_addr_[i] = a_.f32[0]; + } + #endif #endif -#endif } +#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) -# define _mm_store_ps1(mem_addr, a) simde_mm_store_ps1(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) +# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) +# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -3678,35 +3815,18 @@ simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) { # define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif -SIMDE_FUNCTION_ATTRIBUTES -void -simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) { - simde_assert_aligned(16, mem_addr); - -#if defined(SIMDE_X86_SSE_NATIVE) - _mm_store1_ps(mem_addr, a); -#else - simde_mm_store_ps1(mem_addr, a); -#endif -} -#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) -# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) -#endif - SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else - simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr); simde__m128_private a_ = simde__m128_to_private(a); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - dest_->f32[0] = vgetq_lane_f32(a_.neon_f32, 2); - dest_->f32[1] = vgetq_lane_f32(a_.neon_f32, 3); + vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32)); #else - dest_->f32[0] = a_.f32[2]; - dest_->f32[1] = a_.f32[3]; + simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1])); #endif #endif } @@ -3738,28 +3858,26 @@ simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) { - simde_assert_aligned(16, mem_addr); - -#if defined(SIMDE_X86_SSE_NATIVE) - _mm_storer_ps(mem_addr, a); -#else - simde__m128_private a_ = simde__m128_to_private(a); - - #if defined(SIMDE_SHUFFLE_VECTOR_) - a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0); - simde_mm_store_ps(mem_addr, simde__m128_from_private(a_)); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 3); - mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 2); - mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 1); - mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0); + #if defined(SIMDE_X86_SSE_NATIVE) + _mm_storer_ps(mem_addr, a); #else - SIMDE_VECTORIZE_ALIGNED(mem_addr:16) - for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { - mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i]; - } + simde__m128_private a_ = simde__m128_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + vec_st(vec_reve(a_.altivec_f32), 0, mem_addr); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4_t tmp = vrev64q_f32(a_.neon_f32); + vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0); + simde_mm_store_ps(mem_addr, 
simde__m128_from_private(a_)); + #else + SIMDE_VECTORIZE_ALIGNED(mem_addr:16) + for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { + mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i]; + } + #endif #endif -#endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) @@ -3775,6 +3893,8 @@ simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + vec_vsx_st(a_.altivec_f32, 0, mem_addr); #else simde_memcpy(mem_addr, &a_, sizeof(a_)); #endif @@ -3799,6 +3919,8 @@ simde_mm_sub_ps (simde__m128 a, simde__m128 b) { r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 - b_.f32; #else @@ -4107,6 +4229,8 @@ simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) @@ -4152,19 +4276,14 @@ simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) { - simde_assert_aligned(16, mem_addr); - -#if defined(SIMDE_X86_SSE_NATIVE) - _mm_stream_ps(mem_addr, a); -#else - simde__m128_private a_ = simde__m128_to_private(a); - -#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - vst1q_f32(SIMDE_ASSUME_ALIGNED(16, mem_addr), a_.neon_f32); -#else - simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_)); -#endif -#endif + #if defined(SIMDE_X86_SSE_NATIVE) + _mm_stream_ps(mem_addr, a); + #elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + simde__m128_private a_ = simde__m128_to_private(a); + __builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr)); + #else + simde_mm_store_ps(mem_addr, a); + #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) diff --git a/lib/mmseqs/lib/simde/simde/x86/sse2.h b/lib/mmseqs/lib/simde/simde/x86/sse2.h index 0c2be44..10a5a69 100644 --- a/lib/mmseqs/lib/simde/simde/x86/sse2.h +++ b/lib/mmseqs/lib/simde/simde/x86/sse2.h @@ -40,160 +40,160 @@ SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int16_t i16 
SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - #endif - SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - - SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - #else - SIMDE_ALIGN(16) int8_t i8[16]; - SIMDE_ALIGN(16) int16_t i16[8]; - SIMDE_ALIGN(16) int32_t i32[4]; - SIMDE_ALIGN(16) int64_t i64[2]; - SIMDE_ALIGN(16) uint8_t u8[16]; - SIMDE_ALIGN(16) uint16_t u16[8]; - SIMDE_ALIGN(16) uint32_t u32[4]; - SIMDE_ALIGN(16) uint64_t u64[2]; + SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + #endif + SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + + SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_16 int8_t i8[16]; + SIMDE_ALIGN_TO_16 int16_t i16[8]; + SIMDE_ALIGN_TO_16 int32_t i32[4]; + SIMDE_ALIGN_TO_16 int64_t i64[2]; + SIMDE_ALIGN_TO_16 uint8_t u8[16]; + SIMDE_ALIGN_TO_16 uint16_t u16[8]; + SIMDE_ALIGN_TO_16 uint32_t u32[4]; + SIMDE_ALIGN_TO_16 uint64_t u64[2]; #if defined(SIMDE_HAVE_INT128_) - SIMDE_ALIGN(16) simde_int128 i128[1]; - SIMDE_ALIGN(16) simde_uint128 u128[1]; + SIMDE_ALIGN_TO_16 simde_int128 i128[1]; + SIMDE_ALIGN_TO_16 simde_uint128 u128[1]; #endif - SIMDE_ALIGN(16) simde_float32 f32[4]; - SIMDE_ALIGN(16) simde_float64 f64[2]; + SIMDE_ALIGN_TO_16 simde_float32 f32[4]; + SIMDE_ALIGN_TO_16 simde_float64 f64[2]; - SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; + SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; #endif - SIMDE_ALIGN(16) simde__m64_private m64_private[2]; - SIMDE_ALIGN(16) simde__m64 m64[2]; + SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; + SIMDE_ALIGN_TO_16 simde__m64 m64[2]; #if defined(SIMDE_X86_SSE2_NATIVE) - SIMDE_ALIGN(16) __m128i n; + SIMDE_ALIGN_TO_16 __m128i n; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN(16) int8x16_t neon_i8; - SIMDE_ALIGN(16) int16x8_t neon_i16; - SIMDE_ALIGN(16) int32x4_t neon_i32; - SIMDE_ALIGN(16) int64x2_t neon_i64; - SIMDE_ALIGN(16) uint8x16_t neon_u8; - SIMDE_ALIGN(16) uint16x8_t neon_u16; - SIMDE_ALIGN(16) uint32x4_t neon_u32; - SIMDE_ALIGN(16) uint64x2_t neon_u64; - SIMDE_ALIGN(16) float32x4_t neon_f32; + SIMDE_ALIGN_TO_16 int8x16_t neon_i8; + SIMDE_ALIGN_TO_16 int16x8_t neon_i16; + SIMDE_ALIGN_TO_16 int32x4_t neon_i32; + SIMDE_ALIGN_TO_16 int64x2_t neon_i64; + SIMDE_ALIGN_TO_16 uint8x16_t neon_u8; + SIMDE_ALIGN_TO_16 uint16x8_t neon_u16; + SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; + SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; + 
SIMDE_ALIGN_TO_16 float32x4_t neon_f32; #if defined(SIMDE_ARCH_AARCH64) - SIMDE_ALIGN(16) float64x2_t neon_f64; + SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) - SIMDE_ALIGN(16) v128_t wasm_v128; + SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; #if defined(__UINT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f; #else - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f; #endif - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; #if defined(__UINT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f; #else - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f; #endif - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; #endif #endif } simde__m128i_private; typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) - SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - #else - SIMDE_ALIGN(16) int8_t i8[16]; - SIMDE_ALIGN(16) 
int16_t i16[8]; - SIMDE_ALIGN(16) int32_t i32[4]; - SIMDE_ALIGN(16) int64_t i64[2]; - SIMDE_ALIGN(16) uint8_t u8[16]; - SIMDE_ALIGN(16) uint16_t u16[8]; - SIMDE_ALIGN(16) uint32_t u32[4]; - SIMDE_ALIGN(16) uint64_t u64[2]; - SIMDE_ALIGN(16) simde_float32 f32[4]; - SIMDE_ALIGN(16) simde_float64 f64[2]; - SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)]; - SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; - #endif - - SIMDE_ALIGN(16) simde__m64_private m64_private[2]; - SIMDE_ALIGN(16) simde__m64 m64[2]; - - #if defined(SIMDE_X86_SSE2_NATIVE) - SIMDE_ALIGN(16) __m128d n; + SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_16 int8_t i8[16]; + SIMDE_ALIGN_TO_16 int16_t i16[8]; + SIMDE_ALIGN_TO_16 int32_t i32[4]; + SIMDE_ALIGN_TO_16 int64_t i64[2]; + SIMDE_ALIGN_TO_16 uint8_t u8[16]; + SIMDE_ALIGN_TO_16 uint16_t u16[8]; + SIMDE_ALIGN_TO_16 uint32_t u32[4]; + SIMDE_ALIGN_TO_16 uint64_t u64[2]; + SIMDE_ALIGN_TO_16 simde_float32 f32[4]; + SIMDE_ALIGN_TO_16 simde_float64 f64[2]; + SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; + #endif + + SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; + SIMDE_ALIGN_TO_16 simde__m64 m64[2]; + + #if defined(SIMDE_X86_SSE2_NATIVE) + SIMDE_ALIGN_TO_16 __m128d n; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN(16) int8x16_t neon_i8; - SIMDE_ALIGN(16) int16x8_t neon_i16; - SIMDE_ALIGN(16) int32x4_t neon_i32; - SIMDE_ALIGN(16) int64x2_t neon_i64; - SIMDE_ALIGN(16) uint8x16_t neon_u8; - SIMDE_ALIGN(16) uint16x8_t neon_u16; - SIMDE_ALIGN(16) uint32x4_t neon_u32; - SIMDE_ALIGN(16) uint64x2_t neon_u64; - SIMDE_ALIGN(16) float32x4_t neon_f32; + SIMDE_ALIGN_TO_16 int8x16_t neon_i8; + SIMDE_ALIGN_TO_16 int16x8_t neon_i16; + SIMDE_ALIGN_TO_16 int32x4_t neon_i32; + SIMDE_ALIGN_TO_16 int64x2_t neon_i64; + SIMDE_ALIGN_TO_16 uint8x16_t neon_u8; + SIMDE_ALIGN_TO_16 uint16x8_t neon_u16; + SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; + SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; + SIMDE_ALIGN_TO_16 float32x4_t neon_f32; #if defined(SIMDE_ARCH_AARCH64) - SIMDE_ALIGN(16) float64x2_t neon_f64; + SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) - SIMDE_ALIGN(16) v128_t wasm_v128; + SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; + SIMDE_ALIGN_TO_16 
SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; #if defined(__INT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f; #else - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f; #endif - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; #if defined(__UINT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f; #else - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f; #endif - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; - SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; #endif #endif } simde__m128d_private; @@ -221,8 +221,8 @@ typedef union { typedef simde__m128d_private simde__m128d; #endif #elif defined(SIMDE_VECTOR_SUBSCRIPT) - typedef int64_t simde__m128i SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; - typedef simde_float64 simde__m128d SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + typedef int64_t simde__m128i SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + typedef simde_float64 simde__m128d SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else typedef simde__m128i_private simde__m128i; typedef simde__m128d_private simde__m128d; @@ -344,6 +344,9 @@ simde__m128d_to_private(simde__m128d v) { SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(double), altivec, f64) #endif #endif +#elif defined(SIMDE_WASM_SIMD128_NATIVE) + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, v128_t, wasm, v128); + SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, v128_t, wasm, v128); #endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */ SIMDE_FUNCTION_ATTRIBUTES @@ -357,7 +360,7 @@ simde_mm_set_pd (simde_float64 e1, simde_float64 e0) { #if defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_make(e0, e1); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) - SIMDE_ALIGN(16) simde_float64 data[2] = { e0, e1 }; + SIMDE_ALIGN_TO_16 simde_float64 data[2] = { e0, e1 }; r_.neon_f64 = vld1q_f64(data); #else r_.f64[0] = e0; @@ -430,28 +433,33 @@ simde_x_mm_abs_pd(simde__m128d a) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_x_mm_not_pd(simde__m128d a) { - simde__m128d_private - r_, - a_ = simde__m128d_to_private(a); - - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_i32 = 
vmvnq_s32(a_.neon_i32); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) - r_.altivec_f64 = vec_nor(a_.altivec_f64, a_.altivec_f64); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32f = ~a_.i32f; + #if defined(SIMDE_X86_AVX512VL_NATIVE) + __m128i ai = _mm_castpd_si128(a); + return _mm_castsi128_pd(_mm_ternarylogic_epi64(ai, ai, ai, 0x55)); #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { - r_.i32f[i] = ~(a_.i32f[i]); - } - #endif + simde__m128d_private + r_, + a_ = simde__m128d_to_private(a); - return simde__m128d_from_private(r_); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmvnq_s32(a_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f64 = vec_nor(a_.altivec_f64, a_.altivec_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32f = ~a_.i32f; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { + r_.i32f[i] = ~(a_.i32f[i]); + } + #endif + + return simde__m128d_from_private(r_); + #endif } SIMDE_FUNCTION_ATTRIBUTES @@ -504,6 +512,8 @@ simde_mm_add_epi8 (simde__m128i a, simde__m128i b) { r_.neon_i8 = vaddq_s8(a_.neon_i8, b_.neon_i8); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i8 = vec_add(a_.altivec_i8, b_.altivec_i8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i8x16_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i8 = a_.i8 + b_.i8; #else @@ -535,6 +545,8 @@ simde_mm_add_epi16 (simde__m128i a, simde__m128i b) { r_.neon_i16 = vaddq_s16(a_.neon_i16, b_.neon_i16); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i16 = vec_add(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i16x8_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i16 = a_.i16 + b_.i16; #else @@ -566,6 +578,8 @@ simde_mm_add_epi32 (simde__m128i a, simde__m128i b) { r_.neon_i32 = vaddq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_add(a_.altivec_i32, b_.altivec_i32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i32x4_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 + b_.i32; #else @@ -597,6 +611,8 @@ simde_mm_add_epi64 (simde__m128i a, simde__m128i b) { r_.neon_i64 = vaddq_s64(a_.neon_i64, b_.neon_i64); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_i64 = vec_add(a_.altivec_i64, b_.altivec_i64); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i64x2_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 + b_.i64; #else @@ -630,6 +646,8 @@ simde_mm_add_pd (simde__m128d a, simde__m128d b) { r_.wasm_v128 = wasm_f64x2_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f64 = vec_add(a_.altivec_f64, b_.altivec_f64); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_f64x2_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f64 = a_.f64 + b_.f64; #else @@ -660,11 +678,13 @@ simde_mm_move_sd (simde__m128d a, simde__m128d b) 
{ #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(b_.neon_f64, 0), a_.neon_f64, 0); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = { - 16, 17, 18, 19, 20, 21, 22, 23, - 8, 9, 10, 11, 12, 13, 14, 15 - }; - r_.altivec_f64 = vec_perm(a_.altivec_f64, b_.altivec_f64, m); + #if defined(HEDLEY_IBM_VERSION) + r_.altivec_f64 = vec_xxpermdi(a_.altivec_f64, b_.altivec_f64, 1); + #else + r_.altivec_f64 = vec_xxpermdi(b_.altivec_f64, a_.altivec_f64, 1); + #endif + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 2, 1); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 2, 1); #else @@ -684,6 +704,8 @@ simde__m128d simde_mm_add_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_add_sd(a, b); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_add_pd(a, b)); #else simde__m128d_private r_, @@ -693,13 +715,6 @@ simde_mm_add_sd (simde__m128d a, simde__m128d b) { r_.f64[0] = a_.f64[0] + b_.f64[0]; r_.f64[1] = a_.f64[1]; - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) - return simde_mm_move_sd(a, simde_mm_add_pd(a, b)); - #else - r_.f64[0] = a_.f64[0] + b_.f64[0]; - r_.f64[1] = a_.f64[1]; - #endif - return simde__m128d_from_private(r_); #endif } @@ -776,7 +791,6 @@ simde_mm_adds_epi16 (simde__m128i a, simde__m128i b) { a_ = simde__m128i_to_private(a), b_ = simde__m128i_to_private(b); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vqaddq_s16(a_.neon_i16, b_.neon_i16); #elif defined(SIMDE_WASM_SIMD128_NATIVE) @@ -815,7 +829,7 @@ simde_mm_adds_epu8 (simde__m128i a, simde__m128i b) { r_.neon_u8 = vqaddq_u8(a_.neon_u8, b_.neon_u8); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_u8x16_add_saturate(a_.wasm_v128, b_.wasm_v128); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_u8 = vec_adds(a_.altivec_u8, b_.altivec_u8); #else SIMDE_VECTORIZE @@ -1113,6 +1127,8 @@ simde_mm_setzero_si128 (void) { r_.neon_i32 = vdupq_n_s32(0); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_splats(HEDLEY_STATIC_CAST(signed int, 0)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i32x4_splat(INT32_C(0)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i32 = __extension__ (__typeof__(r_.i32)) { 0, 0, 0, 0 }; #else @@ -1143,14 +1159,14 @@ simde_mm_bslli_si128 (simde__m128i a, const int imm8) #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_ENDIAN_ORDER) r_.altivec_i8 = - #if (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) - vec_slo - #else /* SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG */ - vec_sro - #endif + #if (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + vec_slo + #else /* SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG */ + vec_sro + #endif (a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, imm8 * 8))); - #elif defined(SIMDE_HAVE_INT128_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) && 0 - r_.u128[0] = a_.u128[0] << s; + #elif defined(SIMDE_HAVE_INT128_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + r_.u128[0] = a_.u128[0] << (imm8 * 8); #else r_ = simde__m128i_to_private(simde_mm_setzero_si128()); for (int i = imm8 ; i < HEDLEY_STATIC_CAST(int, sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -1732,8 +1748,8 @@ simde_mm_cmpneq_pd (simde__m128d a, simde__m128d b) { a_ = simde__m128d_to_private(a), b_ = simde__m128d_to_private(b); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u16 = 
vmvnq_u16(vceqq_s16(b_.neon_i16, a_.neon_i16)); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u32 = vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(b_.neon_f64, a_.neon_f64))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_ne(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -2196,6 +2212,32 @@ simde_mm_cmpge_sd (simde__m128d a, simde__m128d b) { #define _mm_cmpge_sd(a, b) simde_mm_cmpge_sd(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_cmpngt_pd (simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_SSE2_NATIVE) + return _mm_cmpngt_pd(a, b); + #else + return simde_mm_cmple_pd(a, b); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_cmpngt_pd(a, b) simde_mm_cmpngt_pd(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_cmpngt_sd (simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI) + return _mm_cmpngt_sd(a, b); + #else + return simde_mm_cmple_sd(a, b); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_cmpngt_sd(a, b) simde_mm_cmpngt_sd(a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_cmpnge_pd (simde__m128d a, simde__m128d b) { @@ -2315,7 +2357,11 @@ simde_mm_cvtsd_f64 (simde__m128d a) { return _mm_cvtsd_f64(a); #else simde__m128d_private a_ = simde__m128d_to_private(a); - return a_.f64[0]; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(simde_float64, vgetq_lane_f64(a_.neon_f64, 0)); + #else + return a_.f64[0]; + #endif #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) @@ -2469,46 +2515,48 @@ simde_mm_cvtepi32_ps (simde__m128i a) { #endif SIMDE_FUNCTION_ATTRIBUTES -simde__m128i -simde_mm_cvtpd_epi32 (simde__m128d a) { - #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cvtpd_epi32(a); +simde__m64 +simde_mm_cvtpd_pi32 (simde__m128d a) { + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + return _mm_cvtpd_pi32(a); #else - simde__m128i_private r_; + simde__m64_private r_; simde__m128d_private a_ = simde__m128d_to_private(a); SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { - r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_nearbyint(a_.f64[i])); + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + simde_float64 v = simde_math_round(a_.f64[i]); + #if defined(SIMDE_FAST_CONVERSION_RANGE) + r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); + #else + r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ? 
+ SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #endif } - simde_memset(&(r_.m64_private[1]), 0, sizeof(r_.m64_private[1])); - return simde__m128i_from_private(r_); + return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) - #define _mm_cvtpd_epi32(a) simde_mm_cvtpd_epi32(a) + #define _mm_cvtpd_pi32(a) simde_mm_cvtpd_pi32(a) #endif SIMDE_FUNCTION_ATTRIBUTES -simde__m64 -simde_mm_cvtpd_pi32 (simde__m128d a) { - #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_cvtpd_pi32(a); +simde__m128i +simde_mm_cvtpd_epi32 (simde__m128d a) { + #if defined(SIMDE_X86_SSE2_NATIVE) + return _mm_cvtpd_epi32(a); #else - simde__m64_private r_; - simde__m128d_private a_ = simde__m128d_to_private(a); + simde__m128i_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyint(a_.f64[i])); - } + r_.m64[0] = simde_mm_cvtpd_pi32(a); + r_.m64[1] = simde_mm_setzero_si64(); - return simde__m64_from_private(r_); + return simde__m128i_from_private(r_); #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) - #define _mm_cvtpd_pi32(a) simde_mm_cvtpd_pi32(a) + #define _mm_cvtpd_epi32(a) simde_mm_cvtpd_epi32(a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2574,32 +2622,27 @@ simde_mm_cvtps_epi32 (simde__m128 a) { simde__m128i_private r_; simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - /* The default rounding mode on SSE is 'round to even', which ArmV7 - does not support! It is supported on ARMv8 however. */ - #if defined(SIMDE_ARCH_AARCH64) - r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32); - #else - uint32x4_t signmask = vdupq_n_u32(0x80000000); - float32x4_t half = vbslq_f32(signmask, a_.neon_f32, vdupq_n_f32(0.5f)); /* +/- 0.5 */ - int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(a_.neon_f32, half)); /* round to integer: [a + 0.5]*/ - int32x4_t r_trunc = vcvtq_s32_f32(a_.neon_f32); /* truncate to integer: [a] */ - int32x4_t plusone = vshrq_n_s32(vnegq_s32(r_trunc), 31); /* 1 or 0 */ - int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone), vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */ - float32x4_t delta = vsubq_f32(a_.neon_f32, vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */ - uint32x4_t is_delta_half = vceqq_f32(delta, half); /* delta == +/- 0.5 */ - r_.neon_i32 = vbslq_s32(is_delta_half, r_even, r_normal); - #endif - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) + r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) + r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ - r_.altivec_i32 = vec_cts(vec_round(a_.altivec_f32), 0); + r_.altivec_i32 = vec_cts(a_.altivec_f32, 1); HEDLEY_DIAGNOSTIC_POP #else + a_ = simde__m128_to_private(simde_x_mm_round_ps(a, SIMDE_MM_FROUND_TO_NEAREST_INT, 1)); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_roundf(a_.f32[i])); + simde_float32 v = simde_math_roundf(a_.f32[i]); + #if defined(SIMDE_FAST_CONVERSION_RANGE) + r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); + #else + 
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #endif } #endif @@ -2644,7 +2687,14 @@ simde_mm_cvtsd_si32 (simde__m128d a) { return _mm_cvtsd_si32(a); #else simde__m128d_private a_ = simde__m128d_to_private(a); - return SIMDE_CONVERT_FTOI(int32_t, simde_math_round(a_.f64[0])); + + simde_float64 v = simde_math_round(a_.f64[0]); + #if defined(SIMDE_FAST_CONVERSION_RANGE) + return SIMDE_CONVERT_FTOI(int32_t, v); + #else + return ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #endif #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) @@ -2699,6 +2749,26 @@ simde_mm_cvtsd_ss (simde__m128 a, simde__m128d b) { #define _mm_cvtsd_ss(a, b) simde_mm_cvtsd_ss(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_x_mm_cvtsi128_si16 (simde__m128i a) { + simde__m128i_private + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vgetq_lane_s16(a_.neon_i16, 0); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return HEDLEY_STATIC_CAST(int16_t, wasm_i16x8_extract_lane(a_.wasm_v128, 0)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #if defined(SIMDE_BUG_GCC_95227) + (void) a_; + #endif + return vec_extract(a_.altivec_i16, 0); + #else + return a_.i16[0]; + #endif +} + SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtsi128_si32 (simde__m128i a) { @@ -2711,7 +2781,7 @@ simde_mm_cvtsi128_si32 (simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_s32(a_.neon_i32, 0); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_extract_lane(a_.wasm_v128, 0); + return HEDLEY_STATIC_CAST(int32_t, wasm_i32x4_extract_lane(a_.wasm_v128, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) #if defined(SIMDE_BUG_GCC_95227) (void) a_; @@ -2742,7 +2812,7 @@ simde_mm_cvtsi128_si64 (simde__m128i a) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_s64(a_.neon_i64, 0); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_extract_lane(a_.wasm_v128, 0); + return HEDLEY_STATIC_CAST(int64_t, wasm_i64x2_extract_lane(a_.wasm_v128, 0)); #endif return a_.i64[0]; #endif @@ -2776,6 +2846,29 @@ simde_mm_cvtsi32_sd (simde__m128d a, int32_t b) { #define _mm_cvtsi32_sd(a, b) simde_mm_cvtsi32_sd(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cvtsi16_si128 (int16_t a) { + simde__m128i_private r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vsetq_lane_s16(a, vdupq_n_s16(0), 0); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i16x8_make(a, 0, 0, 0, 0, 0, 0, 0); + #else + r_.i16[0] = a; + r_.i16[1] = 0; + r_.i16[2] = 0; + r_.i16[3] = 0; + r_.i16[4] = 0; + r_.i16[5] = 0; + r_.i16[6] = 0; + r_.i16[7] = 0; + #endif + + return simde__m128i_from_private(r_); +} + SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_cvtsi32_si128 (int32_t a) { @@ -2884,27 +2977,6 @@ simde_mm_cvtss_sd (simde__m128d a, simde__m128 b) { #define _mm_cvtss_sd(a, b) simde_mm_cvtss_sd(a, b) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde__m128i -simde_mm_cvttpd_epi32 (simde__m128d a) { - #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cvttpd_epi32(a); - #else - simde__m128i_private r_; - simde__m128d_private a_ = simde__m128d_to_private(a); - - for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { - r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f64[i]); - } - simde_memset(&(r_.m64_private[1]), 0, 
sizeof(r_.m64_private[1])); - - return simde__m128i_from_private(r_); - #endif -} -#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) - #define _mm_cvttpd_epi32(a) simde_mm_cvttpd_epi32(a) -#endif - SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvttpd_pi32 (simde__m128d a) { @@ -2914,11 +2986,17 @@ simde_mm_cvttpd_pi32 (simde__m128d a) { simde__m64_private r_; simde__m128d_private a_ = simde__m128d_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.i32, a_.f64); #else for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f64[i]); + simde_float64 v = a_.f64[i]; + #if defined(SIMDE_FAST_CONVERSION_RANGE) + r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); + #else + r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #endif } #endif @@ -2929,6 +3007,24 @@ simde_mm_cvttpd_pi32 (simde__m128d a) { #define _mm_cvttpd_pi32(a) simde_mm_cvttpd_pi32(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_cvttpd_epi32 (simde__m128d a) { + #if defined(SIMDE_X86_SSE2_NATIVE) + return _mm_cvttpd_epi32(a); + #else + simde__m128i_private r_; + + r_.m64[0] = simde_mm_cvttpd_pi32(a); + r_.m64[1] = simde_mm_setzero_si64(); + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_cvttpd_epi32(a) simde_mm_cvttpd_epi32(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_cvttps_epi32 (simde__m128 a) { @@ -2938,13 +3034,19 @@ simde_mm_cvttps_epi32 (simde__m128 a) { simde__m128i_private r_; simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) r_.neon_i32 = vcvtq_s32_f32(a_.neon_f32); - #elif defined(SIMDE_CONVERT_VECTOR_) + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.i32, a_.f32); #else for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]); + simde_float32 v = a_.f32[i]; + #if defined(SIMDE_FAST_CONVERSION_RANGE) + r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); + #else + r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? + SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #endif } #endif @@ -2962,7 +3064,13 @@ simde_mm_cvttsd_si32 (simde__m128d a) { return _mm_cvttsd_si32(a); #else simde__m128d_private a_ = simde__m128d_to_private(a); - return SIMDE_CONVERT_FTOI(int32_t, a_.f64[0]); + simde_float64 v = a_.f64[0]; + #if defined(SIMDE_FAST_CONVERSION_RANGE) + return SIMDE_CONVERT_FTOI(int32_t, v); + #else + return ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ? 
+ SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; + #endif #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) @@ -3051,7 +3159,7 @@ simde_mm_div_sd (simde__m128d a, simde__m128d b) { SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_extract_epi16 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) { uint16_t r; simde__m128i_private a_ = simde__m128i_to_private(a); @@ -3079,7 +3187,7 @@ simde_mm_extract_epi16 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_insert_epi16 (simde__m128i a, int16_t i, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) { simde__m128i_private a_ = simde__m128i_to_private(a); a_.i16[imm8 & 7] = i; return simde__m128i_from_private(a_); @@ -3096,8 +3204,6 @@ simde_mm_insert_epi16 (simde__m128i a, int16_t i, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_load_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_load_pd(mem_addr); #else @@ -3108,7 +3214,7 @@ simde_mm_load_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vld1q_u32(HEDLEY_REINTERPRET_CAST(uint32_t const*, mem_addr)); #else - r_ = *SIMDE_ALIGN_CAST(simde__m128d_private const*, mem_addr); + simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128d), sizeof(r_)); #endif return simde__m128d_from_private(r_); @@ -3120,25 +3226,20 @@ simde_mm_load_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d -simde_mm_load_pd1 (simde_float64 const* mem_addr) { +simde_mm_load1_pd (simde_float64 const* mem_addr) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_load1_pd(mem_addr); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return simde__m128d_from_neon_f64(vld1q_dup_f64(mem_addr)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return simde__m128d_from_wasm_v128(wasm_v64x2_load_splat(mem_addr)); #else - simde__m128d_private r_; - - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_f64 = vld1q_dup_f64(mem_addr); - #else - r_.f64[0] = *mem_addr; - r_.f64[1] = *mem_addr; - #endif - - return simde__m128d_from_private(r_); + return simde_mm_set1_pd(*mem_addr); #endif } -#define simde_mm_load1_pd(mem_addr) simde_mm_load_pd1(mem_addr) +#define simde_mm_load_pd1(mem_addr) simde_mm_load1_pd(mem_addr) #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) - #define _mm_load_pd1(mem_addr) simde_mm_load_pd1(mem_addr) + #define _mm_load_pd1(mem_addr) simde_mm_load1_pd(mem_addr) #define _mm_load1_pd(mem_addr) simde_mm_load1_pd(mem_addr) #endif @@ -3167,22 +3268,20 @@ simde_mm_load_sd (simde_float64 const* mem_addr) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_load_si128 (simde__m128i const* mem_addr) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_load_si128(HEDLEY_REINTERPRET_CAST(__m128i const*, mem_addr)); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) || defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #else simde__m128i_private r_; #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_ld(0, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(int) const*, mem_addr)); - #else + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vld1q_s32(HEDLEY_REINTERPRET_CAST(int32_t const*, mem_addr)); + #else + simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128i), sizeof(simde__m128i)); #endif return simde__m128i_from_private(r_); 
- #else - return *mem_addr; #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) @@ -3269,8 +3368,6 @@ simde_mm_loadl_pd (simde__m128d a, simde_float64 const* mem_addr) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_loadr_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_loadr_pd(mem_addr); #else @@ -3278,8 +3375,14 @@ simde_mm_loadr_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) { r_; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - float64x2_t temp = simde_mm_load_pd(mem_addr); - r_.neon_f64 = vcombine_f64(vget_high_f64(temp), vget_low_f64(temp)); + r_.neon_f64 = vld1q_f64(mem_addr); + r_.neon_f64 = vextq_f64(r_.neon_f64, r_.neon_f64, 1); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vld1q_s64(HEDLEY_REINTERPRET_CAST(int64_t const *, mem_addr)); + r_.neon_i64 = vextq_s64(r_.neon_i64, r_.neon_i64, 1); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t tmp = wasm_v128_load(mem_addr); + r_.wasm_v128 = wasm_v64x2_shuffle(tmp, tmp, 1, 0); #else r_.f64[0] = mem_addr[1]; r_.f64[1] = mem_addr[0]; @@ -3425,12 +3528,19 @@ simde_mm_madd_epi16 (simde__m128i a, simde__m128i b) { a_ = simde__m128i_to_private(a), b_ = simde__m128i_to_private(b); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + int32x4_t pl = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16)); + int32x4_t ph = vmull_high_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i32 = vpaddq_s32(pl, ph); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int32x4_t pl = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16)); int32x4_t ph = vmull_s16(vget_high_s16(a_.neon_i16), vget_high_s16(b_.neon_i16)); int32x2_t rl = vpadd_s32(vget_low_s32(pl), vget_high_s32(pl)); int32x2_t rh = vpadd_s32(vget_low_s32(ph), vget_high_s32(ph)); r_.neon_i32 = vcombine_s32(rl, rh); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + static const SIMDE_POWER_ALTIVEC_VECTOR(int) tz = { 0, 0, 0, 0 }; + r_.altivec_i32 = vec_msum(a_.altivec_i16, b_.altivec_i16, tz); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) { @@ -3931,6 +4041,12 @@ simde_mm_mul_epu32 (simde__m128i a, simde__m128i b) { uint32x2_t a_lo = vmovn_u64(a_.neon_u64); uint32x2_t b_lo = vmovn_u64(b_.neon_u64); r_.neon_u64 = vmull_u32(a_lo, b_lo); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(a_.u32) z = { 0, }; + a_.u32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.u32, z, 0, 4, 2, 6); + b_.u32 = SIMDE_SHUFFLE_VECTOR_(32, 16, b_.u32, z, 0, 4, 2, 6); + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u32) * + HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { @@ -3956,7 +4072,7 @@ simde_x_mm_mul_epi64 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 * b_.i64; #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_f64 = vmulq_f64(a_.neon_f64, b_.neon_f64); + r_.neon_f64 = vmulq_s64(a_.neon_f64, b_.neon_f64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -4057,7 +4173,11 @@ simde_mm_mul_su32 (simde__m64 a, simde__m64 b) { a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); - r_.u64[0] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[0]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[0]); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.u64[0] = 
vget_lane_u64(vget_low_u64(vmull_u32(vreinterpret_u32_s64(a_.neon_i64), vreinterpret_u32_s64(b_.neon_i64))), 0); + #else + r_.u64[0] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[0]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[0]); + #endif return simde__m64_from_private(r_); #endif @@ -4081,11 +4201,16 @@ simde_mm_mulhi_epi16 (simde__m128i a, simde__m128i b) { int16x4_t a3210 = vget_low_s16(a_.neon_i16); int16x4_t b3210 = vget_low_s16(b_.neon_i16); int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */ - int16x4_t a7654 = vget_high_s16(a_.neon_i16); - int16x4_t b7654 = vget_high_s16(b_.neon_i16); - int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */ - uint16x8x2_t rv = vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654)); - r_.neon_u16 = rv.val[1]; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + int32x4_t ab7654 = vmull_high_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i16 = vuzp2q_s16(vreinterpretq_s16_s32(ab3210), vreinterpretq_s16_s32(ab7654)); + #else + int16x4_t a7654 = vget_high_s16(a_.neon_i16); + int16x4_t b7654 = vget_high_s16(b_.neon_i16); + int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */ + uint16x8x2_t rv = vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654)); + r_.neon_u16 = rv.val[1]; + #endif #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -4115,12 +4240,16 @@ simde_mm_mulhi_epu16 (simde__m128i a, simde__m128i b) { uint16x4_t a3210 = vget_low_u16(a_.neon_u16); uint16x4_t b3210 = vget_low_u16(b_.neon_u16); uint32x4_t ab3210 = vmull_u16(a3210, b3210); /* 3333222211110000 */ - uint16x4_t a7654 = vget_high_u16(a_.neon_u16); - uint16x4_t b7654 = vget_high_u16(b_.neon_u16); - uint32x4_t ab7654 = vmull_u16(a7654, b7654); /* 7777666655554444 */ - uint16x8x2_t neon_r = - vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654)); - r_.neon_u16 = neon_r.val[1]; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + uint32x4_t ab7654 = vmull_high_u16(a_.neon_u16, b_.neon_u16); + r_.neon_u16 = vuzp2q_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654)); + #else + uint16x4_t a7654 = vget_high_u16(a_.neon_u16); + uint16x4_t b7654 = vget_high_u16(b_.neon_u16); + uint32x4_t ab7654 = vmull_u16(a7654, b7654); /* 7777666655554444 */ + uint16x8x2_t neon_r = vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654)); + r_.neon_u16 = neon_r.val[1]; + #endif #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -4339,11 +4468,10 @@ simde_mm_sad_epu8 (simde__m128i a, simde__m128i b) { b_ = simde__m128i_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - uint16x8_t t = vpaddlq_u8(vabdq_u8(a_.neon_u8, b_.neon_u8)); - uint16_t r0 = t[0] + t[1] + t[2] + t[3]; - uint16_t r4 = t[4] + t[5] + t[6] + t[7]; - uint16x8_t r = vsetq_lane_u16(r0, vdupq_n_u16(0), 0); - r_.neon_u16 = vsetq_lane_u16(r4, r, 4); + const uint16x8_t t = vpaddlq_u8(vabdq_u8(a_.neon_u8, b_.neon_u8)); + r_.neon_u64 = vcombine_u64( + vpaddl_u32(vpaddl_u16(vget_low_u16(t))), + vpaddl_u32(vpaddl_u16(vget_high_u16(t)))); #else for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { uint16_t tmp = 0; @@ -4382,7 +4510,7 @@ simde_mm_set_epi8 (int8_t e15, int8_t e14, int8_t e13, int8_t e12, e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, int8x16_t) int8_t data[16] = { + SIMDE_ALIGN_LIKE_16(int8x16_t) int8_t data[16] = { e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, 
e11, @@ -4424,7 +4552,7 @@ simde_mm_set_epi16 (int16_t e7, int16_t e6, int16_t e5, int16_t e4, simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, int16x8_t) int16_t data[8] = { e0, e1, e2, e3, e4, e5, e6, e7 }; + SIMDE_ALIGN_LIKE_16(int16x8_t) int16_t data[8] = { e0, e1, e2, e3, e4, e5, e6, e7 }; r_.neon_i16 = vld1q_s16(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i16x8_make(e0, e1, e2, e3, e4, e5, e6, e7); @@ -4446,6 +4574,24 @@ simde_mm_set_epi16 (int16_t e7, int16_t e6, int16_t e5, int16_t e4, #define _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_loadu_si16 (void const* mem_addr) { + #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ + SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ + HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ + HEDLEY_INTEL_VERSION_CHECK(20,21,1)) + return _mm_loadu_si16(mem_addr); + #else + int16_t val; + simde_memcpy(&val, mem_addr, sizeof(val)); + return simde_x_mm_cvtsi16_si128(val); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_loadu_si16(mem_addr) simde_mm_loadu_si16(mem_addr) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_set_epi32 (int32_t e3, int32_t e2, int32_t e1, int32_t e0) { @@ -4455,7 +4601,7 @@ simde_mm_set_epi32 (int32_t e3, int32_t e2, int32_t e1, int32_t e0) { simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, int32x4_t) int32_t data[4] = { e0, e1, e2, e3 }; + SIMDE_ALIGN_LIKE_16(int32x4_t) int32_t data[4] = { e0, e1, e2, e3 }; r_.neon_i32 = vld1q_s32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i32x4_make(e0, e1, e2, e3); @@ -4473,6 +4619,24 @@ simde_mm_set_epi32 (int32_t e3, int32_t e2, int32_t e1, int32_t e0) { #define _mm_set_epi32(e3, e2, e1, e0) simde_mm_set_epi32(e3, e2, e1, e0) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_loadu_si32 (void const* mem_addr) { + #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ + SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ + HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ + HEDLEY_INTEL_VERSION_CHECK(20,21,1)) + return _mm_loadu_si32(mem_addr); + #else + int32_t val; + simde_memcpy(&val, mem_addr, sizeof(val)); + return simde_mm_cvtsi32_si128(val); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_loadu_si32(mem_addr) simde_mm_loadu_si32(mem_addr) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_set_epi64 (simde__m64 e1, simde__m64 e0) { @@ -4504,7 +4668,7 @@ simde_mm_set_epi64x (int64_t e1, int64_t e0) { simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, int64x2_t) int64_t data[2] = {e0, e1}; + SIMDE_ALIGN_LIKE_16(int64x2_t) int64_t data[2] = {e0, e1}; r_.neon_i64 = vld1q_s64(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i64x2_make(e0, e1); @@ -4520,6 +4684,24 @@ simde_mm_set_epi64x (int64_t e1, int64_t e0) { #define _mm_set_epi64x(e1, e0) simde_mm_set_epi64x(e1, e0) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_loadu_si64 (void const* mem_addr) { + #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ + SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ + HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ + HEDLEY_INTEL_VERSION_CHECK(20,21,1)) + return _mm_loadu_si64(mem_addr); + #else + int64_t val; + simde_memcpy(&val, mem_addr, sizeof(val)); + return simde_mm_cvtsi64_si128(val); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_loadu_si64(mem_addr) 
simde_mm_loadu_si64(mem_addr) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_set_epu8 (uint8_t e15, uint8_t e14, uint8_t e13, uint8_t e12, @@ -4536,7 +4718,7 @@ simde_x_mm_set_epu8 (uint8_t e15, uint8_t e14, uint8_t e13, uint8_t e12, simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, uint8x16_t) uint8_t data[16] = { + SIMDE_ALIGN_LIKE_16(uint8x16_t) uint8_t data[16] = { e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, @@ -4565,7 +4747,7 @@ simde_x_mm_set_epu16 (uint16_t e7, uint16_t e6, uint16_t e5, uint16_t e4, simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, uint16x8_t) uint16_t data[8] = { e0, e1, e2, e3, e4, e5, e6, e7 }; + SIMDE_ALIGN_LIKE_16(uint16x8_t) uint16_t data[8] = { e0, e1, e2, e3, e4, e5, e6, e7 }; r_.neon_u16 = vld1q_u16(data); #else r_.u16[0] = e0; r_.u16[1] = e1; r_.u16[2] = e2; r_.u16[3] = e3; @@ -4586,7 +4768,7 @@ simde_x_mm_set_epu32 (uint32_t e3, uint32_t e2, uint32_t e1, uint32_t e0) { simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, uint32x4_t) uint32_t data[4] = { e0, e1, e2, e3 }; + SIMDE_ALIGN_LIKE_16(uint32x4_t) uint32_t data[4] = { e0, e1, e2, e3 }; r_.neon_u32 = vld1q_u32(data); #else r_.u32[0] = e0; @@ -4608,7 +4790,7 @@ simde_x_mm_set_epu64x (uint64_t e1, uint64_t e0) { simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - SIMDE_ALIGN_AS(16, uint64x2_t) uint64_t data[2] = {e0, e1}; + SIMDE_ALIGN_LIKE_16(uint64x2_t) uint64_t data[2] = {e0, e1}; r_.neon_u64 = vld1q_u64(data); #else r_.u64[0] = e0; @@ -4727,7 +4909,7 @@ simde_mm_set1_epi64x (int64_t a) { simde__m128i_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_i64 = vmovq_n_s64(a); + r_.neon_i64 = vdupq_n_s64(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i64x2_splat(a); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) @@ -4944,7 +5126,7 @@ simde_x_mm_setone_si128 (void) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_shuffle_epi32 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a); @@ -4957,6 +5139,23 @@ simde_mm_shuffle_epi32 (simde__m128i a, const int imm8) } #if defined(SIMDE_X86_SSE2_NATIVE) #define simde_mm_shuffle_epi32(a, imm8) _mm_shuffle_epi32((a), (imm8)) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_mm_shuffle_epi32(a, imm8) \ + __extension__({ \ + int32x4_t ret; \ + ret = vmovq_n_s32( \ + vgetq_lane_s32(vreinterpretq_s32_s64(a), (imm8) & (0x3))); \ + ret = vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_s64(a), ((imm8) >> 2) & 0x3), \ + ret, 1); \ + ret = vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_s64(a), ((imm8) >> 4) & 0x3), \ + ret, 2); \ + ret = vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_s64(a), ((imm8) >> 6) & 0x3), \ + ret, 3); \ + vreinterpretq_s64_s32(ret); \ + }) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shuffle_epi32(a, imm8) (__extension__ ({ \ const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \ @@ -4976,7 +5175,7 @@ simde_mm_shuffle_epi32 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_shuffle_pd (simde__m128d a, simde__m128d b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 3) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m128d_private r_, a_ = simde__m128d_to_private(a), @@ -5005,7 +5204,7 @@ simde_mm_shuffle_pd (simde__m128d a, simde__m128d b, const int imm8) 
SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a); @@ -5022,6 +5221,20 @@ simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8) } #if defined(SIMDE_X86_SSE2_NATIVE) #define simde_mm_shufflehi_epi16(a, imm8) _mm_shufflehi_epi16((a), (imm8)) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_mm_shufflehi_epi16(a, imm8) \ + __extension__({ \ + int16x8_t ret = vreinterpretq_s16_s64(a); \ + int16x4_t highBits = vget_high_s16(ret); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm8) & (0x3)), ret, 4); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm8) >> 2) & 0x3), ret, \ + 5); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm8) >> 4) & 0x3), ret, \ + 6); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm8) >> 6) & 0x3), ret, \ + 7); \ + vreinterpretq_s64_s16(ret); \ + }) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shufflehi_epi16(a, imm8) (__extension__ ({ \ const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \ @@ -5042,7 +5255,7 @@ simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_shufflelo_epi16 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a); @@ -5059,6 +5272,20 @@ simde_mm_shufflelo_epi16 (simde__m128i a, const int imm8) } #if defined(SIMDE_X86_SSE2_NATIVE) #define simde_mm_shufflelo_epi16(a, imm8) _mm_shufflelo_epi16((a), (imm8)) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_mm_shufflelo_epi16(a, imm8) \ + __extension__({ \ + int16x8_t ret = vreinterpretq_s16_s64(a); \ + int16x4_t lowBits = vget_low_s16(ret); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm8) & (0x3)), ret, 0); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm8) >> 2) & 0x3), ret, \ + 1); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm8) >> 4) & 0x3), ret, \ + 2); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm8) >> 6) & 0x3), ret, \ + 3); \ + vreinterpretq_s64_s16(ret); \ + }) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shufflelo_epi16(a, imm8) (__extension__ ({ \ const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \ @@ -5456,7 +5683,7 @@ simde_mm_sra_epi32 (simde__m128i a, simde__m128i count) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_slli_epi16 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { if (HEDLEY_UNLIKELY((imm8 > 15))) { return simde_mm_setzero_si128(); } @@ -5507,7 +5734,7 @@ simde_mm_slli_epi16 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_slli_epi32 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { if (HEDLEY_UNLIKELY((imm8 > 31))) { return simde_mm_setzero_si128(); } @@ -5568,7 +5795,7 @@ simde_mm_slli_epi32 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_slli_epi64 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { if (HEDLEY_UNLIKELY((imm8 > 63))) { return simde_mm_setzero_si128(); } @@ -5614,7 +5841,7 @@ simde_mm_slli_epi64 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_srli_epi16 (simde__m128i a, const int imm8) 
- SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { if (HEDLEY_UNLIKELY((imm8 > 15))) { return simde_mm_setzero_si128(); } @@ -5663,7 +5890,7 @@ simde_mm_srli_epi16 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_srli_epi32 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { if (HEDLEY_UNLIKELY((imm8 > 31))) { return simde_mm_setzero_si128(); } @@ -5724,7 +5951,7 @@ simde_mm_srli_epi32 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_srli_epi64 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a); @@ -5774,8 +6001,6 @@ simde_mm_srli_epi64 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) _mm_store_pd(mem_addr, a); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) @@ -5783,7 +6008,7 @@ simde_mm_store_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_s64(HEDLEY_REINTERPRET_CAST(int64_t*, mem_addr), simde__m128d_to_private(a).neon_i64); #else - simde_memcpy(mem_addr, &a, sizeof(a)); + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128d), &a, sizeof(a)); #endif } #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) @@ -5793,8 +6018,6 @@ simde_mm_store_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store1_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) _mm_store1_pd(mem_addr, a); #else @@ -5849,7 +6072,7 @@ simde_mm_store_si128 (simde__m128i* mem_addr, simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_s32(HEDLEY_REINTERPRET_CAST(int32_t*, mem_addr), a_.neon_i32); #else - simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_)); + simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128i), &a_, sizeof(a_)); #endif #endif } @@ -5930,8 +6153,6 @@ simde_mm_storel_pd (simde_float64* mem_addr, simde__m128d a) { SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storer_pd (simde_float64 mem_addr[2], simde__m128d a) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) _mm_storer_pd(mem_addr, a); #else @@ -5982,9 +6203,58 @@ simde_mm_storeu_si128 (simde__m128i* mem_addr, simde__m128i a) { SIMDE_FUNCTION_ATTRIBUTES void -simde_mm_stream_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) { - simde_assert_aligned(16, mem_addr); +simde_mm_storeu_si16 (void* mem_addr, simde__m128i a) { + #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ + SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ + HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ + HEDLEY_INTEL_VERSION_CHECK(20,21,1)) + _mm_storeu_si16(mem_addr, a); + #else + int16_t val = simde_x_mm_cvtsi128_si16(a); + simde_memcpy(mem_addr, &val, sizeof(val)); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_storeu_si16(mem_addr, a) simde_mm_storeu_si16(mem_addr, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm_storeu_si32 (void* mem_addr, simde__m128i a) { + #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ + SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ + HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ + HEDLEY_INTEL_VERSION_CHECK(20,21,1)) + 
_mm_storeu_si32(mem_addr, a); + #else + int32_t val = simde_mm_cvtsi128_si32(a); + simde_memcpy(mem_addr, &val, sizeof(val)); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_storeu_si32(mem_addr, a) simde_mm_storeu_si32(mem_addr, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm_storeu_si64 (void* mem_addr, simde__m128i a) { + #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ + SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ + HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ + HEDLEY_INTEL_VERSION_CHECK(20,21,1)) + _mm_storeu_si64(mem_addr, a); + #else + int64_t val = simde_mm_cvtsi128_si64(a); + simde_memcpy(mem_addr, &val, sizeof(val)); + #endif +} +#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) + #define _mm_storeu_si64(mem_addr, a) simde_mm_storeu_si64(mem_addr, a) +#endif +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm_stream_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) { #if defined(SIMDE_X86_SSE2_NATIVE) _mm_stream_pd(mem_addr, a); #else @@ -5998,8 +6268,6 @@ simde_mm_stream_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d SIMDE_FUNCTION_ATTRIBUTES void simde_mm_stream_si128 (simde__m128i* mem_addr, simde__m128i a) { - simde_assert_aligned(16, mem_addr); - #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64) _mm_stream_si128(HEDLEY_STATIC_CAST(__m128i*, mem_addr), a); #else @@ -6805,6 +7073,8 @@ simde_mm_unpackhi_pd (simde__m128d a, simde__m128d b) { float64x1_t a_l = vget_high_f64(a_.f64); float64x1_t b_l = vget_high_f64(b_.f64); r_.neon_f64 = vcombine_f64(a_l, b_l); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 1, 3); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 1, 3); #else @@ -7006,8 +7276,8 @@ simde_x_mm_negate_pd(simde__m128d a) { r_.altivec_f64 = vec_neg(a_.altivec_f64); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vnegq_f64(a_.neon_f64); - #elif defined(SIMDE_WASM_SIMD128d_NATIVE) - r_.wasm_v128d = wasm_f64x2_neg(a_.wasm_v128d); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_f64x2_neg(a_.wasm_v128); #elif defined(SIMDE_VECTOR_NEGATE) r_.f64 = -a_.f64; #else @@ -7055,26 +7325,30 @@ simde_mm_xor_si128 (simde__m128i a, simde__m128i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_not_si128 (simde__m128i a) { - simde__m128i_private - r_, - a_ = simde__m128i_to_private(a); - - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_i32 = vmvnq_s32(a_.neon_i32); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32f = ~a_.i32f; + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_ternarylogic_epi32(a, a, a, 0x55); #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { - r_.i32f[i] = ~(a_.i32f[i]); - } - #endif + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); - return simde__m128i_from_private(r_); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmvnq_s32(a_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32f = ~a_.i32f; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) { + 
r_.i32f[i] = ~(a_.i32f[i]); + } + #endif + + return simde__m128i_from_private(r_); + #endif } #define SIMDE_MM_SHUFFLE2(x, y) (((x) << 1) | (y)) diff --git a/lib/mmseqs/lib/simde/simde/x86/sse3.h b/lib/mmseqs/lib/simde/simde/x86/sse3.h index 09c94fc..bdf8d10 100644 --- a/lib/mmseqs/lib/simde/simde/x86/sse3.h +++ b/lib/mmseqs/lib/simde/simde/x86/sse3.h @@ -43,6 +43,9 @@ simde_x_mm_deinterleaveeven_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i16 = vuzp1q_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i16 = t.val[0]; #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, b_.i16, 0, 2, 4, 6, 8, 10, 12, 14); #else @@ -66,6 +69,9 @@ simde_x_mm_deinterleaveodd_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i16 = vuzp2q_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i16 = t.val[1]; #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, b_.i16, 1, 3, 5, 7, 9, 11, 13, 15); #else @@ -89,6 +95,9 @@ simde_x_mm_deinterleaveeven_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i32 = vuzp1q_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4x2_t t = vuzpq_s32(a_.neon_i32, b_.neon_i32); + r_.neon_i32 = t.val[0]; #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, b_.i32, 0, 2, 4, 6); #else @@ -112,6 +121,9 @@ simde_x_mm_deinterleaveodd_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i32 = vuzp2q_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4x2_t t = vuzpq_s32(a_.neon_i32, b_.neon_i32); + r_.neon_i32 = t.val[1]; #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, b_.i32, 1, 3, 5, 7); #else @@ -135,6 +147,9 @@ simde_x_mm_deinterleaveeven_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vuzp1q_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4x2_t t = vuzpq_f32(a_.neon_f32, b_.neon_f32); + r_.neon_f32 = t.val[0]; #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 2, 4, 6); #else @@ -158,6 +173,9 @@ simde_x_mm_deinterleaveodd_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vuzp2q_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4x2_t t = vuzpq_f32(a_.neon_f32, b_.neon_f32); + r_.neon_f32 = t.val[1]; #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 1, 3, 5, 7); #else @@ -284,13 +302,10 @@ simde__m128d simde_mm_hadd_pd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE3_NATIVE) return _mm_hadd_pd(a, b); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return simde__m128d_from_neon_f64(vpaddq_f64(simde__m128d_to_neon_f64(a), simde__m128d_to_neon_f64(b))); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - simde_float64 res[2] = { vaddvq_f64(simde__m128d_to_private(a).neon_f64), vaddvq_f64(simde__m128d_to_private(b).neon_f64)}; - return vld1q_f64(res); - #else - return simde_mm_add_pd(simde_x_mm_deinterleaveeven_pd(a, b), simde_x_mm_deinterleaveodd_pd(a, b)); - #endif + return 
simde_mm_add_pd(simde_x_mm_deinterleaveeven_pd(a, b), simde_x_mm_deinterleaveodd_pd(a, b)); #endif } #if defined(SIMDE_X86_SSE3_ENABLE_NATIVE_ALIASES) @@ -302,18 +317,13 @@ simde__m128 simde_mm_hadd_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE3_NATIVE) return _mm_hadd_ps(a, b); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return simde__m128_from_neon_f32(vpaddq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4x2_t t = vuzpq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)); + return simde__m128_from_neon_f32(vaddq_f32(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return vpaddq_f32(simde__m128_to_private(a).neon_f32, simde__m128_to_private(b).neon_f32); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - float32x2_t a10 = vget_low_f32(simde__m128_to_private(a).neon_f32); - float32x2_t a32 = vget_high_f32(simde__m128_to_private(a).neon_f32); - float32x2_t b10 = vget_low_f32(simde__m128_to_private(b).neon_f32); - float32x2_t b32 = vget_high_f32(simde__m128_to_private(b).neon_f32); - return vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)); - #else - return simde_mm_add_ps(simde_x_mm_deinterleaveeven_ps(a, b), simde_x_mm_deinterleaveodd_ps(a, b)); - #endif + return simde_mm_add_ps(simde_x_mm_deinterleaveeven_ps(a, b), simde_x_mm_deinterleaveodd_ps(a, b)); #endif } #if defined(SIMDE_X86_SSE3_ENABLE_NATIVE_ALIASES) @@ -338,12 +348,11 @@ simde__m128 simde_mm_hsub_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE3_NATIVE) return _mm_hsub_ps(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4x2_t t = vuzpq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)); + return simde__m128_from_neon_f32(vaddq_f32(t.val[0], vnegq_f32(t.val[1]))); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return vsubq_f32(vuzp1q_f32(simde__m128_to_private(a).neon_f32, simde__m128_to_private(b).neon_f32), vuzp2q_f32(simde__m128_to_private(a).neon_f32, simde__m128_to_private(b).neon_f32)); - #else - return simde_mm_sub_ps(simde_x_mm_deinterleaveeven_ps(a, b), simde_x_mm_deinterleaveodd_ps(a, b)); - #endif + return simde_mm_sub_ps(simde_x_mm_deinterleaveeven_ps(a, b), simde_x_mm_deinterleaveodd_ps(a, b)); #endif } #if defined(SIMDE_X86_SSE3_ENABLE_NATIVE_ALIASES) @@ -380,7 +389,9 @@ simde_mm_loaddup_pd (simde_float64 const* mem_addr) { simde__m128d_private r_; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_f64 = vsetq_lane_f64(*mem_addr, vsetq_lane_f64(*mem_addr, vdupq_n_f64(0), 0), 1); + r_.neon_f64 = vdupq_n_f64(*mem_addr); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vdupq_n_s64(*HEDLEY_REINTERPRET_CAST(int64_t const*, mem_addr)); #else r_.f64[0] = *mem_addr; r_.f64[1] = *mem_addr; @@ -403,14 +414,12 @@ simde_mm_movedup_pd (simde__m128d a) { r_, a_ = simde__m128d_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_SHUFFLE_VECTOR_) - r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 0, 0); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - float64_t a0 = vgetq_lane_f64(a_.neon_f64, 0); - simde_float64 data[2] = { a0, a0 }; - r_.neon_f64 = vld1q_f64(data); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vdupq_laneq_f64(a_.neon_f64, 0); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_SHUFFLE_VECTOR_) + r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 0, 0); #else r_.f64[0] = a_.f64[0]; 
r_.f64[1] = a_.f64[0]; @@ -433,15 +442,12 @@ simde_mm_movehdup_ps (simde__m128 a) { r_, a_ = simde__m128_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) - r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 1, 1, 3, 3); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - float32_t a1 = vgetq_lane_f32(a_.neon_f32, 1); - float32_t a3 = vgetq_lane_f32(a_.neon_f32, 3); - SIMDE_ALIGN(16) simde_float32 data[4] = { a1, a1, a3, a3 }; - r_.neon_f32 = vld1q_f32(data); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vtrn2q_f32(a_.neon_f32, a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 1, 1, 3, 3); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) + r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 1, 1, 3, 3); #else r_.f32[0] = a_.f32[1]; r_.f32[1] = a_.f32[1]; @@ -466,15 +472,12 @@ simde_mm_moveldup_ps (simde__m128 a) { r_, a_ = simde__m128_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) - r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 2, 2); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - float32_t a0 = vgetq_lane_f32(a_.neon_f32, 0); - float32_t a2 = vgetq_lane_f32(a_.neon_f32, 2); - SIMDE_ALIGN(16) simde_float32 data[4] = { a0, a0, a2, a2 }; - r_.neon_f32 = vld1q_f32(data); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vtrn1q_f32(a_.neon_f32, a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 2, 2); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) + r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 2, 2); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[0]; diff --git a/lib/mmseqs/lib/simde/simde/x86/sse4.1.h b/lib/mmseqs/lib/simde/simde/x86/sse4.1.h index 5549a99..21c9c0b 100644 --- a/lib/mmseqs/lib/simde/simde/x86/sse4.1.h +++ b/lib/mmseqs/lib/simde/simde/x86/sse4.1.h @@ -41,7 +41,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_blend_epi16 (simde__m128i a, simde__m128i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a), @@ -96,7 +96,7 @@ simde_mm_blend_epi16 (simde__m128i a, simde__m128i b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_blend_pd (simde__m128d a, simde__m128d b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 3) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m128d_private r_, a_ = simde__m128d_to_private(a), @@ -138,7 +138,7 @@ simde_mm_blend_pd (simde__m128d a, simde__m128d b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_blend_ps (simde__m128 a, simde__m128 b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 15) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m128_private r_, a_ = simde__m128_to_private(a), @@ -397,7 +397,7 @@ simde_mm_round_pd (simde__m128d a, int rounding) case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_round(a_.altivec_f64)); - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vrndiq_f64(a_.neon_f64); #elif defined(simde_math_nearbyint) SIMDE_VECTORIZE @@ -412,12 +412,12 @@ simde_mm_round_pd (simde__m128d a, int rounding) case SIMDE_MM_FROUND_TO_NEAREST_INT: #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) 
r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_round(a_.altivec_f64)); - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vrndaq_f64(a_.neon_f64); - #elif defined(simde_math_round) + #elif defined(simde_math_roundeven) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = simde_math_round(a_.f64[i]); + r_.f64[i] = simde_math_roundeven(a_.f64[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); @@ -427,7 +427,7 @@ simde_mm_round_pd (simde__m128d a, int rounding) case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_floor(a_.altivec_f64)); - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vrndmq_f64(a_.neon_f64); #else SIMDE_VECTORIZE @@ -440,7 +440,7 @@ simde_mm_round_pd (simde__m128d a, int rounding) case SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_ceil(a_.altivec_f64)); - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vrndpq_f64(a_.neon_f64); #elif defined(simde_math_ceil) SIMDE_VECTORIZE @@ -455,7 +455,7 @@ simde_mm_round_pd (simde__m128d a, int rounding) case SIMDE_MM_FROUND_TO_ZERO: #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_trunc(a_.altivec_f64)); - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && 0 + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vrndq_f64(a_.neon_f64); #else SIMDE_VECTORIZE @@ -593,6 +593,8 @@ simde__m128i simde_mm_cvtepi8_epi16 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepi8_epi16(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_srai_epi16(_mm_unpacklo_epi8(a, a), 8); #else simde__m128i_private r_, @@ -602,6 +604,11 @@ simde_mm_cvtepi8_epi16 (simde__m128i a) { int8x16_t s8x16 = a_.neon_i8; /* xxxx xxxx xxxx DCBA */ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */ r_.neon_i16 = s16x8; + #elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, a_.i8, + -1, 0, -1, 1, -1, 2, -1, 3, + -1, 4, -1, 5, -1, 6, -1, 7)); + r_.i16 >>= 8; #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i16, a_.m64_private[0].i8); #else @@ -624,6 +631,10 @@ simde__m128i simde_mm_cvtepi8_epi32 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepi8_epi32(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_unpacklo_epi8(a, a); + tmp = _mm_unpacklo_epi16(tmp, tmp); + return _mm_srai_epi32(tmp, 24); #else simde__m128i_private r_, @@ -634,6 +645,11 @@ simde_mm_cvtepi8_epi32 (simde__m128i a) { int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */ r_.neon_i32 = s32x4; + #elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, a_.i8, + -1, -1, -1, 0, -1, -1, -1, 1, + -1, -1, -1, 2, -1, -1, -1, 3)); + r_.i32 >>= 24; #else SIMDE_VECTORIZE for (size_t i = 0 ; i 
< (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -665,6 +681,14 @@ simde_mm_cvtepi8_epi64 (simde__m128i a) { int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */ r_.neon_i64 = s64x2; + #elif (!defined(SIMDE_ARCH_X86) && !defined(SIMDE_ARCH_AMD64)) && defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + /* Disabled on x86 due to lack of 64-bit arithmetic shift until + * until AVX-512 (at which point we would be using the native + * _mm_cvtepi_epi64 anyways). */ + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, a_.i8, + -1, -1, -1, -1, -1, -1, -1, 0, + -1, -1, -1, -1, -1, -1, -1, 1)); + r_.i64 >>= 56; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -685,6 +709,8 @@ simde__m128i simde_mm_cvtepu8_epi16 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepu8_epi16(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi8(a, _mm_setzero_si128()); #else simde__m128i_private r_, @@ -694,6 +720,11 @@ simde_mm_cvtepu8_epi16 (simde__m128i a) { uint8x16_t u8x16 = a_.neon_u8; /* xxxx xxxx xxxx DCBA */ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */ r_.neon_u16 = u16x8; + #elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(r_.i8) z = { 0, }; + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, z, + 0, 16, 1, 17, 2, 18, 3, 19, + 4, 20, 5, 21, 6, 22, 7, 23)); #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_CLANG_45541) && (!defined(SIMDE_ARCH_POWER) || !defined(__clang__)) SIMDE_CONVERT_VECTOR_(r_.i16, a_.m64_private[0].u8); #else @@ -716,6 +747,14 @@ simde__m128i simde_mm_cvtepu8_epi32 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepu8_epi32(a); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + __m128i s = _mm_set_epi8( + 0x80, 0x80, 0x80, 0x03, 0x80, 0x80, 0x80, 0x02, + 0x80, 0x80, 0x80, 0x01, 0x80, 0x80, 0x80, 0x00); + return _mm_shuffle_epi8(a, s); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi16(_mm_unpacklo_epi8(a, z), z); #else simde__m128i_private r_, @@ -726,6 +765,11 @@ simde_mm_cvtepu8_epi32 (simde__m128i a) { uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */ r_.neon_u32 = u32x4; + #elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(r_.i8) z = { 0, }; + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, z, + 0, 17, 18, 19, 1, 21, 22, 23, + 2, 25, 26, 27, 3, 29, 30, 31)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -746,6 +790,14 @@ simde__m128i simde_mm_cvtepu8_epi64 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepu8_epi64(a); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + __m128i s = _mm_set_epi8( + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00); + return _mm_shuffle_epi8(a, s); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi32(_mm_unpacklo_epi16(_mm_unpacklo_epi8(a, z), z), z); #else simde__m128i_private r_, @@ -757,6 +809,11 @@ simde_mm_cvtepu8_epi64 (simde__m128i a) { uint32x4_t 
u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */ r_.neon_u64 = u64x2; + #elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(r_.i8) z = { 0, }; + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, z, + 0, 17, 18, 19, 20, 21, 22, 23, + 1, 25, 26, 27, 28, 29, 30, 31)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -777,6 +834,8 @@ simde__m128i simde_mm_cvtepi16_epi32 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepi16_epi32(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_srai_epi32(_mm_unpacklo_epi16(a, a), 16); #else simde__m128i_private r_, @@ -784,6 +843,9 @@ simde_mm_cvtepi16_epi32 (simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vmovl_s16(vget_low_s16(a_.neon_i16)); + #elif !defined(SIMDE_ARCH_X86) && defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, a_.i16, 8, 0, 10, 1, 12, 2, 14, 3)); + r_.i32 >>= 16; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -804,6 +866,8 @@ simde__m128i simde_mm_cvtepu16_epi32 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepu16_epi32(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi16(a, _mm_setzero_si128()); #else simde__m128i_private r_, @@ -811,6 +875,10 @@ simde_mm_cvtepu16_epi32 (simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vmovl_u16(vget_low_u16(a_.neon_u16)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(r_.u16) z = { 0, }; + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), SIMDE_SHUFFLE_VECTOR_(16, 16, a_.u16, z, + 0, 9, 1, 11, 2, 13, 3, 15)); #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_CLANG_45541) && (!defined(SIMDE_ARCH_POWER) || !defined(__clang__)) SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].u16); #else @@ -833,6 +901,9 @@ simde__m128i simde_mm_cvtepu16_epi64 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepu16_epi64(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi32(_mm_unpacklo_epi16(a, z), z); #else simde__m128i_private r_, @@ -843,6 +914,11 @@ simde_mm_cvtepu16_epi64 (simde__m128i a) { uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */ r_.neon_u64 = u64x2; + #elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(r_.u16) z = { 0, }; + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), SIMDE_SHUFFLE_VECTOR_(16, 16, a_.u16, z, + 0, 9, 10, 11, + 1, 13, 14, 15)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -873,6 +949,11 @@ simde_mm_cvtepi16_epi64 (simde__m128i a) { int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */ r_.neon_i64 = s64x2; + #elif (!defined(SIMDE_ARCH_X86) && !defined(SIMDE_ARCH_AMD64)) && defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + r_.i64 = 
HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, a_.i16, + 8, 9, 10, 0, + 12, 13, 14, 1)); + r_.i64 >>= 48; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -893,6 +974,11 @@ simde__m128i simde_mm_cvtepi32_epi64 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepi32_epi64(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_shuffle_epi32(a, 0x50); + tmp = _mm_srai_epi32(tmp, 31); + tmp = _mm_shuffle_epi32(tmp, 0xed); + return _mm_unpacklo_epi32(a, tmp); #else simde__m128i_private r_, @@ -900,6 +986,9 @@ simde_mm_cvtepi32_epi64 (simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i64 = vmovl_s32(vget_low_s32(a_.neon_i32)); + #elif !defined(SIMDE_ARCH_X86) && defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, a_.i32, -1, 0, -1, 1)); + r_.i64 >>= 32; #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i64, a_.m64_private[0].i32); #else @@ -922,6 +1011,8 @@ simde__m128i simde_mm_cvtepu32_epi64 (simde__m128i a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_cvtepu32_epi64(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_unpacklo_epi32(a, _mm_setzero_si128()); #else simde__m128i_private r_, @@ -929,6 +1020,9 @@ simde_mm_cvtepu32_epi64 (simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u64 = vmovl_u32(vget_low_u32(a_.neon_u32)); + #elif defined(SIMDE_VECTOR_SCALAR) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + __typeof__(r_.u32) z = { 0, }; + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), SIMDE_SHUFFLE_VECTOR_(32, 16, a_.u32, z, 0, 4, 1, 6)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i64, a_.m64_private[0].u32); #else @@ -946,67 +1040,45 @@ simde_mm_cvtepu32_epi64 (simde__m128i a) { #define _mm_cvtepu32_epi64(a) simde_mm_cvtepu32_epi64(a) #endif -SIMDE_FUNCTION_ATTRIBUTES -void -simde_x_kadd_f32(simde_float32 *sum, simde_float32 *c, simde_float32 y) -{ - /* Kahan summation for accurate summation of floating-point numbers. - * http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html */ - y -= *c; - simde_float32 t = *sum + y; - *c = (t - *sum) - y; - *sum = t; -} - -SIMDE_FUNCTION_ATTRIBUTES -void -simde_x_kadd_f64(simde_float64 *sum, simde_float64 *c, simde_float64 y) { - /* Kahan summation for accurate summation of floating-point numbers. 
- * http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html */ - y -= *c; - simde_float64 t = *sum + y; - *c = (t - *sum) - y; - *sum = t; -} - SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_dp_pd (simde__m128d a, simde__m128d b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128d_private r_, a_ = simde__m128d_to_private(a), b_ = simde__m128d_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - /* shortcut cases */ - if (imm8 == 0xFF) { - r_.neon_f64 = vdupq_n_f64(vaddvq_f64(simde_mm_mul_pd(a, b))); - return simde__m128d_from_private(r_); - } - if (imm8 == 0x13) { - float64x2_t m = simde_mm_mul_pd(a, b); - m = vsetq_lane_f64(0, m, 1); - r_.neon_f64 = vdupq_n_f64(vaddvq_f64(m)); - return simde__m128d_from_private(r_); + r_.neon_f64 = vmulq_f64(a_.neon_f64, b_.neon_f64); + + switch (imm8) { + case 0xff: + r_.neon_f64 = vaddq_f64(r_.neon_f64, vextq_f64(r_.neon_f64, r_.neon_f64, 1)); + break; + case 0x13: + r_.neon_f64 = vdupq_lane_f64(vget_low_f64(r_.neon_f64), 0); + break; + default: + { /* imm8 is a compile-time constant, so this all becomes just a load */ + uint64_t mask_data[] = { + (imm8 & (1 << 4)) ? ~UINT64_C(0) : UINT64_C(0), + (imm8 & (1 << 5)) ? ~UINT64_C(0) : UINT64_C(0), + }; + r_.neon_f64 = vreinterpretq_f64_u64(vandq_u64(vld1q_u64(mask_data), vreinterpretq_u64_f64(r_.neon_f64))); + } + + r_.neon_f64 = vdupq_n_f64(vaddvq_f64(r_.neon_f64)); + + { + uint64_t mask_data[] = { + (imm8 & 1) ? ~UINT64_C(0) : UINT64_C(0), + (imm8 & 2) ? ~UINT64_C(0) : UINT64_C(0) + }; + r_.neon_f64 = vreinterpretq_f64_u64(vandq_u64(vld1q_u64(mask_data), vreinterpretq_u64_f64(r_.neon_f64))); + } + break; } - simde_float64 s = 0, c = 0; - float64x2_t f64a = a_.neon_f64; - float64x2_t f64b = b_.neon_f64; - - /* To improve the accuracy of floating-point summation, Kahan algorithm - * is used for each operation. */ - if (imm8 & (1 << 4)) - simde_x_kadd_f64(&s, &c, vgetq_lane_f64(f64a, 0) * vgetq_lane_f64(f64b, 0)); - if (imm8 & (1 << 5)) - simde_x_kadd_f64(&s, &c, vgetq_lane_f64(f64a, 1) * vgetq_lane_f64(f64b, 1)); - s += c; - - simde_float64 res[2] = { - (imm8 & 0x1) ? s : 0, - (imm8 & 0x2) ? s : 0 - }; - r_.neon_f64 = vld1q_f64(res); #else simde_float64 sum = SIMDE_FLOAT64_C(0.0); @@ -1034,50 +1106,49 @@ simde_mm_dp_pd (simde__m128d a, simde__m128d b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_dp_ps (simde__m128 a, simde__m128 b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - /* shortcut cases */ - if (imm8 == 0xFF) { - r_.neon_f32 = vdupq_n_f32(vaddvq_f32(simde_mm_mul_ps(a, b))); - return simde__m128_from_private(r_); - } - if (imm8 == 0x7F) { - float32x4_t m = simde_mm_mul_ps(a, b); - m = vsetq_lane_f32(0, m, 3); - r_.neon_f32 = vdupq_n_f32(vaddvq_f32(m)); - return simde__m128_from_private(r_); - } - #endif + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32); - simde_float32 s = 0, c = 0; - float32x4_t f32a = a_.neon_f32; - float32x4_t f32b = b_.neon_f32; - - /* To improve the accuracy of floating-point summation, Kahan algorithm - * is used for each operation. 
*/ - if (imm8 & (1 << 4)) - simde_x_kadd_f32(&s, &c, vgetq_lane_f32(f32a, 0) * vgetq_lane_f32(f32b, 0)); - if (imm8 & (1 << 5)) - simde_x_kadd_f32(&s, &c, vgetq_lane_f32(f32a, 1) * vgetq_lane_f32(f32b, 1)); - if (imm8 & (1 << 6)) - simde_x_kadd_f32(&s, &c, vgetq_lane_f32(f32a, 2) * vgetq_lane_f32(f32b, 2)); - if (imm8 & (1 << 7)) - simde_x_kadd_f32(&s, &c, vgetq_lane_f32(f32a, 3) * vgetq_lane_f32(f32b, 3)); - s += c; - - simde_float32 res[4] = { - (imm8 & 0x1) ? s : 0, - (imm8 & 0x2) ? s : 0, - (imm8 & 0x4) ? s : 0, - (imm8 & 0x8) ? s : 0 - }; - r_.neon_f32 = vld1q_f32(res); + switch (imm8) { + case 0xff: + r_.neon_f32 = vdupq_n_f32(vaddvq_f32(r_.neon_f32)); + break; + case 0x7f: + r_.neon_f32 = vsetq_lane_f32(0, r_.neon_f32, 3); + r_.neon_f32 = vdupq_n_f32(vaddvq_f32(r_.neon_f32)); + break; + default: + { + { + uint32_t mask_data[] = { + (imm8 & (1 << 4)) ? ~UINT32_C(0) : UINT32_C(0), + (imm8 & (1 << 5)) ? ~UINT32_C(0) : UINT32_C(0), + (imm8 & (1 << 6)) ? ~UINT32_C(0) : UINT32_C(0), + (imm8 & (1 << 7)) ? ~UINT32_C(0) : UINT32_C(0) + }; + r_.neon_f32 = vreinterpretq_f32_u32(vandq_u32(vld1q_u32(mask_data), vreinterpretq_u32_f32(r_.neon_f32))); + } + + r_.neon_f32 = vdupq_n_f32(vaddvq_f32(r_.neon_f32)); + + { + uint32_t mask_data[] = { + (imm8 & 1) ? ~UINT32_C(0) : UINT32_C(0), + (imm8 & 2) ? ~UINT32_C(0) : UINT32_C(0), + (imm8 & 4) ? ~UINT32_C(0) : UINT32_C(0), + (imm8 & 8) ? ~UINT32_C(0) : UINT32_C(0) + }; + r_.neon_f32 = vreinterpretq_f32_u32(vandq_u32(vld1q_u32(mask_data), vreinterpretq_u32_f32(r_.neon_f32))); + } + } + break; + } #else simde_float32 sum = SIMDE_FLOAT32_C(0.0); @@ -1108,7 +1179,7 @@ simde_mm_dp_ps (simde__m128 a, simde__m128 b, const int imm8) SIMDE_FUNCTION_ATTRIBUTES int8_t simde_mm_extract_epi8 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 15) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m128i_private a_ = simde__m128i_to_private(a); @@ -1138,7 +1209,7 @@ simde_mm_extract_epi8 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_extract_epi32 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 3) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m128i_private a_ = simde__m128i_to_private(a); @@ -1170,7 +1241,7 @@ simde_mm_extract_epi32 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_extract_epi64 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 1) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { simde__m128i_private a_ = simde__m128i_to_private(a); @@ -1202,7 +1273,7 @@ simde_mm_extract_epi64 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_extract_ps (simde__m128 a, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 3) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m128_private a_ = simde__m128_to_private(a); @@ -1297,7 +1368,7 @@ simde_mm_floor_ss (simde__m128 a, simde__m128 b) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_insert_epi8 (simde__m128i a, int i, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 15) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { simde__m128i_private r_ = simde__m128i_to_private(a); @@ -1325,7 +1396,7 @@ simde_mm_insert_epi8 (simde__m128i a, int i, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_insert_epi32 (simde__m128i a, int i, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 3) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m128i_private r_ = simde__m128i_to_private(a); @@ -1350,7 +1421,7 @@ simde_mm_insert_epi32 (simde__m128i a, int i, const int imm8) 
SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_insert_epi64 (simde__m128i a, int64_t i, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 1) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { #if defined(SIMDE_BUG_GCC_94482) simde__m128i_private a_ = simde__m128i_to_private(a); @@ -1387,7 +1458,7 @@ simde_mm_insert_epi64 (simde__m128i a, int64_t i, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_insert_ps (simde__m128 a, simde__m128 b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a), @@ -1688,7 +1759,7 @@ simde_mm_minpos_epu16 (simde__m128i a) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_mpsadbw_epu8 (simde__m128i a, simde__m128i b, const int imm8) - SIMDE_REQUIRE_RANGE(imm8, 0, 7) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a), diff --git a/lib/mmseqs/lib/simde/simde/x86/sse4.2.h b/lib/mmseqs/lib/simde/simde/x86/sse4.2.h index 6ce6eb6..733fa13 100644 --- a/lib/mmseqs/lib/simde/simde/x86/sse4.2.h +++ b/lib/mmseqs/lib/simde/simde/x86/sse4.2.h @@ -95,7 +95,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES int simde_mm_cmpestrs (simde__m128i a, int la, simde__m128i b, int lb, const int imm8) - SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 127) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { #if !defined(HEDLEY_PGI_VERSION) /* https://www.pgroup.com/userforum/viewtopic.php?f=4&p=27590&sid=cf89f8bf30be801831fe4a2ff0a2fa6c */ (void) a; @@ -115,7 +115,7 @@ int simde_mm_cmpestrs (simde__m128i a, int la, simde__m128i b, int lb, const int SIMDE_FUNCTION_ATTRIBUTES int simde_mm_cmpestrz (simde__m128i a, int la, simde__m128i b, int lb, const int imm8) - SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 127) { + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { #if !defined(HEDLEY_PGI_VERSION) /* https://www.pgroup.com/userforum/viewtopic.php?f=4&p=27590&sid=cf89f8bf30be801831fe4a2ff0a2fa6c */ (void) a; @@ -136,8 +136,13 @@ int simde_mm_cmpestrz (simde__m128i a, int la, simde__m128i b, int lb, const int SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_cmpgt_epi64 (simde__m128i a, simde__m128i b) { - #if defined(SIMDE_X86_SSE4_2_NATIVE) + #if defined(SIMDE_X86_SSE4_2_NATIVE) && 0 return _mm_cmpgt_epi64(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/65175746/501126 */ + __m128i r = _mm_and_si128(_mm_cmpeq_epi32(a, b), _mm_sub_epi64(b, a)); + r = _mm_or_si128(r, _mm_cmpgt_epi32(a, b)); + return _mm_shuffle_epi32(r, _MM_SHUFFLE(3, 3, 1, 1)); #else simde__m128i_private r_, @@ -147,31 +152,8 @@ simde_mm_cmpgt_epi64 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_u64 = vcgtq_s64(a_.neon_i64, b_.neon_i64); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // ARMv7 lacks vcgtq_s64. - // This is based off of Clang's SSE2 polyfill: - // (a > b) -> ((a_hi > b_hi) || (a_lo > b_lo && a_hi == b_hi)) - - // Mask the sign bit out since we need a signed AND an unsigned comparison - // and it is ugly to try and split them. 
- int32x4_t mask = vreinterpretq_s32_s64(vdupq_n_s64(0x80000000ull)); - int32x4_t a_mask = veorq_s32(a_.neon_i32, mask); - int32x4_t b_mask = veorq_s32(b_.neon_i32, mask); - // Check if a > b - int64x2_t greater = vreinterpretq_s64_u32(vcgtq_s32(a_mask, b_mask)); - // Copy upper mask to lower mask - // a_hi > b_hi - int64x2_t gt_hi = vshrq_n_s64(greater, 63); - // Copy lower mask to upper mask - // a_lo > b_lo - int64x2_t gt_lo = vsliq_n_s64(greater, greater, 32); - // Compare for equality - int64x2_t equal = vreinterpretq_s64_u32(vceqq_s32(a_mask, b_mask)); - // Copy upper mask to lower mask - // a_hi == b_hi - int64x2_t eq_hi = vshrq_n_s64(equal, 63); - // a_hi > b_hi || (a_lo > b_lo && a_hi == b_hi) - int64x2_t ret = vorrq_s64(gt_hi, vandq_s64(gt_lo, eq_hi)); - r_.neon_i64 = ret; + /* https://stackoverflow.com/a/65223269/501126 */ + r_.neon_i64 = vshrq_n_s64(vqsubq_s64(b_.neon_i64, a_.neon_i64), 63); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpgt(a_.altivec_i64, b_.altivec_i64)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) diff --git a/lib/mmseqs/lib/simde/simde/x86/ssse3.h b/lib/mmseqs/lib/simde/simde/x86/ssse3.h index 5420119..bac06d1 100644 --- a/lib/mmseqs/lib/simde/simde/x86/ssse3.h +++ b/lib/mmseqs/lib/simde/simde/x86/ssse3.h @@ -45,8 +45,8 @@ simde_mm_abs_epi8 (simde__m128i a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vabsq_s8(a_.neon_i8); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - r_.altivec_i8 = vec_abs(a_.altivec_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_abs(a_.altivec_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -205,7 +205,7 @@ simde_mm_abs_pi32 (simde__m64 a) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_alignr_epi8 (simde__m128i a, simde__m128i b, int count) - SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 31) { + SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) { simde__m128i_private r_, a_ = simde__m128i_to_private(a), @@ -245,7 +245,8 @@ simde_mm_alignr_epi8 (simde__m128i a, simde__m128i b, int count) SIMDE_FUNCTION_ATTRIBUTES simde__m64 -simde_mm_alignr_pi8 (simde__m64 a, simde__m64 b, const int count) { +simde_mm_alignr_pi8 (simde__m64 a, simde__m64 b, const int count) + SIMDE_REQUIRE_CONSTANT(count) { simde__m64_private r_, a_ = simde__m64_to_private(a), @@ -298,8 +299,8 @@ simde_mm_shuffle_epi8 (simde__m128i a, simde__m128i b) { r_.neon_i8 = vqtbl1q_s8(a_.neon_i8, vandq_u8(b_.neon_u8, vdupq_n_u8(0x8F))); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) /* Mask out the bits we're not interested in. vtbl will result in 0 - for any values outside of [0, 15], so if the high bit is set it - will return 0, just like in SSSE3. */ + * for any values outside of [0, 15], so if the high bit is set it + * will return 0, just like in SSSE3. 
*/ b_.neon_i8 = vandq_s8(b_.neon_i8, vdupq_n_s8(HEDLEY_STATIC_CAST(int8_t, (1 << 7) | 15))); /* Convert a from an int8x16_t to an int8x8x2_t */ @@ -314,7 +315,7 @@ simde_mm_shuffle_epi8 (simde__m128i a, simde__m128i b) { r_.neon_i8 = vcombine_s8(l, h); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) /* This is a bit ugly because of the casts and the awful type - * macros (SIMDE_POWER_ALTIVEC_VECTOR), but it's really just + * macros (SIMDE_POWER_ALTIVEC_VECTOR), but it's really just * vec_sel(vec_perm(a, a, b), 0, vec_cmplt(b, 0)) */ SIMDE_POWER_ALTIVEC_VECTOR(signed char) z = { 0, }; SIMDE_POWER_ALTIVEC_VECTOR(signed char) msb_mask = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmplt(b_.altivec_i8, z)); @@ -365,15 +366,13 @@ simde__m128i simde_mm_hadd_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSSE3_NATIVE) return _mm_hadd_epi16(a, b); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return simde__m128i_from_neon_i16(vpaddq_s16(simde__m128i_to_neon_i16(a), simde__m128i_to_neon_i16(b))); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(simde__m128i_to_neon_i16(a), simde__m128i_to_neon_i16(b)); + return simde__m128i_from_neon_i16(vaddq_s16(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return vreinterpretq_s64_s16(vpaddq_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16)); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return vreinterpretq_s64_s16(vcombine_s16(vpadd_s16(vget_low_s16(simde__m128i_to_private(a).neon_i16), vget_high_s16(simde__m128i_to_private(a).neon_i16)), - vpadd_s16(vget_low_s16(simde__m128i_to_private(b).neon_i16), vget_high_s16(simde__m128i_to_private(b).neon_i16)))); - #else - return simde_mm_add_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); - #endif + return simde_mm_add_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); #endif } #if defined(SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES) @@ -385,13 +384,13 @@ simde__m128i simde_mm_hadd_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSSE3_NATIVE) return _mm_hadd_epi32(a, b); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return simde__m128i_from_neon_i32(vpaddq_s32(simde__m128i_to_neon_i32(a), simde__m128i_to_neon_i32(b))); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4x2_t t = vuzpq_s32(simde__m128i_to_neon_i32(a), simde__m128i_to_neon_i32(b)); + return simde__m128i_from_neon_i32(vaddq_s32(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return vreinterpretq_s64_s32(vcombine_s32(vpadd_s32(vget_low_s32(simde__m128i_to_private(a).neon_i32), vget_high_s32(simde__m128i_to_private(a).neon_i32)), - vpadd_s32(vget_low_s32(simde__m128i_to_private(b).neon_i32), vget_high_s32(simde__m128i_to_private(b).neon_i32)))); - #else - return simde_mm_add_epi32(simde_x_mm_deinterleaveeven_epi32(a, b), simde_x_mm_deinterleaveodd_epi32(a, b)); - #endif + return simde_mm_add_epi32(simde_x_mm_deinterleaveeven_epi32(a, b), simde_x_mm_deinterleaveodd_epi32(a, b)); #endif } #if defined(SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES) @@ -410,7 +409,10 @@ simde_mm_hadd_pi16 (simde__m64 a, simde__m64 b) { b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_i16 = vadd_s16(vuzp1_s16(a_.neon_i16, b_.neon_i16), vuzp2_s16(a_.neon_i16, b_.neon_i16)); + r_.neon_i16 = vpadd_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t t = vuzp_s16(a_.neon_i16, b_.neon_i16); + 
r_.neon_i16 = vadd_s16(t.val[0], t.val[1]); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.i16, b_.i16, 0, 2, 4, 6) + @@ -441,7 +443,10 @@ simde_mm_hadd_pi32 (simde__m64 a, simde__m64 b) { b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_i32 = vadd_s32(vuzp1_s32(a_.neon_i32, b_.neon_i32), vuzp2_s32(a_.neon_i32, b_.neon_i32)); + r_.neon_i32 = vpadd_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x2x2_t t = vuzp_s32(a_.neon_i32, b_.neon_i32); + r_.neon_i32 = vadd_s32(t.val[0], t.val[1]); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.i32, b_.i32, 0, 2) + @@ -463,22 +468,11 @@ simde__m128i simde_mm_hadds_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSSE3_NATIVE) return _mm_hadds_epi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(simde__m128i_to_neon_i16(a), simde__m128i_to_neon_i16(b)); + return simde__m128i_from_neon_i16(vqaddq_s16(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return vreinterpretq_s64_s16(vqaddq_s16(vuzp1q_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16), vuzp2q_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16))); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - int32x4_t ax = simde__m128i_to_private(a).neon_i32; - int32x4_t bx = simde__m128i_to_private(b).neon_i32; - // Interleave using vshrn/vmovn - // [a0|a2|a4|a6|b0|b2|b4|b6] - // [a1|a3|a5|a7|b1|b3|b5|b7] - int16x8_t ab0246 = vcombine_s16(vmovn_s32(ax), vmovn_s32(bx)); - int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(ax, 16), vshrn_n_s32(bx, 16)); - // Saturated add - return vreinterpretq_s64_s16(vqaddq_s16(ab0246, ab1357)); - #else - return simde_mm_adds_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); - #endif + return simde_mm_adds_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); #endif } #if defined(SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES) @@ -496,8 +490,9 @@ simde_mm_hadds_pi16 (simde__m64 a, simde__m64 b) { a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_i16 = vqadd_s16(vuzp1_s16(a_.neon_i16, b_.neon_i16), vuzp2_s16(a_.neon_i16, b_.neon_i16)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t t = vuzp_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i16 = vqadd_s16(t.val[0], t.val[1]); #else for (size_t i = 0 ; i < ((sizeof(r_.i16) / sizeof(r_.i16[0])) / 2) ; i++) { int32_t ta = HEDLEY_STATIC_CAST(int32_t, a_.i16[i * 2]) + HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]); @@ -519,20 +514,11 @@ simde__m128i simde_mm_hsub_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSSE3_NATIVE) return _mm_hsub_epi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(simde__m128i_to_neon_i16(a), simde__m128i_to_neon_i16(b)); + return simde__m128i_from_neon_i16(vsubq_s16(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return vreinterpretq_s64_s16(vsubq_s16(vuzp1q_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16), vuzp2q_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16))); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // Interleave using vshrn/vmovn - // [a0|a2|a4|a6|b0|b2|b4|b6] - // [a1|a3|a5|a7|b1|b3|b5|b7] - int16x8_t ab0246 = 
vcombine_s16(vmovn_s32(simde__m128i_to_private(a).neon_i32), vmovn_s32(simde__m128i_to_private(b).neon_i32)); - int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(simde__m128i_to_private(a).neon_i32, 16), vshrn_n_s32(simde__m128i_to_private(b).neon_i32, 16)); - // Subtract - return vreinterpretq_s64_s16(vsubq_s16(ab0246, ab1357)); - #else - return simde_mm_sub_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); - #endif + return simde_mm_sub_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); #endif } #if defined(SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES) @@ -544,20 +530,11 @@ simde__m128i simde_mm_hsub_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSSE3_NATIVE) return _mm_hsub_epi32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4x2_t t = vuzpq_s32(simde__m128i_to_neon_i32(a), simde__m128i_to_neon_i32(b)); + return simde__m128i_from_neon_i32(vsubq_s32(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return vreinterpretq_s64_s32(vsubq_s32(vuzp1q_s32(simde__m128i_to_private(a).neon_i32, simde__m128i_to_private(b).neon_i32), vuzp2q_s32(simde__m128i_to_private(a).neon_i32, simde__m128i_to_private(b).neon_i32))); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // Interleave using vshrn/vmovn - // [a0|a2|b0|b2] - // [a1|a2|b1|b3] - int32x4_t ab02 = vcombine_s32(vmovn_s64(simde__m128i_to_private(a).neon_i64), vmovn_s64(simde__m128i_to_private(b).neon_i64)); - int32x4_t ab13 = vcombine_s32(vshrn_n_s64(simde__m128i_to_private(a).neon_i64, 32), vshrn_n_s64(simde__m128i_to_private(b).neon_i64, 32)); - // Subtract - return vreinterpretq_s64_s32(vsubq_s32(ab02, ab13)); - #else - return simde_mm_sub_epi32(simde_x_mm_deinterleaveeven_epi32(a, b), simde_x_mm_deinterleaveodd_epi32(a, b)); - #endif + return simde_mm_sub_epi32(simde_x_mm_deinterleaveeven_epi32(a, b), simde_x_mm_deinterleaveodd_epi32(a, b)); #endif } #if defined(SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES) @@ -575,8 +552,9 @@ simde_mm_hsub_pi16 (simde__m64 a, simde__m64 b) { a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_i16 = vsub_s16(vuzp1_s16(a_.neon_i16, b_.neon_i16), vuzp2_s16(a_.neon_i16, b_.neon_i16)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t t = vuzp_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i16 = vsub_s16(t.val[0], t.val[1]); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.i16, b_.i16, 0, 2, 4, 6) - @@ -606,8 +584,9 @@ simde_mm_hsub_pi32 (simde__m64 a, simde__m64 b) { a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_i32 = vsub_s32(vuzp1_s32(a_.neon_i32, b_.neon_i32), vuzp2_s32(a_.neon_i32, b_.neon_i32)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x2x2_t t = vuzp_s32(a_.neon_i32, b_.neon_i32); + r_.neon_i32 = vsub_s32(t.val[0], t.val[1]); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.i32, b_.i32, 0, 2) - @@ -629,20 +608,11 @@ simde__m128i simde_mm_hsubs_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSSE3_NATIVE) return _mm_hsubs_epi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8x2_t t = vuzpq_s16(simde__m128i_to_neon_i16(a), simde__m128i_to_neon_i16(b)); + return simde__m128i_from_neon_i16(vqsubq_s16(t.val[0], t.val[1])); #else - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - return 
vreinterpretq_s64_s16(vqsubq_s16(vuzp1q_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16), vuzp2q_s16(simde__m128i_to_private(a).neon_i16, simde__m128i_to_private(b).neon_i16))); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // Interleave using vshrn/vmovn - // [a0|a2|a4|a6|b0|b2|b4|b6] - // [a1|a3|a5|a7|b1|b3|b5|b7] - int16x8_t ab0246 = vcombine_s16(vmovn_s32(simde__m128i_to_private(a).neon_i32), vmovn_s32(simde__m128i_to_private(b).neon_i32)); - int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(simde__m128i_to_private(a).neon_i32, 16), vshrn_n_s32(simde__m128i_to_private(b).neon_i32, 16)); - // Saturated subtract - return vreinterpretq_s64_s16(vqsubq_s16(ab0246, ab1357)); - #else - return simde_mm_subs_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); - #endif + return simde_mm_subs_epi16(simde_x_mm_deinterleaveeven_epi16(a, b), simde_x_mm_deinterleaveodd_epi16(a, b)); #endif } #if defined(SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES) @@ -660,14 +630,13 @@ simde_mm_hsubs_pi16 (simde__m64 a, simde__m64 b) { a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_i16 = vqsub_s16(vuzp1_s16(a_.neon_i16, b_.neon_i16), vuzp2_s16(a_.neon_i16, b_.neon_i16)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t t = vuzp_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i16 = vqsub_s16(t.val[0], t.val[1]); #else for (size_t i = 0 ; i < ((sizeof(r_.i16) / sizeof(r_.i16[0])) / 2) ; i++) { - int32_t ta = HEDLEY_STATIC_CAST(int32_t, a_.i16[i * 2]) - HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]); - r_.i16[ i ] = HEDLEY_LIKELY(ta > INT16_MIN) ? (HEDLEY_LIKELY(ta < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ta) : INT16_MAX) : INT16_MIN; - int32_t tb = HEDLEY_STATIC_CAST(int32_t, b_.i16[i * 2]) - HEDLEY_STATIC_CAST(int32_t, b_.i16[(i * 2) + 1]); - r_.i16[i + 2] = HEDLEY_LIKELY(tb > INT16_MIN) ? (HEDLEY_LIKELY(tb < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, tb) : INT16_MAX) : INT16_MIN; + r_.i16[ i ] = simde_math_subs_i16(a_.i16[i * 2], a_.i16[(i * 2) + 1]); + r_.i16[i + 2] = simde_math_subs_i16(b_.i16[i * 2], b_.i16[(i * 2) + 1]); } #endif @@ -689,28 +658,20 @@ simde_mm_maddubs_epi16 (simde__m128i a, simde__m128i b) { a_ = simde__m128i_to_private(a), b_ = simde__m128i_to_private(b); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a_.neon_u8))), vmovl_s8(vget_low_s8(b_.neon_i8))); - int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a_.neon_u8))), vmovl_s8(vget_high_s8(b_.neon_i8))); - r_.neon_i16 = vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // This would be much simpler if x86 would choose to zero extend OR sign extend, - // not both. - // This could probably be optimized better. - - // Zero extend a + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + /* Zero extend a */ int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a_.neon_u16, 8)); int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a_.neon_u16, vdupq_n_u16(0xff00))); - // Sign extend by shifting left then shifting right. + /* Sign extend by shifting left then shifting right. 
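
The scalar fallback of simde_mm_hsubs_pi16 now delegates the clamping to simde_math_subs_i16 instead of open-coding it with HEDLEY_LIKELY hints. That helper is defined elsewhere in simde and is not shown in this patch; assuming it is a plain saturating 16-bit subtraction, its behaviour would be roughly:

    #include <stdint.h>
    #include <stdio.h>

    /* Hedged sketch of a saturating int16_t subtraction, i.e. what the
     * simde_math_subs_i16 call in the hsubs_pi16 fallback is assumed to do
     * (the real helper lives elsewhere in simde, not in this patch). */
    static int16_t subs_i16_sketch(int16_t a, int16_t b) {
        int32_t t = (int32_t) a - (int32_t) b;  /* widen so the difference cannot overflow */
        if (t > INT16_MAX) return INT16_MAX;
        if (t < INT16_MIN) return INT16_MIN;
        return (int16_t) t;
    }

    int main(void) {
        printf("%d\n", subs_i16_sketch(INT16_MIN, 1));   /* clamps to -32768 */
        printf("%d\n", subs_i16_sketch(INT16_MAX, -1));  /* clamps to  32767 */
        printf("%d\n", subs_i16_sketch(100, 42));        /* 58 */
        return 0;
    }
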
*/ int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b_.neon_i16, 8), 8); int16x8_t b_odd = vshrq_n_s16(b_.neon_i16, 8); - // multiply + /* multiply */ int16x8_t prod1 = vmulq_s16(a_even, b_even); int16x8_t prod2 = vmulq_s16(a_odd, b_odd); - // saturated add + /* saturated add */ r_.neon_i16 = vqaddq_s16(prod1, prod2); #else for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -718,7 +679,7 @@ simde_mm_maddubs_epi16 (simde__m128i a, simde__m128i b) { int32_t ts = (HEDLEY_STATIC_CAST(int16_t, a_.u8[ idx ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[ idx ])) + (HEDLEY_STATIC_CAST(int16_t, a_.u8[idx + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[idx + 1])); - r_.i16[i] = HEDLEY_LIKELY(ts > INT16_MIN) ? (HEDLEY_LIKELY(ts < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ts) : INT16_MAX) : INT16_MIN; + r_.i16[i] = (ts > INT16_MIN) ? ((ts < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ts) : INT16_MAX) : INT16_MIN; } #endif @@ -753,7 +714,7 @@ simde_mm_maddubs_pi16 (simde__m64 a, simde__m64 b) { int32_t ts = (HEDLEY_STATIC_CAST(int16_t, a_.u8[ idx ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[ idx ])) + (HEDLEY_STATIC_CAST(int16_t, a_.u8[idx + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[idx + 1])); - r_.i16[i] = HEDLEY_LIKELY(ts > INT16_MIN) ? (HEDLEY_LIKELY(ts < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ts) : INT16_MAX) : INT16_MIN; + r_.i16[i] = (ts > INT16_MIN) ? ((ts < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ts) : INT16_MAX) : INT16_MIN; } #endif @@ -776,21 +737,18 @@ simde_mm_mulhrs_epi16 (simde__m128i a, simde__m128i b) { b_ = simde__m128i_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // Has issues due to saturation - // r_.neon_i16 = vqrdmulhq_s16(a, b); - - // Multiply + /* Multiply */ int32x4_t mul_lo = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16)); int32x4_t mul_hi = vmull_s16(vget_high_s16(a_.neon_i16), vget_high_s16(b_.neon_i16)); - // Rounding narrowing shift right - // narrow = (int16_t)((mul + 16384) >> 15); + /* Rounding narrowing shift right + * narrow = (int16_t)((mul + 16384) >> 15); */ int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15); int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15); - // Join together + /* Join together */ r_.neon_i16 = vcombine_s16(narrow_lo, narrow_hi); #else SIMDE_VECTORIZE @@ -818,17 +776,14 @@ simde_mm_mulhrs_pi16 (simde__m64 a, simde__m64 b) { b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // Has issues due to saturation - // r_.neon_i16 = vqrdmulh_s16(a, b); - - // Multiply + /* Multiply */ int32x4_t mul = vmull_s16(a_.neon_i16, b_.neon_i16); - // Rounding narrowing shift right - // narrow = (int16_t)((mul + 16384) >> 15); + /* Rounding narrowing shift right + * narrow = (int16_t)((mul + 16384) >> 15); */ int16x4_t narrow = vrshrn_n_s32(mul, 15); - // Join together + /* Join together */ r_.neon_i16 = narrow; #else SIMDE_VECTORIZE @@ -866,6 +821,10 @@ simde_mm_sign_epi8 (simde__m128i a, simde__m128i b) { bnz_mask = vmvnq_u8(bnz_mask); r_.neon_i8 = vbslq_s8(aneg_mask, vnegq_s8(a_.neon_i8), vandq_s8(a_.neon_i8, vreinterpretq_s8_u8(bnz_mask))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde__m128i mask = wasm_i8x16_shr(b_.wasm_v128, 7); + simde__m128i zeromask = simde_mm_cmpeq_epi8(b_.wasm_v128, simde_mm_setzero_si128()); + r_.wasm_v128 = simde_mm_andnot_si128(zeromask, simde_mm_xor_si128(simde_mm_add_epi8(a_.wasm_v128, mask), mask)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -902,6 +861,10 @@ simde_mm_sign_epi16 (simde__m128i a, simde__m128i b) { bnz_mask = 
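
In the maddubs and mulhrs hunks the arithmetic is untouched: the edits rewrite // comments as /* */ comments, drop the AArch64-only branches, and remove the HEDLEY_LIKELY hints from the scalar clamps. For reference, a scalar sketch of what one output lane computes in each case (my own illustration, not code from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar sketch of _mm_maddubs_epi16 for one output lane: zero-extend a's
     * bytes, sign-extend b's bytes, multiply pairwise, then add the two
     * products with saturation to int16_t. */
    static int16_t maddubs_lane(uint8_t a0, uint8_t a1, int8_t b0, int8_t b1) {
        int32_t t = (int32_t) a0 * (int32_t) b0 + (int32_t) a1 * (int32_t) b1;
        if (t > INT16_MAX) return INT16_MAX;
        if (t < INT16_MIN) return INT16_MIN;
        return (int16_t) t;
    }

    /* Scalar sketch of _mm_mulhrs_epi16: rounded high half of a Q15 product,
     * (a * b + 16384) >> 15, which the NEON path obtains via vmull + vrshrn.
     * An arithmetic right shift of the signed product is assumed here, matching
     * the formula quoted in the patch's comments. */
    static int16_t mulhrs_lane(int16_t a, int16_t b) {
        int32_t t = (int32_t) a * (int32_t) b;
        return (int16_t) ((t + 0x4000) >> 15);
    }

    int main(void) {
        printf("%d\n", maddubs_lane(255, 255, 127, 127));  /* saturates to 32767 */
        printf("%d\n", mulhrs_lane(16384, 16384));         /* 0.5 * 0.5 in Q15 -> 8192 */
        return 0;
    }
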
vmvnq_u16(bnz_mask); r_.neon_i16 = vbslq_s16(aneg_mask, vnegq_s16(a_.neon_i16), vandq_s16(a_.neon_i16, vreinterpretq_s16_u16(bnz_mask))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde__m128i mask = simde_mm_srai_epi16(b_.wasm_v128, 15); + simde__m128i zeromask = simde_mm_cmpeq_epi16(b_.wasm_v128, simde_mm_setzero_si128()); + r_.wasm_v128 = simde_mm_andnot_si128(zeromask, simde_mm_xor_si128(simde_mm_add_epi16(a_.wasm_v128, mask), mask)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -938,6 +901,10 @@ simde_mm_sign_epi32 (simde__m128i a, simde__m128i b) { bnz_mask = vmvnq_u32(bnz_mask); r_.neon_i32 = vbslq_s32(aneg_mask, vnegq_s32(a_.neon_i32), vandq_s32(a_.neon_i32, vreinterpretq_s32_u32(bnz_mask))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde__m128i mask = simde_mm_srai_epi32(b_.wasm_v128, 31); + simde__m128i zeromask = simde_mm_cmpeq_epi32(b_.wasm_v128, simde_mm_setzero_si128()); + r_.wasm_v128 = simde_mm_andnot_si128(zeromask, simde_mm_xor_si128(simde_mm_add_epi32(a_.wasm_v128, mask), mask)); #else for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = (b_.i32[i] < 0) ? (- a_.i32[i]) : ((b_.i32[i] != 0) ? (a_.i32[i]) : INT32_C(0)); diff --git a/lib/mmseqs/lib/simde/simde/x86/svml.h b/lib/mmseqs/lib/simde/simde/x86/svml.h index 7380f87..cb94a34 100644 --- a/lib/mmseqs/lib/simde/simde/x86/svml.h +++ b/lib/mmseqs/lib/simde/simde/x86/svml.h @@ -292,7 +292,7 @@ simde_mm_acosh_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_acosh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_acoshf4_u10(a); + return Sleef_acoshf4_u10(a); #else simde__m128_private r_, @@ -317,7 +317,7 @@ simde_mm_acosh_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_acosh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_acoshd2_u10(a); + return Sleef_acoshd2_u10(a); #else simde__m128d_private r_, @@ -342,7 +342,7 @@ simde_mm256_acosh_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_acosh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_acoshf8_u10(a); + return Sleef_acoshf8_u10(a); #else simde__m256_private r_, @@ -374,7 +374,7 @@ simde_mm256_acosh_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_acosh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_acoshd4_u10(a); + return Sleef_acoshd4_u10(a); #else simde__m256d_private r_, @@ -405,7 +405,7 @@ simde_mm512_acosh_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_acosh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_acoshf16_u10(a); + return Sleef_acoshf16_u10(a); #else simde__m512_private r_, @@ -436,7 +436,7 @@ simde_mm512_acosh_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_acosh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_acoshd8_u10(a); + return Sleef_acoshd8_u10(a); #else simde__m512d_private r_, @@ -722,7 +722,7 @@ simde_mm_asinh_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_asinh_ps(a); #elif 
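
The new SIMDE_WASM_SIMD128_NATIVE branches of simde_mm_sign_epi8/16/32 use the branch-free sign trick: an arithmetic right shift of b by (lane width - 1) yields an all-ones mask for negative lanes, (a + mask) ^ mask negates exactly those lanes, and an AND-NOT against a b == 0 comparison zeroes the rest. A scalar sketch of one 32-bit lane, assuming the usual two's-complement arithmetic shift:

    #include <stdint.h>
    #include <stdio.h>

    /* mask = b >> 31 is -1 for negative b and 0 otherwise (arithmetic shift on
     * typical targets); (a + mask) ^ mask is two's-complement negation when
     * mask == -1 and a no-op when mask == 0; lanes where b == 0 become 0. */
    static int32_t sign_lane_i32(int32_t a, int32_t b) {
        int32_t mask = b >> 31;
        int32_t conditionally_negated =
            (int32_t) (((uint32_t) a + (uint32_t) mask) ^ (uint32_t) mask);
        return (b == 0) ? 0 : conditionally_negated;
    }

    int main(void) {
        printf("%d\n", sign_lane_i32(7, -3));  /* -7 */
        printf("%d\n", sign_lane_i32(7,  5));  /*  7 */
        printf("%d\n", sign_lane_i32(7,  0));  /*  0 */
        return 0;
    }
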
defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_asinhf4_u10(a); + return Sleef_asinhf4_u10(a); #else simde__m128_private r_, @@ -747,7 +747,7 @@ simde_mm_asinh_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_asinh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_asinhd2_u10(a); + return Sleef_asinhd2_u10(a); #else simde__m128d_private r_, @@ -772,7 +772,7 @@ simde_mm256_asinh_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_asinh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_asinhf8_u10(a); + return Sleef_asinhf8_u10(a); #else simde__m256_private r_, @@ -804,7 +804,7 @@ simde_mm256_asinh_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_asinh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_asinhd4_u10(a); + return Sleef_asinhd4_u10(a); #else simde__m256d_private r_, @@ -835,7 +835,7 @@ simde_mm512_asinh_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_asinh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_asinhf16_u10(a); + return Sleef_asinhf16_u10(a); #else simde__m512_private r_, @@ -866,7 +866,7 @@ simde_mm512_asinh_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_asinh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_asinhd8_u10(a); + return Sleef_asinhd8_u10(a); #else simde__m512d_private r_, @@ -1385,7 +1385,7 @@ simde_mm_atanh_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_atanh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_atanhf4_u10(a); + return Sleef_atanhf4_u10(a); #else simde__m128_private r_, @@ -1410,7 +1410,7 @@ simde_mm_atanh_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_atanh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_atanhd2_u10(a); + return Sleef_atanhd2_u10(a); #else simde__m128d_private r_, @@ -1435,7 +1435,7 @@ simde_mm256_atanh_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_atanh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_atanhf8_u10(a); + return Sleef_atanhf8_u10(a); #else simde__m256_private r_, @@ -1467,7 +1467,7 @@ simde_mm256_atanh_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_atanh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_atanhd4_u10(a); + return Sleef_atanhd4_u10(a); #else simde__m256d_private r_, @@ -1498,7 +1498,7 @@ simde_mm512_atanh_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_atanh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_atanhf16_u10(a); + return Sleef_atanhf16_u10(a); #else simde__m512_private r_, @@ -1529,7 +1529,7 @@ simde_mm512_atanh_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_atanh_pd(a); #elif 
defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_atanhd8_u10(a); + return Sleef_atanhd8_u10(a); #else simde__m512d_private r_, @@ -1588,7 +1588,7 @@ simde_mm_cbrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_cbrt_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_cbrtf4_u10(a); + return Sleef_cbrtf4_u10(a); #else simde__m128_private r_, @@ -1613,7 +1613,7 @@ simde_mm_cbrt_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_cbrt_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_cbrtd2_u10(a); + return Sleef_cbrtd2_u10(a); #else simde__m128d_private r_, @@ -1638,7 +1638,7 @@ simde_mm256_cbrt_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_cbrt_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_cbrtf8_u10(a); + return Sleef_cbrtf8_u10(a); #else simde__m256_private r_, @@ -1670,7 +1670,7 @@ simde_mm256_cbrt_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_cbrt_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_cbrtd4_u10(a); + return Sleef_cbrtd4_u10(a); #else simde__m256d_private r_, @@ -1701,7 +1701,7 @@ simde_mm512_cbrt_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_cbrt_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_cbrtf16_u10(a); + return Sleef_cbrtf16_u10(a); #else simde__m512_private r_, @@ -1732,7 +1732,7 @@ simde_mm512_cbrt_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_cbrt_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_cbrtd8_u10(a); + return Sleef_cbrtd8_u10(a); #else simde__m512d_private r_, @@ -2075,9 +2075,9 @@ simde_x_mm_deg2rad_ps(simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vmulq_n_f32(a_.neon_i32, SIMDE_MATH_PI_OVER_180F); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) - r_.f32 = a_.f32 * SIMDE_MATH_PI_OVER_180F; - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - const __typeof__(r_.f32) tmp = { SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F }; + r_.f32 = a_.f32 * SIMDE_MATH_PI_OVER_180F; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + const __typeof__(r_.f32) tmp = { SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F }; r_.f32 = a_.f32 * tmp; #else SIMDE_VECTORIZE @@ -2103,9 +2103,9 @@ simde_x_mm_deg2rad_pd(simde__m128d a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vmulq_n_f64(a_.neon_i64, SIMDE_MATH_PI_OVER_180); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) - r_.f64 = a_.f64 * SIMDE_MATH_PI_OVER_180; - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - const __typeof__(r_.f64) tmp = { SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180 }; + r_.f64 = a_.f64 * SIMDE_MATH_PI_OVER_180; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + const __typeof__(r_.f64) tmp = { SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180 }; r_.f64 = a_.f64 * tmp; #else SIMDE_VECTORIZE @@ -2132,8 +2132,14 @@ simde_x_mm256_deg2rad_ps(simde__m256 a) { for (size_t i = 0 ; i < 
(sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { r_.m128[i] = simde_x_mm_deg2rad_ps(a_.m128[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.f32 = a_.f32 * SIMDE_MATH_PI_OVER_180F; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) + r_.f32 = a_.f32 * SIMDE_MATH_PI_OVER_180F; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + const __typeof__(r_.f32) tmp = { + SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, + SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F + }; + r_.f32 = a_.f32 * tmp; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -2159,8 +2165,11 @@ simde_x_mm256_deg2rad_pd(simde__m256d a) { for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) { r_.m128d[i] = simde_x_mm_deg2rad_pd(a_.m128d[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.f64 = a_.f64 * SIMDE_MATH_PI_OVER_180; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) + r_.f64 = a_.f64 * SIMDE_MATH_PI_OVER_180; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + const __typeof__(r_.f64) tmp = { SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180 }; + r_.f64 = a_.f64 * tmp; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -2186,12 +2195,20 @@ simde_x_mm512_deg2rad_ps(simde__m512 a) { for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) { r_.m256[i] = simde_x_mm256_deg2rad_ps(a_.m256[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.f32 = a_.f32 * SIMDE_MATH_PI_OVER_180F; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) + r_.f32 = a_.f32 * SIMDE_MATH_PI_OVER_180F; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + const __typeof__(r_.f32) tmp = { + SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, + SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, + SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, + SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F, SIMDE_MATH_PI_OVER_180F + }; + r_.f32 = a_.f32 * tmp; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) { - r_.m256[i] = simde_x_mm256_deg2rad_ps(a_.m256[i]); + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_deg2radf(a_.f32[i]); } #endif @@ -2213,12 +2230,18 @@ simde_x_mm512_deg2rad_pd(simde__m512d a) { for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) { r_.m256d[i] = simde_x_mm256_deg2rad_pd(a_.m256d[i]); } - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.f64 = a_.f64 * SIMDE_MATH_PI_OVER_180; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) + r_.f64 = a_.f64 * SIMDE_MATH_PI_OVER_180; + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + const __typeof__(r_.f64) tmp = { + SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, + SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180, SIMDE_MATH_PI_OVER_180 + }; + r_.f64 = a_.f64 * tmp; #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) { - r_.m256d[i] = simde_x_mm256_deg2rad_pd(a_.m256d[i]); + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; 
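
The deg2rad hunks do two things: they add a SIMDE_VECTOR_SUBSCRIPT_OPS branch that multiplies by a broadcast vector of pi/180, for vector extensions that support element-wise operators but not vector-times-scalar, and they fix the scalar fallback of the 512-bit variants, which previously recursed into the 256-bit helper instead of looping over individual lanes. Every branch ultimately computes the same per-element product; a minimal scalar sketch:

    #include <stdio.h>

    #define PI_OVER_180 0.017453292519943295  /* pi / 180 */

    /* Scalar model of the deg2rad kernels: whether a branch multiplies by a
     * scalar, by a broadcast vector constant, or loops lane by lane, each
     * element ends up as degrees * (pi / 180). */
    static void deg2rad_array(const double *deg, double *rad, int n) {
        for (int i = 0; i < n; i++) {
            rad[i] = deg[i] * PI_OVER_180;
        }
    }

    int main(void) {
        double deg[4] = { 0.0, 90.0, 180.0, 360.0 };
        double rad[4];
        deg2rad_array(deg, rad, 4);
        for (int i = 0; i < 4; i++) printf("%f\n", rad[i]);  /* 0, ~1.5708, ~3.1416, ~6.2832 */
        return 0;
    }
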
i++) { + r_.f64[i] = simde_math_deg2rad(a_.f64[i]); } #endif @@ -2458,7 +2481,7 @@ simde_mm_cosh_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_cosh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_coshf4_u10(a); + return Sleef_coshf4_u10(a); #else simde__m128_private r_, @@ -2483,7 +2506,7 @@ simde_mm_cosh_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_cosh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_coshd2_u10(a); + return Sleef_coshd2_u10(a); #else simde__m128d_private r_, @@ -2508,7 +2531,7 @@ simde_mm256_cosh_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_cosh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_coshf8_u10(a); + return Sleef_coshf8_u10(a); #else simde__m256_private r_, @@ -2540,7 +2563,7 @@ simde_mm256_cosh_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_cosh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_coshd4_u10(a); + return Sleef_coshd4_u10(a); #else simde__m256d_private r_, @@ -2571,7 +2594,7 @@ simde_mm512_cosh_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_cosh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_coshf16_u10(a); + return Sleef_coshf16_u10(a); #else simde__m512_private r_, @@ -2602,7 +2625,7 @@ simde_mm512_cosh_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_cosh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_coshd8_u10(a); + return Sleef_coshd8_u10(a); #else simde__m512d_private r_, @@ -2669,7 +2692,7 @@ simde_mm_div_epi8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i8 = a_.i8 / b_.i8; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i8x4_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i8x4_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -2699,7 +2722,7 @@ simde_mm_div_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i16 = a_.i16 / b_.i16; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i16x4_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i16x4_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -2729,7 +2752,7 @@ simde_mm_div_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 / b_.i32; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i32x4_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i32x4_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -2762,7 +2785,7 @@ simde_mm_div_epi64 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 / b_.i64; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i64x4_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i64x4_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ 
-2792,7 +2815,7 @@ simde_mm_div_epu8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u8 = a_.u8 / b_.u8; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u8x16_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u8x16_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -2822,7 +2845,7 @@ simde_mm_div_epu16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u16 = a_.u16 / b_.u16; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u16x16_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u16x16_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -2852,7 +2875,7 @@ simde_mm_div_epu32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 / b_.u32; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u32x16_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u32x16_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -2885,7 +2908,7 @@ simde_mm_div_epu64 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u64 = a_.u64 / b_.u64; #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u64x16_div(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u64x16_div(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { @@ -3054,10 +3077,16 @@ simde_mm256_div_epu8 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u8 = a_.u8 / b_.u8; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = a_.u8[i] / b_.u8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_div_epu8(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + r_.u8[i] = a_.u8[i] / b_.u8[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -3082,10 +3111,16 @@ simde_mm256_div_epu16 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u16 = a_.u16 / b_.u16; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = a_.u16[i] / b_.u16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_div_epu16(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = a_.u16[i] / b_.u16[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -3110,10 +3145,16 @@ simde_mm256_div_epu32 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 / b_.u32; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - r_.u32[i] = a_.u32[i] / b_.u32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_div_epu32(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = a_.u32[i] / b_.u32[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ 
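
The simde_mm256_div_epu* fallbacks (and, further down, the 512-bit and rem variants) now tile the wide vector into the widest natively supported pieces: when the GCC-style vector operators are not available, SIMDE_NATURAL_VECTOR_SIZE_LE(128) makes a 256-bit divide run as two 128-bit divides over the r_.m128i halves, and only otherwise does it drop to a fully scalar loop. A sketch of the tiling idea in plain C, with illustrative types and names standing in for the simde ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the 128-bit kernel and the 256-bit wrapper;
     * the real code calls simde_mm_div_epu32 on each r_.m128i half. */
    typedef struct { uint32_t u32[4]; } vec128_u32;
    typedef struct { vec128_u32 m128[2]; } vec256_u32;

    static vec128_u32 div_u32x4(vec128_u32 a, vec128_u32 b) {
        vec128_u32 r;
        for (int i = 0; i < 4; i++) r.u32[i] = a.u32[i] / b.u32[i];
        return r;
    }

    /* 256-bit divide expressed as two 128-bit divides, mirroring the
     * SIMDE_NATURAL_VECTOR_SIZE_LE(128) branch added in this patch. */
    static vec256_u32 div_u32x8(vec256_u32 a, vec256_u32 b) {
        vec256_u32 r;
        for (size_t i = 0; i < sizeof(r.m128) / sizeof(r.m128[0]); i++) {
            r.m128[i] = div_u32x4(a.m128[i], b.m128[i]);
        }
        return r;
    }

    int main(void) {
        vec256_u32 a = { { { { 10, 20, 30, 40 } }, { { 50, 60, 70, 80 } } } };
        vec256_u32 b = { { { {  2,  4,  5,  8 } }, { { 10, 12, 14, 16 } } } };
        vec256_u32 r = div_u32x8(a, b);
        for (int h = 0; h < 2; h++)
            for (int i = 0; i < 4; i++)
                printf("%u ", r.m128[h].u32[i]);  /* 5 5 6 5 5 5 5 5 */
        printf("\n");
        return 0;
    }
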
-3141,10 +3182,16 @@ simde_mm256_div_epu64 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u64 = a_.u64 / b_.u64; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { - r_.u64[i] = a_.u64[i] / b_.u64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_div_epu64(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = a_.u64[i] / b_.u64[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -3319,10 +3366,16 @@ simde_mm512_div_epu8 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u8 = a_.u8 / b_.u8; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = a_.u8[i] / b_.u8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_div_epu8(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + r_.u8[i] = a_.u8[i] / b_.u8[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -3347,10 +3400,16 @@ simde_mm512_div_epu16 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u16 = a_.u16 / b_.u16; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = a_.u16[i] / b_.u16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_div_epu16(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = a_.u16[i] / b_.u16[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -3375,10 +3434,16 @@ simde_mm512_div_epu32 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 / b_.u32; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - r_.u32[i] = a_.u32[i] / b_.u32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_div_epu32(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = a_.u32[i] / b_.u32[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -3417,10 +3482,16 @@ simde_mm512_div_epu64 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u64 = a_.u64 / b_.u64; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { - r_.u64[i] = a_.u64[i] / b_.u64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_div_epu64(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = a_.u64[i] / b_.u64[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -3437,7 +3508,7 @@ simde_mm_erf_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_erf_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_erff4_u10(a); + return 
Sleef_erff4_u10(a); #else simde__m128_private r_, @@ -3462,7 +3533,7 @@ simde_mm_erf_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_erf_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_erfd2_u10(a); + return Sleef_erfd2_u10(a); #else simde__m128d_private r_, @@ -3487,7 +3558,7 @@ simde_mm256_erf_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_erf_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_erff8_u10(a); + return Sleef_erff8_u10(a); #else simde__m256_private r_, @@ -3519,7 +3590,7 @@ simde_mm256_erf_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_erf_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_erfd4_u10(a); + return Sleef_erfd4_u10(a); #else simde__m256d_private r_, @@ -3550,7 +3621,7 @@ simde_mm512_erf_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_erf_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_erff16_u10(a); + return Sleef_erff16_u10(a); #else simde__m512_private r_, @@ -3581,7 +3652,7 @@ simde_mm512_erf_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_erf_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_erfd8_u10(a); + return Sleef_erfd8_u10(a); #else simde__m512d_private r_, @@ -3640,7 +3711,7 @@ simde_mm_erfc_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_erfc_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_erfcf4_u15(a); + return Sleef_erfcf4_u15(a); #else simde__m128_private r_, @@ -3665,7 +3736,7 @@ simde_mm_erfc_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_erfc_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_erfcd2_u15(a); + return Sleef_erfcd2_u15(a); #else simde__m128d_private r_, @@ -3690,7 +3761,7 @@ simde_mm256_erfc_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_erfc_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_erfcf8_u15(a); + return Sleef_erfcf8_u15(a); #else simde__m256_private r_, @@ -3722,7 +3793,7 @@ simde_mm256_erfc_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_erfc_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_erfcd4_u15(a); + return Sleef_erfcd4_u15(a); #else simde__m256d_private r_, @@ -3753,7 +3824,7 @@ simde_mm512_erfc_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_erfc_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_erfcf16_u15(a); + return Sleef_erfcf16_u15(a); #else simde__m512_private r_, @@ -3784,7 +3855,7 @@ simde_mm512_erfc_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_erfc_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_erfcd8_u15(a); + return Sleef_erfcd8_u15(a); #else simde__m512d_private r_, @@ -3843,7 +3914,7 
@@ simde_mm_exp_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_exp_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_expf4_u10(a); + return Sleef_expf4_u10(a); #else simde__m128_private r_, @@ -3868,7 +3939,7 @@ simde_mm_exp_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_exp_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_expd2_u10(a); + return Sleef_expd2_u10(a); #else simde__m128d_private r_, @@ -3893,7 +3964,7 @@ simde_mm256_exp_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_exp_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_expf8_u10(a); + return Sleef_expf8_u10(a); #else simde__m256_private r_, @@ -3925,7 +3996,7 @@ simde_mm256_exp_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_exp_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_expd4_u10(a); + return Sleef_expd4_u10(a); #else simde__m256d_private r_, @@ -3956,7 +4027,7 @@ simde_mm512_exp_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_exp_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_expf16_u10(a); + return Sleef_expf16_u10(a); #else simde__m512_private r_, @@ -3987,7 +4058,7 @@ simde_mm512_exp_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_exp_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_expd8_u10(a); + return Sleef_expd8_u10(a); #else simde__m512d_private r_, @@ -4046,7 +4117,7 @@ simde_mm_expm1_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_expm1_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_expm1f4_u10(a); + return Sleef_expm1f4_u10(a); #else simde__m128_private r_, @@ -4071,7 +4142,7 @@ simde_mm_expm1_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_expm1_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_expm1d2_u10(a); + return Sleef_expm1d2_u10(a); #else simde__m128d_private r_, @@ -4096,7 +4167,7 @@ simde_mm256_expm1_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_expm1_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_expm1f8_u10(a); + return Sleef_expm1f8_u10(a); #else simde__m256_private r_, @@ -4128,7 +4199,7 @@ simde_mm256_expm1_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_expm1_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_expm1d4_u10(a); + return Sleef_expm1d4_u10(a); #else simde__m256d_private r_, @@ -4159,7 +4230,7 @@ simde_mm512_expm1_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_expm1_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_expm1f16_u10(a); + return Sleef_expm1f16_u10(a); #else simde__m512_private r_, @@ -4190,7 +4261,7 @@ simde_mm512_expm1_pd (simde__m512d a) { #if 
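
The long run of svml.h hunks around here (acosh through exp10, and later log1p, log10, pow, rint, sinh) touches only whitespace on the Sleef return lines; the dispatch itself is unchanged (the one exception is simde_mm_log_ps, which also drops an ALTIVEC branch that was already disabled with && 0). That dispatch always has the same shape: native SVML intrinsic first, then a Sleef vector call when SIMDE_MATH_SLEEF_ENABLE is set (the _u10/_u15 suffixes are, as I understand Sleef's naming, its 1.0 and 1.5 ULP accuracy variants), otherwise a scalar loop over libm. A minimal sketch of that three-way dispatch for a hypothetical four-lane exp wrapper; the guard macros and stand-in function names are placeholders, not real APIs:

    #include <math.h>
    #include <stdio.h>

    typedef struct { float f32[4]; } vec4f;  /* illustrative stand-in for simde__m128 */

    /* Three-way dispatch sketch: native intrinsic, then Sleef, then scalar libm.
     * The guarded branches mirror the pattern in svml.h and are not compiled
     * unless the (placeholder) macros are defined. */
    static vec4f exp_ps_sketch(vec4f a) {
    #if defined(HAVE_NATIVE_SVML)            /* would call _mm_exp_ps(a) */
        return native_exp_ps(a);
    #elif defined(HAVE_SLEEF)                /* would call Sleef_expf4_u10(a) */
        return sleef_exp_ps(a);
    #else
        vec4f r;
        for (int i = 0; i < 4; i++) {
            r.f32[i] = expf(a.f32[i]);       /* portable scalar fallback */
        }
        return r;
    #endif
    }

    int main(void) {
        vec4f a = { { 0.0f, 1.0f, 2.0f, -1.0f } };
        vec4f r = exp_ps_sketch(a);
        for (int i = 0; i < 4; i++) printf("%f\n", r.f32[i]);
        return 0;
    }
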
defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_expm1_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_expm1d8_u10(a); + return Sleef_expm1d8_u10(a); #else simde__m512d_private r_, @@ -4249,7 +4320,7 @@ simde_mm_exp2_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_exp2_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_exp2f4_u10(a); + return Sleef_exp2f4_u10(a); #else simde__m128_private r_, @@ -4274,7 +4345,7 @@ simde_mm_exp2_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_exp2_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_exp2d2_u10(a); + return Sleef_exp2d2_u10(a); #else simde__m128d_private r_, @@ -4299,7 +4370,7 @@ simde_mm256_exp2_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_exp2_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_exp2f8_u10(a); + return Sleef_exp2f8_u10(a); #else simde__m256_private r_, @@ -4331,7 +4402,7 @@ simde_mm256_exp2_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_exp2_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_exp2d4_u10(a); + return Sleef_exp2d4_u10(a); #else simde__m256d_private r_, @@ -4362,7 +4433,7 @@ simde_mm512_exp2_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_exp2_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_exp2f16_u10(a); + return Sleef_exp2f16_u10(a); #else simde__m512_private r_, @@ -4393,7 +4464,7 @@ simde_mm512_exp2_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_exp2_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_exp2d8_u10(a); + return Sleef_exp2d8_u10(a); #else simde__m512d_private r_, @@ -4452,7 +4523,7 @@ simde_mm_exp10_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_exp10_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_exp10f4_u10(a); + return Sleef_exp10f4_u10(a); #else simde__m128_private r_, @@ -4477,7 +4548,7 @@ simde_mm_exp10_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_exp10_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_exp10d2_u10(a); + return Sleef_exp10d2_u10(a); #else simde__m128d_private r_, @@ -4502,7 +4573,7 @@ simde_mm256_exp10_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_exp10_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_exp10f8_u10(a); + return Sleef_exp10f8_u10(a); #else simde__m256_private r_, @@ -4534,7 +4605,7 @@ simde_mm256_exp10_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_exp10_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_exp10d4_u10(a); + return Sleef_exp10d4_u10(a); #else simde__m256d_private r_, @@ -4565,7 +4636,7 @@ simde_mm512_exp10_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && 
defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_exp10_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_exp10f16_u10(a); + return Sleef_exp10f16_u10(a); #else simde__m512_private r_, @@ -4596,7 +4667,7 @@ simde_mm512_exp10_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_exp10_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_exp10d8_u10(a); + return Sleef_exp10d8_u10(a); #else simde__m512d_private r_, @@ -5140,10 +5211,16 @@ simde_mm256_hypot_ps (simde__m256 a, simde__m256 b) { a_ = simde__m256_to_private(a), b_ = simde__m256_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { + r_.m128[i] = simde_mm_hypot_ps(a_.m128[i], b_.m128[i]); } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]); + } + #endif return simde__m256_from_private(r_); #endif @@ -5171,10 +5248,16 @@ simde_mm256_hypot_pd (simde__m256d a, simde__m256d b) { a_ = simde__m256d_to_private(a), b_ = simde__m256d_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) { + r_.m128d[i] = simde_mm_hypot_pd(a_.m128d[i], b_.m128d[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]); + } + #endif return simde__m256d_from_private(r_); #endif @@ -5201,10 +5284,16 @@ simde_mm512_hypot_ps (simde__m512 a, simde__m512 b) { a_ = simde__m512_to_private(a), b_ = simde__m512_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) { + r_.m256[i] = simde_mm256_hypot_ps(a_.m256[i], b_.m256[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]); + } + #endif return simde__m512_from_private(r_); #endif @@ -5231,10 +5320,16 @@ simde_mm512_hypot_pd (simde__m512d a, simde__m512d b) { a_ = simde__m512d_to_private(a), b_ = simde__m512d_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]); - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) { + r_.m256d[i] = simde_mm256_hypot_pd(a_.m256d[i], b_.m256d[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]); + } + #endif return simde__m512d_from_private(r_); #endif @@ -5445,8 +5540,6 @@ simde__m128 simde_mm_log_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_log_ps(a); - #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && 0 - return vec_loge(a); #elif 
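
The mm256/mm512 hypot hunks apply the same width tiling as the div changes, keeping simde_math_hypotf / simde_math_hypot as the per-lane fallback. Calling hypot rather than sqrt(a*a + b*b) in that fallback matters because libm's hypot avoids intermediate overflow; a small demonstration, assuming IEEE single precision:

    #include <math.h>
    #include <stdio.h>

    /* Why the lane-wise fallback calls hypotf: the naive formula overflows for
     * large inputs even though the true result is representable. */
    int main(void) {
        float a = 3.0e38f, b = 1.0e38f;

        float naive  = sqrtf(a * a + b * b);  /* a*a overflows float -> inf */
        float robust = hypotf(a, b);          /* ~3.16e38, still finite */

        printf("naive  = %g\n", naive);
        printf("robust = %g\n", robust);
        return 0;
    }
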
defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) #if SIMDE_ACCURACY_PREFERENCE > 1 return Sleef_logf4_u10(a); @@ -8045,7 +8138,7 @@ simde_mm_log1p_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_log1p_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_log1pf4_u10(a); + return Sleef_log1pf4_u10(a); #else simde__m128_private r_, @@ -8070,7 +8163,7 @@ simde_mm_log1p_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_log1p_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_log1pd2_u10(a); + return Sleef_log1pd2_u10(a); #else simde__m128d_private r_, @@ -8095,7 +8188,7 @@ simde_mm256_log1p_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_log1p_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_log1pf8_u10(a); + return Sleef_log1pf8_u10(a); #else simde__m256_private r_, @@ -8127,7 +8220,7 @@ simde_mm256_log1p_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_log1p_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_log1pd4_u10(a); + return Sleef_log1pd4_u10(a); #else simde__m256d_private r_, @@ -8158,7 +8251,7 @@ simde_mm512_log1p_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_log1p_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_log1pf16_u10(a); + return Sleef_log1pf16_u10(a); #else simde__m512_private r_, @@ -8189,7 +8282,7 @@ simde_mm512_log1p_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_log1p_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_log1pd8_u10(a); + return Sleef_log1pd8_u10(a); #else simde__m512d_private r_, @@ -8248,7 +8341,7 @@ simde_mm_log10_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_log10_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_log10f4_u10(a); + return Sleef_log10f4_u10(a); #else simde__m128_private r_, @@ -8273,7 +8366,7 @@ simde_mm_log10_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_log10_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_log10d2_u10(a); + return Sleef_log10d2_u10(a); #else simde__m128d_private r_, @@ -8298,7 +8391,7 @@ simde_mm256_log10_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_log10_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_log10f8_u10(a); + return Sleef_log10f8_u10(a); #else simde__m256_private r_, @@ -8330,7 +8423,7 @@ simde_mm256_log10_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_log10_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_log10d4_u10(a); + return Sleef_log10d4_u10(a); #else simde__m256d_private r_, @@ -8361,7 +8454,7 @@ simde_mm512_log10_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_log10_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && 
defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_log10f16_u10(a); + return Sleef_log10f16_u10(a); #else simde__m512_private r_, @@ -8392,7 +8485,7 @@ simde_mm512_log10_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_log10_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_log10d8_u10(a); + return Sleef_log10d8_u10(a); #else simde__m512d_private r_, @@ -8525,7 +8618,7 @@ simde_mm_pow_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_pow_ps(a, b); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_powf4_u10(a, b); + return Sleef_powf4_u10(a, b); #else simde__m128_private r_, @@ -8551,7 +8644,7 @@ simde_mm_pow_pd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_pow_pd(a, b); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_powd2_u10(a, b); + return Sleef_powd2_u10(a, b); #else simde__m128d_private r_, @@ -8577,7 +8670,7 @@ simde_mm256_pow_ps (simde__m256 a, simde__m256 b) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_pow_ps(a, b); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_powf8_u10(a, b); + return Sleef_powf8_u10(a, b); #else simde__m256_private r_, @@ -8604,7 +8697,7 @@ simde_mm256_pow_pd (simde__m256d a, simde__m256d b) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_pow_pd(a, b); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_powd4_u10(a, b); + return Sleef_powd4_u10(a, b); #else simde__m256d_private r_, @@ -8630,7 +8723,7 @@ simde_mm512_pow_ps (simde__m512 a, simde__m512 b) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_pow_ps(a, b); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_powf16_u10(a, b); + return Sleef_powf16_u10(a, b); #else simde__m512_private r_, @@ -8656,7 +8749,7 @@ simde_mm512_pow_pd (simde__m512d a, simde__m512d b) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_pow_pd(a, b); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_powd8_u10(a, b); + return Sleef_powd8_u10(a, b); #else simde__m512d_private r_, @@ -9058,10 +9151,16 @@ simde_mm256_rem_epi8 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i8 = a_.i8 % b_.i8; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - r_.i8[i] = a_.i8[i] % b_.i8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epi8(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = a_.i8[i] % b_.i8[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9086,10 +9185,16 @@ simde_mm256_rem_epi16 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i16 = a_.i16 % b_.i16; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = a_.i16[i] % b_.i16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = 
simde_mm_rem_epi16(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = a_.i16[i] % b_.i16[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9114,10 +9219,16 @@ simde_mm256_rem_epi32 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 % b_.i32; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = a_.i32[i] % b_.i32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epi32(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = a_.i32[i] % b_.i32[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9145,10 +9256,16 @@ simde_mm256_rem_epi64 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 % b_.i64; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = a_.i64[i] % b_.i64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epi64(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = a_.i64[i] % b_.i64[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9173,10 +9290,16 @@ simde_mm256_rem_epu8 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u8 = a_.u8 % b_.u8; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = a_.u8[i] % b_.u8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epu8(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + r_.u8[i] = a_.u8[i] % b_.u8[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9201,10 +9324,16 @@ simde_mm256_rem_epu16 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u16 = a_.u16 % b_.u16; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = a_.u16[i] % b_.u16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epu16(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = a_.u16[i] % b_.u16[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9229,10 +9358,16 @@ simde_mm256_rem_epu32 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 % b_.u32; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - r_.u32[i] = a_.u32[i] % b_.u32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epu32(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = a_.u32[i] % b_.u32[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9260,10 +9395,16 @@ 
simde_mm256_rem_epu64 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u64 = a_.u64 % b_.u64; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { - r_.u64[i] = a_.u64[i] % b_.u64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_rem_epu64(a_.m128i[i], b_.m128i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = a_.u64[i] % b_.u64[i]; + } + #endif #endif return simde__m256i_from_private(r_); @@ -9288,10 +9429,16 @@ simde_mm512_rem_epi8 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i8 = a_.i8 % b_.i8; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - r_.i8[i] = a_.i8[i] % b_.i8[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epi8(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = a_.i8[i] % b_.i8[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9316,10 +9463,16 @@ simde_mm512_rem_epi16 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i16 = a_.i16 % b_.i16; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = a_.i16[i] % b_.i16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epi16(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = a_.i16[i] % b_.i16[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9344,10 +9497,16 @@ simde_mm512_rem_epi32 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 % b_.i32; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = a_.i32[i] % b_.i32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epi32(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = a_.i32[i] % b_.i32[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9386,10 +9545,16 @@ simde_mm512_rem_epi64 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 % b_.i64; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = a_.i64[i] % b_.i64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epi64(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = a_.i64[i] % b_.i64[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9414,10 +9579,16 @@ simde_mm512_rem_epu8 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u8 = a_.u8 % b_.u8; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = a_.u8[i] % b_.u8[i]; - } + #if 
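
The rem hunks repeat the tiling one level up: under SIMDE_NATURAL_VECTOR_SIZE_LE(256) a 512-bit remainder is built from two 256-bit calls, each of which may in turn split into 128-bit calls. Per lane the fallback is C's % operator, which for the unsigned variants satisfies a % b == a - (a / b) * b, so div and rem kernels built from the same tiling stay consistent with each other; a tiny check:

    #include <stdint.h>
    #include <stdio.h>

    /* Per-lane semantics of the rem_epu* fallbacks: C's % operator.  For
     * unsigned lanes it equals a - (a / b) * b exactly. */
    int main(void) {
        uint32_t a[4] = { 10, 21, 7, 1000000007u };
        uint32_t b[4] = {  3,  7, 9, 97 };
        for (int i = 0; i < 4; i++) {
            uint32_t r = a[i] % b[i];
            uint32_t check = a[i] - (a[i] / b[i]) * b[i];
            printf("%u %% %u = %u (check %u)\n", a[i], b[i], r, check);
        }
        return 0;
    }
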
SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epu8(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + r_.u8[i] = a_.u8[i] % b_.u8[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9442,10 +9613,16 @@ simde_mm512_rem_epu16 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u16 = a_.u16 % b_.u16; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = a_.u16[i] % b_.u16[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epu16(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = a_.u16[i] % b_.u16[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9470,10 +9647,16 @@ simde_mm512_rem_epu32 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 % b_.u32; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - r_.u32[i] = a_.u32[i] % b_.u32[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epu32(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = a_.u32[i] % b_.u32[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9512,10 +9695,16 @@ simde_mm512_rem_epu64 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u64 = a_.u64 % b_.u64; #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { - r_.u64[i] = a_.u64[i] % b_.u64[i]; - } + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_rem_epu64(a_.m256i[i], b_.m256i[i]); + } + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = a_.u64[i] % b_.u64[i]; + } + #endif #endif return simde__m512i_from_private(r_); @@ -9588,7 +9777,7 @@ simde_mm512_rint_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_rint_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_rintf16(a); + return Sleef_rintf16(a); #else simde__m512_private r_, @@ -9613,7 +9802,7 @@ simde_mm512_rint_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_rint_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_rintd8(a); + return Sleef_rintd8(a); #else simde__m512d_private r_, @@ -10334,7 +10523,7 @@ simde_mm_sinh_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_sinh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_sinhf4_u10(a); + return Sleef_sinhf4_u10(a); #else simde__m128_private r_, @@ -10359,7 +10548,7 @@ simde_mm_sinh_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_sinh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_sinhd2_u10(a); + 
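The simde_mm256_rem_* and simde_mm512_rem_* hunks above all make the same change: when SIMDE_NATURAL_VECTOR_SIZE is at most half the result width, the remainder is delegated to the next-narrower helper on each half instead of one scalar loop over every lane. A minimal plain-C sketch of that shape, using hypothetical vec128/vec256 structs and rem_epi32_128/rem_epi32_256 names rather than SIMDe's private union types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the 128-/256-bit integer types; the real code
 * works on the simde__m128i/simde__m256i private unions. */
typedef struct { int32_t i32[4]; } vec128;
typedef struct { vec128 half[2]; } vec256;

/* 128-bit lane-wise remainder: integer % has no SIMD instruction, so the
 * lanes are handled one at a time. */
static vec128 rem_epi32_128(vec128 a, vec128 b) {
    vec128 r;
    for (size_t i = 0; i < 4; i++)
        r.i32[i] = a.i32[i] % b.i32[i];
    return r;
}

/* 256-bit remainder built from two 128-bit calls, mirroring the new
 * SIMDE_NATURAL_VECTOR_SIZE_LE(128) branch in the hunks above. */
static vec256 rem_epi32_256(vec256 a, vec256 b) {
    vec256 r;
    for (size_t i = 0; i < 2; i++)
        r.half[i] = rem_epi32_128(a.half[i], b.half[i]);
    return r;
}

int main(void) {
    vec256 a = {{ {{10, 21, 33, 47}}, {{5, 9, 100, -7}} }};
    vec256 b = {{ {{ 3,  4,  5,  6}}, {{2, 4,   9,  3}} }};
    vec256 r = rem_epi32_256(a, b);
    printf("%d %d %d %d\n", (int)r.half[0].i32[0], (int)r.half[0].i32[1],
                            (int)r.half[0].i32[2], (int)r.half[0].i32[3]);
    return 0;
}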
return Sleef_sinhd2_u10(a); #else simde__m128d_private r_, @@ -10384,7 +10573,7 @@ simde_mm256_sinh_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_sinh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_sinhf8_u10(a); + return Sleef_sinhf8_u10(a); #else simde__m256_private r_, @@ -10416,7 +10605,7 @@ simde_mm256_sinh_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_sinh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_sinhd4_u10(a); + return Sleef_sinhd4_u10(a); #else simde__m256d_private r_, @@ -10447,7 +10636,7 @@ simde_mm512_sinh_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_sinh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_sinhf16_u10(a); + return Sleef_sinhf16_u10(a); #else simde__m512_private r_, @@ -10478,7 +10667,7 @@ simde_mm512_sinh_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_sinh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_sinhd8_u10(a); + return Sleef_sinhd8_u10(a); #else simde__m512d_private r_, @@ -10537,7 +10726,7 @@ simde_mm_svml_ceil_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_ceil_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_ceilf4(a); + return Sleef_ceilf4(a); #else return simde_mm_round_ps(a, SIMDE_MM_FROUND_TO_POS_INF); #endif @@ -10553,7 +10742,7 @@ simde_mm_svml_ceil_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_ceil_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_ceild2(a); + return Sleef_ceild2(a); #else return simde_mm_round_pd(a, SIMDE_MM_FROUND_TO_POS_INF); #endif @@ -10569,7 +10758,7 @@ simde_mm256_svml_ceil_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_ceil_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_ceilf8(a); + return Sleef_ceilf8(a); #else return simde_mm256_round_ps(a, SIMDE_MM_FROUND_TO_POS_INF); #endif @@ -10585,7 +10774,7 @@ simde_mm256_svml_ceil_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_ceil_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_ceild4(a); + return Sleef_ceild4(a); #else return simde_mm256_round_pd(a, SIMDE_MM_FROUND_TO_POS_INF); #endif @@ -10601,7 +10790,7 @@ simde_mm512_ceil_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_ceil_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_ceilf16(a); + return Sleef_ceilf16(a); #else simde__m512_private r_, @@ -10632,7 +10821,7 @@ simde_mm512_ceil_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_ceil_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_ceild8(a); + return Sleef_ceild8(a); #else simde__m512d_private r_, @@ -10691,7 +10880,7 @@ simde_mm_svml_floor_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && 
defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_floor_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_floorf4(a); + return Sleef_floorf4(a); #else return simde_mm_round_ps(a, SIMDE_MM_FROUND_TO_NEG_INF); #endif @@ -10707,7 +10896,7 @@ simde_mm_svml_floor_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_floor_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_floord2(a); + return Sleef_floord2(a); #else return simde_mm_round_pd(a, SIMDE_MM_FROUND_TO_NEG_INF); #endif @@ -10723,7 +10912,7 @@ simde_mm256_svml_floor_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_floor_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_floorf8(a); + return Sleef_floorf8(a); #else return simde_mm256_round_ps(a, SIMDE_MM_FROUND_TO_NEG_INF); #endif @@ -10739,7 +10928,7 @@ simde_mm256_svml_floor_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_floor_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_floord4(a); + return Sleef_floord4(a); #else return simde_mm256_round_pd(a, SIMDE_MM_FROUND_TO_NEG_INF); #endif @@ -10755,7 +10944,7 @@ simde_mm512_floor_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_floor_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_floorf16(a); + return Sleef_floorf16(a); #else simde__m512_private r_, @@ -10786,7 +10975,7 @@ simde_mm512_floor_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_floor_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_floord8(a); + return Sleef_floord8(a); #else simde__m512d_private r_, @@ -10845,7 +11034,7 @@ simde_mm_svml_round_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_round_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_roundf4(a); + return Sleef_roundf4(a); #else simde__m128_private r_, @@ -10870,7 +11059,7 @@ simde_mm_svml_round_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_round_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_roundd2(a); + return Sleef_roundd2(a); #else simde__m128d_private r_, @@ -10895,7 +11084,7 @@ simde_mm256_svml_round_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_round_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_roundf8(a); + return Sleef_roundf8(a); #else simde__m256_private r_, @@ -10927,7 +11116,7 @@ simde_mm256_svml_round_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_round_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_roundd4(a); + return Sleef_roundd4(a); #else simde__m256d_private r_, @@ -10958,7 +11147,7 @@ simde_mm512_svml_round_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_svml_round_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return 
Sleef_roundd8(a); + return Sleef_roundd8(a); #else simde__m512d_private r_, @@ -11003,7 +11192,7 @@ simde_mm_svml_sqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_sqrt_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_sqrtf4(a); + return Sleef_sqrtf4(a); #else return simde_mm_sqrt_ps(a); #endif @@ -11019,7 +11208,7 @@ simde_mm_svml_sqrt_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_svml_sqrt_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_sqrtd2(a); + return Sleef_sqrtd2(a); #else return simde_mm_sqrt_pd(a); #endif @@ -11035,7 +11224,7 @@ simde_mm256_svml_sqrt_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_sqrt_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_sqrtf8(a); + return Sleef_sqrtf8(a); #else return simde_mm256_sqrt_ps(a); #endif @@ -11051,7 +11240,7 @@ simde_mm256_svml_sqrt_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_svml_sqrt_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_sqrtd4(a); + return Sleef_sqrtd4(a); #else return simde_mm256_sqrt_pd(a); #endif @@ -11067,7 +11256,7 @@ simde_mm512_svml_sqrt_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_svml_sqrt_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_sqrtf16(a); + return Sleef_sqrtf16(a); #else return simde_mm512_sqrt_ps(a); #endif @@ -11083,7 +11272,7 @@ simde_mm512_svml_sqrt_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_svml_sqrt_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_sqrtd8(a); + return Sleef_sqrtd8(a); #else return simde_mm512_sqrt_pd(a); #endif @@ -11552,7 +11741,7 @@ simde_mm_tanh_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_tanh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_tanhf4_u10(a); + return Sleef_tanhf4_u10(a); #else simde__m128_private r_, @@ -11577,7 +11766,7 @@ simde_mm_tanh_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_tanh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_tanhd2_u10(a); + return Sleef_tanhd2_u10(a); #else simde__m128d_private r_, @@ -11602,7 +11791,7 @@ simde_mm256_tanh_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_tanh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_tanhf8_u10(a); + return Sleef_tanhf8_u10(a); #else simde__m256_private r_, @@ -11634,7 +11823,7 @@ simde_mm256_tanh_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_tanh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_tanhd4_u10(a); + return Sleef_tanhd4_u10(a); #else simde__m256d_private r_, @@ -11665,7 +11854,7 @@ simde_mm512_tanh_ps (simde__m512 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_tanh_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && 
defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_tanhf16_u10(a); + return Sleef_tanhf16_u10(a); #else simde__m512_private r_, @@ -11696,7 +11885,7 @@ simde_mm512_tanh_pd (simde__m512d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_tanh_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX512F_NATIVE) - return Sleef_tanhd8_u10(a); + return Sleef_tanhd8_u10(a); #else simde__m512d_private r_, @@ -11755,7 +11944,7 @@ simde_mm_trunc_ps (simde__m128 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_trunc_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_truncf4(a); + return Sleef_truncf4(a); #else return simde_mm_round_ps(a, SIMDE_MM_FROUND_TO_ZERO); #endif @@ -11771,7 +11960,7 @@ simde_mm_trunc_pd (simde__m128d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) return _mm_trunc_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_SSE_NATIVE) - return Sleef_truncd2(a); + return Sleef_truncd2(a); #else return simde_mm_round_pd(a, SIMDE_MM_FROUND_TO_ZERO); #endif @@ -11787,7 +11976,7 @@ simde_mm256_trunc_ps (simde__m256 a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_trunc_ps(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_truncf8(a); + return Sleef_truncf8(a); #else return simde_mm256_round_ps(a, SIMDE_MM_FROUND_TO_ZERO); #endif @@ -11803,7 +11992,7 @@ simde_mm256_trunc_pd (simde__m256d a) { #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) return _mm256_trunc_pd(a); #elif defined(SIMDE_MATH_SLEEF_ENABLE) && defined(SIMDE_X86_AVX_NATIVE) - return Sleef_truncd4(a); + return Sleef_truncd4(a); #else return simde_mm256_round_pd(a, SIMDE_MM_FROUND_TO_ZERO); #endif diff --git a/lib/mmseqs/lib/xxhash/LICENSE b/lib/mmseqs/lib/xxhash/LICENSE new file mode 100644 index 0000000..fa20595 --- /dev/null +++ b/lib/mmseqs/lib/xxhash/LICENSE @@ -0,0 +1,48 @@ +xxHash Library +Copyright (c) 2012-2020 Yann Collet +All rights reserved. + +BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------- + +xxhsum command line interface +Copyright (c) 2013-2020 Yann Collet +All rights reserved. + +GPL v2 License + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. diff --git a/lib/mmseqs/lib/xxhash/xxh3.h b/lib/mmseqs/lib/xxhash/xxh3.h deleted file mode 100644 index 04fbaeb..0000000 --- a/lib/mmseqs/lib/xxhash/xxh3.h +++ /dev/null @@ -1,1632 +0,0 @@ -/* - xxHash - Extremely Fast Hash algorithm - Development source file for `xxh3` - Copyright (C) 2019-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - -/* Note : - This file is separated for development purposes. - It will be integrated into `xxhash.c` when development phase is complete. -*/ - -#ifndef XXH3_H -#define XXH3_H - - -/* === Dependencies === */ - -#undef XXH_INLINE_ALL /* in case it's already defined */ -#define XXH_INLINE_ALL -#include "xxhash.h" - - -/* === Compiler specifics === */ - -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ -# define XXH_RESTRICT restrict -#else -/* note : it might be useful to define __restrict or __restrict__ for some C++ compilers */ -# define XXH_RESTRICT /* disable */ -#endif - -#if defined(__GNUC__) -# if defined(__AVX2__) -# include -# elif defined(__SSE2__) -# include -# elif defined(__ARM_NEON__) || defined(__ARM_NEON) -# define inline __inline__ /* clang bug */ -# include -# undef inline -# endif -#elif defined(_MSC_VER) -# include -#endif - -/* - * Sanity check. 
- * - * XXH3 only requires these features to be efficient: - * - * - Usable unaligned access - * - A 32-bit or 64-bit ALU - * - If 32-bit, a decent ADC instruction - * - A 32 or 64-bit multiply with a 64-bit result - * - * Almost all 32-bit and 64-bit targets meet this, except for Thumb-1, the - * classic 16-bit only subset of ARM's instruction set. - * - * First of all, Thumb-1 lacks support for the UMULL instruction which - * performs the important long multiply. This means numerous __aeabi_lmul - * calls. - * - * Second of all, the 8 functional registers are just not enough. - * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need - * Lo registers, and this shuffling results in thousands more MOVs than A32. - * - * A32 and T32 don't have this limitation. They can access all 14 registers, - * do a 32->64 multiply with UMULL, and the flexible operand is helpful too. - * - * If compiling Thumb-1 for a target which supports ARM instructions, we - * will give a warning. - * - * Usually, if this happens, it is because of an accident and you probably - * need to specify -march, as you probably meant to compileh for a newer - * architecture. - */ -#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) -# warning "XXH3 is highly inefficient without ARM or Thumb-2." -#endif - -/* ========================================== - * Vectorization detection - * ========================================== */ -#define XXH_SCALAR 0 -#define XXH_SSE2 1 -#define XXH_AVX2 2 -#define XXH_NEON 3 -#define XXH_VSX 4 - -#ifndef XXH_VECTOR /* can be defined on command line */ -# if defined(__AVX2__) -# define XXH_VECTOR XXH_AVX2 -# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) -# define XXH_VECTOR XXH_SSE2 -# elif defined(__GNUC__) /* msvc support maybe later */ \ - && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \ - && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) -# define XXH_VECTOR XXH_NEON -# elif defined(__PPC64__) && defined(__POWER8_VECTOR__) && defined(__GNUC__) -# define XXH_VECTOR XXH_VSX -# else -# define XXH_VECTOR XXH_SCALAR -# endif -#endif - -/* control alignment of accumulator, - * for compatibility with fast vector loads */ -#ifndef XXH_ACC_ALIGN -# if XXH_VECTOR == 0 /* scalar */ -# define XXH_ACC_ALIGN 8 -# elif XXH_VECTOR == 1 /* sse2 */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == 2 /* avx2 */ -# define XXH_ACC_ALIGN 32 -# elif XXH_VECTOR == 3 /* neon */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == 4 /* vsx */ -# define XXH_ACC_ALIGN 16 -# endif -#endif - -/* xxh_u64 XXH_mult32to64(xxh_u32 a, xxh_u64 b) { return (xxh_u64)a * (xxh_u64)b; } */ -#if defined(_MSC_VER) && defined(_M_IX86) -# include -# define XXH_mult32to64(x, y) __emulu(x, y) -#else -# define XXH_mult32to64(x, y) ((xxh_u64)((x) & 0xFFFFFFFF) * (xxh_u64)((y) & 0xFFFFFFFF)) -#endif - -/* VSX stuff. It's a lot because VSX support is mediocre across compilers and - * there is a lot of mischief with endianness. 
*/ -#if XXH_VECTOR == XXH_VSX -# include -# undef vector -typedef __vector unsigned long long U64x2; -typedef __vector unsigned char U8x16; -typedef __vector unsigned U32x4; - -#ifndef XXH_VSX_BE -# if defined(__BIG_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -# define XXH_VSX_BE 1 -# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ -# warning "-maltivec=be is not recommended. Please use native endianness." -# define XXH_VSX_BE 1 -# else -# define XXH_VSX_BE 0 -# endif -#endif - -/* We need some helpers for big endian mode. */ -#if XXH_VSX_BE -/* A wrapper for POWER9's vec_revb. */ -# ifdef __POWER9_VECTOR__ -# define XXH_vec_revb vec_revb -# else -XXH_FORCE_INLINE U64x2 XXH_vec_revb(U64x2 val) -{ - U8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, - 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; - return vec_perm(val, val, vByteSwap); -} -# endif - -/* Power8 Crypto gives us vpermxor which is very handy for - * PPC64EB. - * - * U8x16 vpermxor(U8x16 a, U8x16 b, U8x16 mask) - * { - * U8x16 ret; - * for (int i = 0; i < 16; i++) { - * ret[i] = a[mask[i] & 0xF] ^ b[mask[i] >> 4]; - * } - * return ret; - * } - * - * Because both of the main loops load the key, swap, and xor it with input, - * we can combine the key swap into this instruction. - */ -# ifdef vec_permxor -# define XXH_vec_permxor vec_permxor -# else -# define XXH_vec_permxor __builtin_crypto_vpermxor -# endif -#endif /* XXH_VSX_BE */ -/* - * Because we reinterpret the multiply, there are endian memes: vec_mulo actually becomes - * vec_mule. - * - * Additionally, the intrinsic wasn't added until GCC 8, despite existing for a while. - * Clang has an easy way to control this, we can just use the builtin which doesn't swap. - * GCC needs inline assembly. */ -#if __has_builtin(__builtin_altivec_vmuleuw) -# define XXH_vec_mulo __builtin_altivec_vmulouw -# define XXH_vec_mule __builtin_altivec_vmuleuw -#else -/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. 
*/ -XXH_FORCE_INLINE U64x2 XXH_vec_mulo(U32x4 a, U32x4 b) { - U64x2 result; - __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); - return result; -} -XXH_FORCE_INLINE U64x2 XXH_vec_mule(U32x4 a, U32x4 b) { - U64x2 result; - __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); - return result; -} -#endif /* __has_builtin(__builtin_altivec_vmuleuw) */ -#endif /* XXH_VECTOR == XXH_VSX */ - -/* prefetch - * can be disabled, by declaring XXH_NO_PREFETCH build macro */ -#if defined(XXH_NO_PREFETCH) -# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ -#else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ -# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ -# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) -# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) -# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) -# else -# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ -# endif -#endif /* XXH_NO_PREFETCH */ - - -/* ========================================== - * XXH3 default settings - * ========================================== */ - -#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ - -#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) -# error "default keyset is not large enough" -#endif - -XXH_ALIGN(64) static const xxh_u8 kSecret[XXH_SECRET_DEFAULT_SIZE] = { - 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, - 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, - 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, - 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, - 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, - 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, - 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, - 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, - - 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, - 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, - 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, - 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, -}; - -/* - * GCC for x86 has a tendency to use SSE in this loop. While it - * successfully avoids swapping (as MUL overwrites EAX and EDX), it - * slows it down because instead of free register swap shifts, it - * must use pshufd and punpckl/hd. - * - * To prevent this, we use this attribute to shut off SSE. - */ -#if defined(__GNUC__) && !defined(__clang__) && defined(__i386__) -__attribute__((__target__("no-sse"))) -#endif -static XXH128_hash_t -XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) -{ - /* - * GCC/Clang __uint128_t method. - * - * On most 64-bit targets, GCC and Clang define a __uint128_t type. - * This is usually the best way as it usually uses a native long 64-bit - * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. - * - * Usually. 
- * - * Despite being a 32-bit platform, Clang (and emscripten) define this - * type despite not having the arithmetic for it. This results in a - * laggy compiler builtin call which calculates a full 128-bit multiply. - * In that case it is best to use the portable one. - * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 - */ -#if defined(__GNUC__) && !defined(__wasm__) \ - && defined(__SIZEOF_INT128__) \ - || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) - - __uint128_t product = (__uint128_t)lhs * (__uint128_t)rhs; - XXH128_hash_t const r128 = { (xxh_u64)(product), (xxh_u64)(product >> 64) }; - return r128; - - /* - * MSVC for x64's _umul128 method. - * - * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); - * - * This compiles to single operand MUL on x64. - */ -#elif defined(_M_X64) || defined(_M_IA64) - - #ifndef _MSC_VER -# pragma intrinsic(_umul128) -#endif - xxh_u64 product_high; - xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); - XXH128_hash_t const r128 = { product_low, product_high }; - return r128; - -#else - /* - * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. - * - * This is a fast and simple grade school multiply, which is shown - * below with base 10 arithmetic instead of base 0x100000000. - * - * 9 3 // D2 lhs = 93 - * x 7 5 // D2 rhs = 75 - * ---------- - * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) - * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) - * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) - * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) - * --------- - * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 - * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 - * --------- - * 6 9 7 5 - * - * The reasons for adding the products like this are: - * 1. It avoids manual carry tracking. Just like how - * (9 * 9) + 9 + 9 = 99, the same applies with this for - * UINT64_MAX. This avoids a lot of complexity. - * - * 2. It hints for, and on Clang, compiles to, the powerful UMAAL - * instruction available in ARMv6+ A32/T32, which is shown below: - * - * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) - * { - * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; - * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); - * *RdHi = (xxh_u32)(product >> 32); - * } - * - * This instruction was designed for efficient long multiplication, - * and allows this to be calculated in only 4 instructions which - * is comparable to some 64-bit ALUs. - * - * 3. It isn't terrible on other platforms. Usually this will be - * a couple of 32-bit ADD/ADCs. - */ - - /* First calculate all of the cross products. */ - xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); - xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); - xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); - xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); - - /* Now add the products together. These will never overflow. */ - xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; - xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; - xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); - - XXH128_hash_t r128 = { lower, upper }; - return r128; -#endif -} - -/* - * We want to keep the attribute here because a target switch - * disables inlining. - * - * Does a 64-bit to 128-bit multiply, then XOR folds it. - * The reason for the separate function is to prevent passing - * too many structs around by value. 
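The portable XXH_mult64to128 branch above is the grade-school method in base 2^32, exactly as the worked base-10 example in the comment describes. As a standalone sanity check (the mult64to128 helper name and the test inputs below are arbitrary; the limb arithmetic is copied from the portable branch), the result can be compared against the compiler's __uint128_t where it exists:

#include <stdint.h>
#include <stdio.h>

/* Same limb arithmetic as the portable branch above. */
static void mult64to128(uint64_t lhs, uint64_t rhs, uint64_t *lo, uint64_t *hi) {
    uint64_t lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
    uint64_t hi_lo = (lhs >> 32)        * (rhs & 0xFFFFFFFF);
    uint64_t lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
    uint64_t hi_hi = (lhs >> 32)        * (rhs >> 32);
    uint64_t cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; /* never overflows */
    *hi = (hi_lo >> 32) + (cross >> 32) + hi_hi;
    *lo = (cross << 32) | (lo_lo & 0xFFFFFFFF);
}

int main(void) {
    uint64_t lo, hi;
    mult64to128(0x0123456789ABCDEFULL, 0xFEDCBA9876543210ULL, &lo, &hi);
#ifdef __SIZEOF_INT128__
    __uint128_t ref = (__uint128_t)0x0123456789ABCDEFULL * 0xFEDCBA9876543210ULL;
    printf("match: %d\n", (int)(lo == (uint64_t)ref && hi == (uint64_t)(ref >> 64)));
#else
    printf("lo=%016llx hi=%016llx\n", (unsigned long long)lo, (unsigned long long)hi);
#endif
    return 0;
}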
This will hopefully inline - * the multiply, but we don't force it. - */ -#if defined(__GNUC__) && !defined(__clang__) && defined(__i386__) -__attribute__((__target__("no-sse"))) -#endif -static xxh_u64 -XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) -{ - XXH128_hash_t product = XXH_mult64to128(lhs, rhs); - return product.low64 ^ product.high64; -} - - -static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) -{ - h64 ^= h64 >> 37; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - return h64; -} - - -/* ========================================== - * Short keys - * ========================================== */ - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(1 <= len && len <= 3); - XXH_ASSERT(secret != NULL); - { xxh_u8 const c1 = input[0]; - xxh_u8 const c2 = input[len >> 1]; - xxh_u8 const c3 = input[len - 1]; - xxh_u32 const combined = ((xxh_u32)c1) | (((xxh_u32)c2) << 8) | (((xxh_u32)c3) << 16) | (((xxh_u32)len) << 24); - xxh_u64 const keyed = (xxh_u64)combined ^ (XXH_readLE32(secret) + seed); - xxh_u64 const mixed = keyed * PRIME64_1; - return XXH3_avalanche(mixed); - } -} - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len <= 8); - { xxh_u32 const input_lo = XXH_readLE32(input); - xxh_u32 const input_hi = XXH_readLE32(input + len - 4); - xxh_u64 const input_64 = input_lo | ((xxh_u64)input_hi << 32); - xxh_u64 const keyed = input_64 ^ (XXH_readLE64(secret) + seed); - xxh_u64 const mix64 = len + ((keyed ^ (keyed >> 51)) * PRIME32_1); - return XXH3_avalanche((mix64 ^ (mix64 >> 47)) * PRIME64_2); - } -} - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(9 <= len && len <= 16); - { xxh_u64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed); - xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret + 8) - seed); - xxh_u64 const acc = len + (input_lo + input_hi) + XXH3_mul128_fold64(input_lo, input_hi); - return XXH3_avalanche(acc); - } -} - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(len <= 16); - { if (len > 8) return XXH3_len_9to16_64b(input, len, secret, seed); - if (len >= 4) return XXH3_len_4to8_64b(input, len, secret, seed); - if (len) return XXH3_len_1to3_64b(input, len, secret, seed); - return 0; - } -} - - -/* === Long Keys === */ - -#define STRIPE_LEN 64 -#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ -#define ACC_NB (STRIPE_LEN / sizeof(xxh_u64)) - -typedef enum { XXH3_acc_64bits, XXH3_acc_128bits } XXH3_accWidth_e; - -XXH_FORCE_INLINE void -XXH3_accumulate_512( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret, - XXH3_accWidth_e accWidth) -{ -#if (XXH_VECTOR == XXH_AVX2) - - XXH_ASSERT((((size_t)acc) & 31) == 0); - { XXH_ALIGN(32) __m256i* const xacc = (__m256i *) acc; - const __m256i* const xinput = (const __m256i *) input; /* not really aligned, just for ptr arithmetic, and because _mm256_loadu_si256() requires this type */ - const __m256i* const xsecret = (const __m256i *) secret; /* not really aligned, just for ptr arithmetic, and because 
_mm256_loadu_si256() requires this type */ - - size_t i; - for (i=0; i < STRIPE_LEN/sizeof(__m256i); i++) { - __m256i const data_vec = _mm256_loadu_si256 (xinput+i); - __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); - __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); /* uint32 dk[8] = {d0+k0, d1+k1, d2+k2, d3+k3, ...} */ - __m256i const product = _mm256_mul_epu32 (data_key, _mm256_shuffle_epi32 (data_key, 0x31)); /* uint64 mul[4] = {dk0*dk1, dk2*dk3, ...} */ - if (accWidth == XXH3_acc_128bits) { - __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); - __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); - xacc[i] = _mm256_add_epi64(product, sum); - } else { /* XXH3_acc_64bits */ - __m256i const sum = _mm256_add_epi64(xacc[i], data_vec); - xacc[i] = _mm256_add_epi64(product, sum); - } - } } - -#elif (XXH_VECTOR == XXH_SSE2) - - XXH_ASSERT((((size_t)acc) & 15) == 0); - { XXH_ALIGN(16) __m128i* const xacc = (__m128i *) acc; - const __m128i* const xinput = (const __m128i *) input; /* not really aligned, just for ptr arithmetic, and because _mm_loadu_si128() requires this type */ - const __m128i* const xsecret = (const __m128i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm_loadu_si128() requires this type */ - - size_t i; - for (i=0; i < STRIPE_LEN/sizeof(__m128i); i++) { - __m128i const data_vec = _mm_loadu_si128 (xinput+i); - __m128i const key_vec = _mm_loadu_si128 (xsecret+i); - __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); /* uint32 dk[8] = {d0+k0, d1+k1, d2+k2, d3+k3, ...} */ - __m128i const product = _mm_mul_epu32 (data_key, _mm_shuffle_epi32 (data_key, 0x31)); /* uint64 mul[4] = {dk0*dk1, dk2*dk3, ...} */ - if (accWidth == XXH3_acc_128bits) { - __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); - __m128i const sum = _mm_add_epi64(xacc[i], data_swap); - xacc[i] = _mm_add_epi64(product, sum); - } else { /* XXH3_acc_64bits */ - __m128i const sum = _mm_add_epi64(xacc[i], data_vec); - xacc[i] = _mm_add_epi64(product, sum); - } - } } - -#elif (XXH_VECTOR == XXH_NEON) - - XXH_ASSERT((((size_t)acc) & 15) == 0); - { - XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc; - /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ - uint8_t const* const xinput = (const uint8_t *) input; - uint8_t const* const xsecret = (const uint8_t *) secret; - - size_t i; - for (i=0; i < STRIPE_LEN / sizeof(uint64x2_t); i++) { -#if !defined(__aarch64__) && !defined(__arm64__) && defined(__GNUC__) /* ARM32-specific hack */ - /* vzip on ARMv7 Clang generates a lot of vmovs (technically vorrs) without this. - * vzip on 32-bit ARM NEON will overwrite the original register, and I think that Clang - * assumes I don't want to destroy it and tries to make a copy. This slows down the code - * a lot. - * aarch64 not only uses an entirely different syntax, but it requires three - * instructions... - * ext v1.16B, v0.16B, #8 // select high bits because aarch64 can't address them directly - * zip1 v3.2s, v0.2s, v1.2s // first zip - * zip2 v2.2s, v0.2s, v1.2s // second zip - * ...to do what ARM does in one: - * vzip.32 d0, d1 // Interleave high and low bits and overwrite. 
*/ - - /* data_vec = xsecret[i]; */ - uint8x16_t const data_vec = vld1q_u8(xinput + (i * 16)); - /* key_vec = xsecret[i]; */ - uint8x16_t const key_vec = vld1q_u8(xsecret + (i * 16)); - /* data_key = data_vec ^ key_vec; */ - uint32x4_t data_key; - - if (accWidth == XXH3_acc_64bits) { - /* Add first to prevent register swaps */ - /* xacc[i] += data_vec; */ - xacc[i] = vaddq_u64 (xacc[i], vreinterpretq_u64_u8(data_vec)); - } else { /* XXH3_acc_128bits */ - /* xacc[i] += swap(data_vec); */ - /* can probably be optimized better */ - uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); - uint64x2_t const swapped= vextq_u64(data64, data64, 1); - xacc[i] = vaddq_u64 (xacc[i], swapped); - } - - data_key = vreinterpretq_u32_u8(veorq_u8(data_vec, key_vec)); - - /* Here's the magic. We use the quirkiness of vzip to shuffle data_key in place. - * shuffle: data_key[0, 1, 2, 3] = data_key[0, 2, 1, 3] */ - __asm__("vzip.32 %e0, %f0" : "+w" (data_key)); - /* xacc[i] += (uint64x2_t) data_key[0, 1] * (uint64x2_t) data_key[2, 3]; */ - xacc[i] = vmlal_u32(xacc[i], vget_low_u32(data_key), vget_high_u32(data_key)); - -#else - /* On aarch64, vshrn/vmovn seems to be equivalent to, if not faster than, the vzip method. */ - - /* data_vec = xsecret[i]; */ - uint8x16_t const data_vec = vld1q_u8(xinput + (i * 16)); - /* key_vec = xsecret[i]; */ - uint8x16_t const key_vec = vld1q_u8(xsecret + (i * 16)); - /* data_key = data_vec ^ key_vec; */ - uint64x2_t const data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); - /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); */ - uint32x2_t const data_key_lo = vmovn_u64 (data_key); - /* data_key_hi = (uint32x2_t) (data_key >> 32); */ - uint32x2_t const data_key_hi = vshrn_n_u64 (data_key, 32); - if (accWidth == XXH3_acc_64bits) { - /* xacc[i] += data_vec; */ - xacc[i] = vaddq_u64 (xacc[i], vreinterpretq_u64_u8(data_vec)); - } else { /* XXH3_acc_128bits */ - /* xacc[i] += swap(data_vec); */ - uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); - uint64x2_t const swapped= vextq_u64(data64, data64, 1); - xacc[i] = vaddq_u64 (xacc[i], swapped); - } - /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ - xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi); - -#endif - } - } - -#elif (XXH_VECTOR == XXH_VSX) - U64x2* const xacc = (U64x2*) acc; /* presumed aligned */ - U64x2 const* const xinput = (U64x2 const*) input; /* no alignment restriction */ - U64x2 const* const xsecret = (U64x2 const*) secret; /* no alignment restriction */ - U64x2 const v32 = { 32, 32 }; -#if XXH_VSX_BE - U8x16 const vXorSwap = { 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, 0x70, - 0x8F, 0x9E, 0xAD, 0xBC, 0xCB, 0xDA, 0xE9, 0xF8 }; -#endif - size_t i; - for (i = 0; i < STRIPE_LEN / sizeof(U64x2); i++) { - /* data_vec = xinput[i]; */ - /* key_vec = xsecret[i]; */ -#if XXH_VSX_BE - /* byteswap */ - U64x2 const data_vec = XXH_vec_revb(vec_vsx_ld(0, xinput + i)); - U64x2 const key_raw = vec_vsx_ld(0, xsecret + i); - /* See comment above. 
data_key = data_vec ^ swap(xsecret[i]); */ - U64x2 const data_key = (U64x2)XXH_vec_permxor((U8x16)data_vec, (U8x16)key_raw, vXorSwap); -#else - U64x2 const data_vec = vec_vsx_ld(0, xinput + i); - U64x2 const key_vec = vec_vsx_ld(0, xsecret + i); - U64x2 const data_key = data_vec ^ key_vec; -#endif - /* shuffled = (data_key << 32) | (data_key >> 32); */ - U32x4 const shuffled = (U32x4)vec_rl(data_key, v32); - /* product = ((U64x2)data_key & 0xFFFFFFFF) * ((U64x2)shuffled & 0xFFFFFFFF); */ - U64x2 const product = XXH_vec_mulo((U32x4)data_key, shuffled); - xacc[i] += product; - - if (accWidth == XXH3_acc_64bits) { - xacc[i] += data_vec; - } else { /* XXH3_acc_128bits */ - /* swap high and low halves */ - U64x2 const data_swapped = vec_xxpermdi(data_vec, data_vec, 2); - xacc[i] += data_swapped; - } - } - -#else /* scalar variant of Accumulator - universal */ - - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */ - const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */ - const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ - size_t i; - XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); - for (i=0; i < ACC_NB; i++) { - xxh_u64 const data_val = XXH_readLE64(xinput + 8*i); - xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8); - - if (accWidth == XXH3_acc_64bits) { - xacc[i] += data_val; - } else { - xacc[i ^ 1] += data_val; /* swap adjacent lanes */ - } - xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32); - } -#endif -} - -XXH_FORCE_INLINE void -XXH3_scrambleAcc(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ -#if (XXH_VECTOR == XXH_AVX2) - - XXH_ASSERT((((size_t)acc) & 31) == 0); - { XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc; - const __m256i* const xsecret = (const __m256i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm256_loadu_si256() requires this argument type */ - const __m256i prime32 = _mm256_set1_epi32((int)PRIME32_1); - - size_t i; - for (i=0; i < STRIPE_LEN/sizeof(__m256i); i++) { - /* xacc[i] ^= (xacc[i] >> 47) */ - __m256i const acc_vec = xacc[i]; - __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); - __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); - /* xacc[i] ^= xsecret; */ - __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); - __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); - - /* xacc[i] *= PRIME32_1; */ - __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, 0x31); - __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); - __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); - xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); - } - } - -#elif (XXH_VECTOR == XXH_SSE2) - - XXH_ASSERT((((size_t)acc) & 15) == 0); - { XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc; - const __m128i* const xsecret = (const __m128i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm_loadu_si128() requires this argument type */ - const __m128i prime32 = _mm_set1_epi32((int)PRIME32_1); - - size_t i; - for (i=0; i < STRIPE_LEN/sizeof(__m128i); i++) { - /* xacc[i] ^= (xacc[i] >> 47) */ - __m128i const acc_vec = xacc[i]; - __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); - __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); - /* xacc[i] ^= xsecret; */ - __m128i const key_vec = _mm_loadu_si128 (xsecret+i); - __m128i const data_key = _mm_xor_si128 (data_vec, 
key_vec); - - /* xacc[i] *= PRIME32_1; */ - __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, 0x31); - __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); - __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); - xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); - } - } - -#elif (XXH_VECTOR == XXH_NEON) - - XXH_ASSERT((((size_t)acc) & 15) == 0); - - { uint64x2_t* const xacc = (uint64x2_t*) acc; - uint8_t const* const xsecret = (uint8_t const*) secret; - uint32x2_t const prime = vdup_n_u32 (PRIME32_1); - - size_t i; - for (i=0; i < STRIPE_LEN/sizeof(uint64x2_t); i++) { - /* data_vec = xacc[i] ^ (xacc[i] >> 47); */ - uint64x2_t const acc_vec = xacc[i]; - uint64x2_t const shifted = vshrq_n_u64 (acc_vec, 47); - uint64x2_t const data_vec = veorq_u64 (acc_vec, shifted); - - /* key_vec = xsecret[i]; */ - uint32x4_t const key_vec = vreinterpretq_u32_u8(vld1q_u8(xsecret + (i * 16))); - /* data_key = data_vec ^ key_vec; */ - uint32x4_t const data_key = veorq_u32 (vreinterpretq_u32_u64(data_vec), key_vec); - /* shuffled = { data_key[0, 2], data_key[1, 3] }; */ - uint32x2x2_t const shuffled = vzip_u32 (vget_low_u32(data_key), vget_high_u32(data_key)); - - /* data_key *= PRIME32_1 */ - - /* prod_hi = (data_key >> 32) * PRIME32_1; */ - uint64x2_t const prod_hi = vmull_u32 (shuffled.val[1], prime); - /* xacc[i] = prod_hi << 32; */ - xacc[i] = vshlq_n_u64(prod_hi, 32); - /* xacc[i] += (prod_hi & 0xFFFFFFFF) * PRIME32_1; */ - xacc[i] = vmlal_u32(xacc[i], shuffled.val[0], prime); - } } - -#elif (XXH_VECTOR == XXH_VSX) - - U64x2* const xacc = (U64x2*) acc; - const U64x2* const xsecret = (const U64x2*) secret; - /* constants */ - U64x2 const v32 = { 32, 32 }; - U64x2 const v47 = { 47, 47 }; - U32x4 const prime = { PRIME32_1, PRIME32_1, PRIME32_1, PRIME32_1 }; - size_t i; -#if XXH_VSX_BE - /* endian swap */ - U8x16 const vXorSwap = { 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, 0x70, - 0x8F, 0x9E, 0xAD, 0xBC, 0xCB, 0xDA, 0xE9, 0xF8 }; -#endif - for (i = 0; i < STRIPE_LEN / sizeof(U64x2); i++) { - U64x2 const acc_vec = xacc[i]; - U64x2 const data_vec = acc_vec ^ (acc_vec >> v47); - /* key_vec = xsecret[i]; */ -#if XXH_VSX_BE - /* swap bytes words */ - U64x2 const key_raw = vec_vsx_ld(0, xsecret + i); - U64x2 const data_key = (U64x2)XXH_vec_permxor((U8x16)data_vec, (U8x16)key_raw, vXorSwap); -#else - U64x2 const key_vec = vec_vsx_ld(0, xsecret + i); - U64x2 const data_key = data_vec ^ key_vec; -#endif - - /* data_key *= PRIME32_1 */ - - /* prod_lo = ((U64x2)data_key & 0xFFFFFFFF) * ((U64x2)prime & 0xFFFFFFFF); */ - U64x2 const prod_even = XXH_vec_mule((U32x4)data_key, prime); - /* prod_hi = ((U64x2)data_key >> 32) * ((U64x2)prime >> 32); */ - U64x2 const prod_odd = XXH_vec_mulo((U32x4)data_key, prime); - xacc[i] = prod_odd + (prod_even << v32); - } - -#else /* scalar variant of Scrambler - universal */ - - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */ - const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ - size_t i; - XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); - for (i=0; i < ACC_NB; i++) { - xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i); - xxh_u64 acc64 = xacc[i]; - acc64 ^= acc64 >> 47; - acc64 ^= key64; - acc64 *= PRIME32_1; - xacc[i] = acc64; - } - -#endif -} - -#define XXH_PREFETCH_DIST 384 - -/* assumption : nbStripes will not overflow secret size */ -XXH_FORCE_INLINE void -XXH3_accumulate( xxh_u64* XXH_RESTRICT acc, - const 
xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, - size_t nbStripes, - XXH3_accWidth_e accWidth) -{ - size_t n; - for (n = 0; n < nbStripes; n++ ) { - const xxh_u8* const in = input + n*STRIPE_LEN; - XXH_PREFETCH(in + XXH_PREFETCH_DIST); - XXH3_accumulate_512(acc, - in, - secret + n*XXH_SECRET_CONSUME_RATE, - accWidth); - } -} - -/* note : clang auto-vectorizes well in SS2 mode _if_ this function is `static`, - * and doesn't auto-vectorize it at all if it is `FORCE_INLINE`. - * However, it auto-vectorizes better AVX2 if it is `FORCE_INLINE` - * Pretty much every other modes and compilers prefer `FORCE_INLINE`. - */ - -#if defined(__clang__) && (XXH_VECTOR==0) && !defined(__AVX2__) && !defined(__arm__) && !defined(__thumb__) -static void -#else -XXH_FORCE_INLINE void -#endif -XXH3_hashLong_internal_loop( xxh_u64* XXH_RESTRICT acc, - const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_accWidth_e accWidth) -{ - size_t const nb_rounds = (secretSize - STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; - size_t const block_len = STRIPE_LEN * nb_rounds; - size_t const nb_blocks = len / block_len; - - size_t n; - - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - - for (n = 0; n < nb_blocks; n++) { - XXH3_accumulate(acc, input + n*block_len, secret, nb_rounds, accWidth); - XXH3_scrambleAcc(acc, secret + secretSize - STRIPE_LEN); - } - - /* last partial block */ - XXH_ASSERT(len > STRIPE_LEN); - { size_t const nbStripes = (len - (block_len * nb_blocks)) / STRIPE_LEN; - XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); - XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, accWidth); - - /* last stripe */ - if (len & (STRIPE_LEN - 1)) { - const xxh_u8* const p = input + len - STRIPE_LEN; -#define XXH_SECRET_LASTACC_START 7 /* do not align on 8, so that secret is different from scrambler */ - XXH3_accumulate_512(acc, p, secret + secretSize - STRIPE_LEN - XXH_SECRET_LASTACC_START, accWidth); - } } -} - -XXH_FORCE_INLINE xxh_u64 -XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) -{ - return XXH3_mul128_fold64( - acc[0] ^ XXH_readLE64(secret), - acc[1] ^ XXH_readLE64(secret+8) ); -} - -static XXH64_hash_t -XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) -{ - xxh_u64 result64 = start; - - result64 += XXH3_mix2Accs(acc+0, secret + 0); - result64 += XXH3_mix2Accs(acc+2, secret + 16); - result64 += XXH3_mix2Accs(acc+4, secret + 32); - result64 += XXH3_mix2Accs(acc+6, secret + 48); - - return XXH3_avalanche(result64); -} - -#define XXH3_INIT_ACC { PRIME32_3, PRIME64_1, PRIME64_2, PRIME64_3, \ - PRIME64_4, PRIME32_2, PRIME64_5, PRIME32_1 }; - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_hashLong_internal(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize) -{ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[ACC_NB] = XXH3_INIT_ACC; - - XXH3_hashLong_internal_loop(acc, input, len, secret, secretSize, XXH3_acc_64bits); - - /* converge into final hash */ - XXH_STATIC_ASSERT(sizeof(acc) == 64); -#define XXH_SECRET_MERGEACCS_START 11 /* do not align on 8, so that secret is different from accumulator */ - XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * PRIME64_1); -} - - -XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. 
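For orientation, the block geometry in XXH3_hashLong_internal_loop follows directly from the constants defined above (XXH_SECRET_DEFAULT_SIZE = 192, STRIPE_LEN = 64, XXH_SECRET_CONSUME_RATE = 8): with the default secret, each block is (192 - 64) / 8 = 16 stripes of 64 bytes, i.e. 1024 input bytes between scrambles. A throwaway check of that arithmetic (variable names are mine):

#include <stdio.h>

int main(void) {
    /* Constants as defined above: XXH_SECRET_DEFAULT_SIZE, STRIPE_LEN,
     * XXH_SECRET_CONSUME_RATE. */
    unsigned secretSize = 192, stripeLen = 64, consumeRate = 8;
    unsigned nb_rounds = (secretSize - stripeLen) / consumeRate; /* 16 stripes per block */
    unsigned block_len = stripeLen * nb_rounds;                  /* 1024 bytes per block */
    printf("stripes per block: %u, bytes per block: %u\n", nb_rounds, block_len);
    return 0;
}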
Not sure why (uop cache maybe ?), but difference is large and easily measurable */ -XXH3_hashLong_64b_defaultSecret(const xxh_u8* XXH_RESTRICT input, size_t len) -{ - return XXH3_hashLong_internal(input, len, kSecret, sizeof(kSecret)); -} - -XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ -XXH3_hashLong_64b_withSecret(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize) -{ - return XXH3_hashLong_internal(input, len, secret, secretSize); -} - - -XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) -{ - if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); - memcpy(dst, &v64, sizeof(v64)); -} - -/* XXH3_initCustomSecret() : - * destination `customSecret` is presumed allocated and same size as `kSecret`. - */ -XXH_FORCE_INLINE void XXH3_initCustomSecret(xxh_u8* customSecret, xxh_u64 seed64) -{ - int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; - int i; - - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); - - for (i=0; i < nbRounds; i++) { - XXH_writeLE64(customSecret + 16*i, XXH_readLE64(kSecret + 16*i) + seed64); - XXH_writeLE64(customSecret + 16*i + 8, XXH_readLE64(kSecret + 16*i + 8) - seed64); - } -} - - -/* XXH3_hashLong_64b_withSeed() : - * Generate a custom key, - * based on alteration of default kSecret with the seed, - * and then use this key for long mode hashing. - * This operation is decently fast but nonetheless costs a little bit of time. - * Try to avoid it whenever possible (typically when seed==0). - */ -XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ -XXH3_hashLong_64b_withSeed(const xxh_u8* input, size_t len, XXH64_hash_t seed) -{ - XXH_ALIGN(8) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; - if (seed==0) return XXH3_hashLong_64b_defaultSecret(input, len); - XXH3_initCustomSecret(secret, seed); - return XXH3_hashLong_internal(input, len, secret, sizeof(secret)); -} - - -XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) -{ - xxh_u64 const input_lo = XXH_readLE64(input); - xxh_u64 const input_hi = XXH_readLE64(input+8); - return XXH3_mul128_fold64( - input_lo ^ (XXH_readLE64(secret) + seed64), - input_hi ^ (XXH_readLE64(secret+8) - seed64) ); -} - - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(16 < len && len <= 128); - - { xxh_u64 acc = len * PRIME64_1; - if (len > 32) { - if (len > 64) { - if (len > 96) { - acc += XXH3_mix16B(input+48, secret+96, seed); - acc += XXH3_mix16B(input+len-64, secret+112, seed); - } - acc += XXH3_mix16B(input+32, secret+64, seed); - acc += XXH3_mix16B(input+len-48, secret+80, seed); - } - acc += XXH3_mix16B(input+16, secret+32, seed); - acc += XXH3_mix16B(input+len-32, secret+48, seed); - } - acc += XXH3_mix16B(input+0, secret+0, seed); - acc += XXH3_mix16B(input+len-16, secret+16, seed); - - return XXH3_avalanche(acc); - } -} - -#define XXH3_MIDSIZE_MAX 240 - -XXH_NO_INLINE XXH64_hash_t -XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - 
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); - -#define XXH3_MIDSIZE_STARTOFFSET 3 -#define XXH3_MIDSIZE_LASTOFFSET 17 - - { xxh_u64 acc = len * PRIME64_1; - int const nbRounds = (int)len / 16; - int i; - for (i=0; i<8; i++) { - acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); - } - acc = XXH3_avalanche(acc); - XXH_ASSERT(nbRounds >= 8); - for (i=8 ; i < nbRounds; i++) { - acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); - } - /* last bytes */ - acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); - return XXH3_avalanche(acc); - } -} - -/* === Public entry point === */ - -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len) -{ - if (len <= 16) return XXH3_len_0to16_64b((const xxh_u8*)input, len, kSecret, 0); - if (len <= 128) return XXH3_len_17to128_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); - if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); - return XXH3_hashLong_64b_defaultSecret((const xxh_u8*)input, len); -} - -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - /* if an action must be taken should `secret` conditions not be respected, - * it should be done here. - * For now, it's a contract pre-condition. - * Adding a check and a branch here would cost performance at every hash */ - if (len <= 16) return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, 0); - if (len <= 128) return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); - if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); - return XXH3_hashLong_64b_withSecret((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize); -} - -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) -{ - if (len <= 16) return XXH3_len_0to16_64b((const xxh_u8*)input, len, kSecret, seed); - if (len <= 128) return XXH3_len_17to128_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); - if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); - return XXH3_hashLong_64b_withSeed((const xxh_u8*)input, len, seed); -} - -/* === XXH3 streaming === */ - -XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) -{ - return (XXH3_state_t*)XXH_malloc(sizeof(XXH3_state_t)); -} - -XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void -XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state) -{ - memcpy(dst_state, src_state, sizeof(*dst_state)); -} - -static void -XXH3_64bits_reset_internal(XXH3_state_t* statePtr, - XXH64_hash_t seed, - const xxh_u8* secret, size_t secretSize) -{ - XXH_ASSERT(statePtr != NULL); - memset(statePtr, 0, sizeof(*statePtr)); - statePtr->acc[0] = PRIME32_3; - statePtr->acc[1] = PRIME64_1; - statePtr->acc[2] = PRIME64_2; - statePtr->acc[3] = PRIME64_3; - statePtr->acc[4] = PRIME64_4; - statePtr->acc[5] = PRIME32_2; - statePtr->acc[6] = PRIME64_5; - statePtr->acc[7] = PRIME32_1; - statePtr->seed = seed; - XXH_ASSERT(secret != NULL); - statePtr->secret = secret; - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - 
statePtr->secretLimit = (XXH32_hash_t)(secretSize - STRIPE_LEN); - statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset(XXH3_state_t* statePtr) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_64bits_reset_internal(statePtr, 0, kSecret, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_64bits_reset_internal(statePtr, 0, (const xxh_u8*)secret, secretSize); - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_64bits_reset_internal(statePtr, seed, kSecret, XXH_SECRET_DEFAULT_SIZE); - XXH3_initCustomSecret(statePtr->customSecret, seed); - statePtr->secret = statePtr->customSecret; - return XXH_OK; -} - -XXH_FORCE_INLINE void -XXH3_consumeStripes( xxh_u64* acc, - XXH32_hash_t* nbStripesSoFarPtr, XXH32_hash_t nbStripesPerBlock, - const xxh_u8* input, size_t totalStripes, - const xxh_u8* secret, size_t secretLimit, - XXH3_accWidth_e accWidth) -{ - XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); - if (nbStripesPerBlock - *nbStripesSoFarPtr <= totalStripes) { - /* need a scrambling operation */ - size_t const nbStripes = nbStripesPerBlock - *nbStripesSoFarPtr; - XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, accWidth); - XXH3_scrambleAcc(acc, secret + secretLimit); - XXH3_accumulate(acc, input + nbStripes * STRIPE_LEN, secret, totalStripes - nbStripes, accWidth); - *nbStripesSoFarPtr = (XXH32_hash_t)(totalStripes - nbStripes); - } else { - XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, totalStripes, accWidth); - *nbStripesSoFarPtr += (XXH32_hash_t)totalStripes; - } -} - -XXH_FORCE_INLINE XXH_errorcode -XXH3_update(XXH3_state_t* state, const xxh_u8* input, size_t len, XXH3_accWidth_e accWidth) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; -#else - return XXH_ERROR; -#endif - - { const xxh_u8* const bEnd = input + len; - - state->totalLen += len; - - if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */ - XXH_memcpy(state->buffer + state->bufferedSize, input, len); - state->bufferedSize += (XXH32_hash_t)len; - return XXH_OK; - } - /* input now > XXH3_INTERNALBUFFER_SIZE */ - -#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / STRIPE_LEN) - XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % STRIPE_LEN == 0); /* clean multiple */ - - if (state->bufferedSize) { /* some input within internal buffer: fill then consume it */ - size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; - XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); - input += loadSize; - XXH3_consumeStripes(state->acc, - &state->nbStripesSoFar, state->nbStripesPerBlock, - state->buffer, XXH3_INTERNALBUFFER_STRIPES, - state->secret, state->secretLimit, - accWidth); - state->bufferedSize = 0; - } - - /* consume input by full buffer quantities */ - if (input+XXH3_INTERNALBUFFER_SIZE <= bEnd) { - const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; - do { - XXH3_consumeStripes(state->acc, - &state->nbStripesSoFar, 
state->nbStripesPerBlock, - input, XXH3_INTERNALBUFFER_STRIPES, - state->secret, state->secretLimit, - accWidth); - input += XXH3_INTERNALBUFFER_SIZE; - } while (input<=limit); - } - - if (input < bEnd) { /* some remaining input input : buffer it */ - XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); - state->bufferedSize = (XXH32_hash_t)(bEnd-input); - } - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len) -{ - return XXH3_update(state, (const xxh_u8*)input, len, XXH3_acc_64bits); -} - - -XXH_FORCE_INLINE void -XXH3_digest_long (XXH64_hash_t* acc, const XXH3_state_t* state, XXH3_accWidth_e accWidth) -{ - memcpy(acc, state->acc, sizeof(state->acc)); /* digest locally, state remains unaltered, and can continue ingesting more input afterwards */ - if (state->bufferedSize >= STRIPE_LEN) { - size_t const totalNbStripes = state->bufferedSize / STRIPE_LEN; - XXH32_hash_t nbStripesSoFar = state->nbStripesSoFar; - XXH3_consumeStripes(acc, - &nbStripesSoFar, state->nbStripesPerBlock, - state->buffer, totalNbStripes, - state->secret, state->secretLimit, - accWidth); - if (state->bufferedSize % STRIPE_LEN) { /* one last partial stripe */ - XXH3_accumulate_512(acc, - state->buffer + state->bufferedSize - STRIPE_LEN, - state->secret + state->secretLimit - XXH_SECRET_LASTACC_START, - accWidth); - } - } else { /* bufferedSize < STRIPE_LEN */ - if (state->bufferedSize) { /* one last stripe */ - xxh_u8 lastStripe[STRIPE_LEN]; - size_t const catchupSize = STRIPE_LEN - state->bufferedSize; - memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); - memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); - XXH3_accumulate_512(acc, - lastStripe, - state->secret + state->secretLimit - XXH_SECRET_LASTACC_START, - accWidth); - } } -} - -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) -{ - if (state->totalLen > XXH3_MIDSIZE_MAX) { - XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[ACC_NB]; - XXH3_digest_long(acc, state, XXH3_acc_64bits); - return XXH3_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)state->totalLen * PRIME64_1); - } - /* len <= XXH3_MIDSIZE_MAX : short code */ - if (state->seed) - return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); - return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), state->secret, state->secretLimit + STRIPE_LEN); -} - -/* ========================================== - * XXH3 128 bits (=> XXH128) - * ========================================== */ - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(1 <= len && len <= 3); - XXH_ASSERT(secret != NULL); - { xxh_u8 const c1 = input[0]; - xxh_u8 const c2 = input[len >> 1]; - xxh_u8 const c3 = input[len - 1]; - xxh_u32 const combinedl = ((xxh_u32)c1) + (((xxh_u32)c2) << 8) + (((xxh_u32)c3) << 16) + (((xxh_u32)len) << 24); - xxh_u32 const combinedh = XXH_swap32(combinedl); - xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ (XXH_readLE32(secret) + seed); - xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ (XXH_readLE32(secret+4) - seed); - xxh_u64 const mixedl = keyed_lo * PRIME64_1; - xxh_u64 const mixedh = keyed_hi * PRIME64_5; - XXH128_hash_t const h128 = { XXH3_avalanche(mixedl) /*low64*/, XXH3_avalanche(mixedh) /*high64*/ }; - return h128; - } -} - - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_len_4to8_128b(const 
xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len <= 8); - { xxh_u32 const input_lo = XXH_readLE32(input); - xxh_u32 const input_hi = XXH_readLE32(input + len - 4); - xxh_u64 const input_64_lo = input_lo + ((xxh_u64)input_hi << 32); - xxh_u64 const input_64_hi = XXH_swap64(input_64_lo); - xxh_u64 const keyed_lo = input_64_lo ^ (XXH_readLE64(secret) + seed); - xxh_u64 const keyed_hi = input_64_hi ^ (XXH_readLE64(secret + 8) - seed); - xxh_u64 const mix64l1 = len + ((keyed_lo ^ (keyed_lo >> 51)) * PRIME32_1); - xxh_u64 const mix64l2 = (mix64l1 ^ (mix64l1 >> 47)) * PRIME64_2; - xxh_u64 const mix64h1 = ((keyed_hi ^ (keyed_hi >> 47)) * PRIME64_1) - len; - xxh_u64 const mix64h2 = (mix64h1 ^ (mix64h1 >> 43)) * PRIME64_4; - { XXH128_hash_t const h128 = { XXH3_avalanche(mix64l2) /*low64*/, XXH3_avalanche(mix64h2) /*high64*/ }; - return h128; - } } -} - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(9 <= len && len <= 16); - { xxh_u64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed); - xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret+8) - seed); - XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi, PRIME64_1); - xxh_u64 const lenContrib = XXH_mult32to64(len, PRIME32_5); - m128.low64 += lenContrib; - m128.high64 += input_hi * PRIME64_1; - m128.low64 ^= (m128.high64 >> 32); - { XXH128_hash_t h128 = XXH_mult64to128(m128.low64, PRIME64_2); - h128.high64 += m128.high64 * PRIME64_2; - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = XXH3_avalanche(h128.high64); - return h128; - } } -} - -/* Assumption : `secret` size is >= 16 - * Note : it should be >= XXH3_SECRET_SIZE_MIN anyway */ -XXH_FORCE_INLINE XXH128_hash_t -XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(len <= 16); - { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); - if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); - if (len) return XXH3_len_1to3_128b(input, len, secret, seed); - { XXH128_hash_t const h128 = { 0, 0 }; - return h128; - } } -} - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_hashLong_128b_internal(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize) -{ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[ACC_NB] = XXH3_INIT_ACC; - - XXH3_hashLong_internal_loop(acc, input, len, secret, secretSize, XXH3_acc_128bits); - - /* converge into final hash */ - XXH_STATIC_ASSERT(sizeof(acc) == 64); - XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - { xxh_u64 const low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * PRIME64_1); - xxh_u64 const high64 = XXH3_mergeAccs(acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((xxh_u64)len * PRIME64_2)); - XXH128_hash_t const h128 = { low64, high64 }; - return h128; - } -} - -XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. 
Not sure why (uop cache maybe ?), but difference is large and easily measurable */ -XXH3_hashLong_128b_defaultSecret(const xxh_u8* input, size_t len) -{ - return XXH3_hashLong_128b_internal(input, len, kSecret, sizeof(kSecret)); -} - -XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ -XXH3_hashLong_128b_withSecret(const xxh_u8* input, size_t len, - const xxh_u8* secret, size_t secretSize) -{ - return XXH3_hashLong_128b_internal(input, len, secret, secretSize); -} - -XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ -XXH3_hashLong_128b_withSeed(const xxh_u8* input, size_t len, XXH64_hash_t seed) -{ - XXH_ALIGN(8) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; - if (seed == 0) return XXH3_hashLong_128b_defaultSecret(input, len); - XXH3_initCustomSecret(secret, seed); - return XXH3_hashLong_128b_internal(input, len, secret, sizeof(secret)); -} - - -XXH_FORCE_INLINE XXH128_hash_t -XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, const xxh_u8* secret, XXH64_hash_t seed) -{ - acc.low64 += XXH3_mix16B (input_1, secret+0, seed); - acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); - acc.high64 += XXH3_mix16B (input_2, secret+16, seed); - acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); - return acc; -} - -XXH_NO_INLINE XXH128_hash_t -XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); - - { XXH128_hash_t acc; - int const nbRounds = (int)len / 32; - int i; - acc.low64 = len * PRIME64_1; - acc.high64 = 0; - for (i=0; i<4; i++) { - acc = XXH128_mix32B(acc, input+(32*i), input+(32*i)+16, secret+(32*i), seed); - } - acc.low64 = XXH3_avalanche(acc.low64); - acc.high64 = XXH3_avalanche(acc.high64); - XXH_ASSERT(nbRounds >= 4); - for (i=4 ; i < nbRounds; i++) { - acc = XXH128_mix32B(acc, input+(32*i), input+(32*i)+16, secret+XXH3_MIDSIZE_STARTOFFSET+(32*(i-4)), seed); - } - /* last bytes */ - acc = XXH128_mix32B(acc, input + len - 16, input + len - 32, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, 0ULL - seed); - - { xxh_u64 const low64 = acc.low64 + acc.high64; - xxh_u64 const high64 = (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2); - XXH128_hash_t const h128 = { XXH3_avalanche(low64), (XXH64_hash_t)0 - XXH3_avalanche(high64) }; - return h128; - } - } -} - - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(16 < len && len <= 128); - - { XXH128_hash_t acc; - acc.low64 = len * PRIME64_1; - acc.high64 = 0; - if (len > 32) { - if (len > 64) { - if (len > 96) { - acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); - } - acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); - } - acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); - } - acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); - { xxh_u64 const low64 = acc.low64 + acc.high64; - xxh_u64 const high64 = (acc.low64 * 
PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2); - XXH128_hash_t const h128 = { XXH3_avalanche(low64), (XXH64_hash_t)0 - XXH3_avalanche(high64) }; - return h128; - } - } -} - -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) -{ - if (len <= 16) return XXH3_len_0to16_128b((const xxh_u8*)input, len, kSecret, 0); - if (len <= 128) return XXH3_len_17to128_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); - if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); - return XXH3_hashLong_128b_defaultSecret((const xxh_u8*)input, len); -} - -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - /* if an action must be taken should `secret` conditions not be respected, - * it should be done here. - * For now, it's a contract pre-condition. - * Adding a check and a branch here would cost performance at every hash */ - if (len <= 16) return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, 0); - if (len <= 128) return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); - if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); - return XXH3_hashLong_128b_withSecret((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize); -} - -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) -{ - if (len <= 16) return XXH3_len_0to16_128b((const xxh_u8*)input, len, kSecret, seed); - if (len <= 128) return XXH3_len_17to128_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); - if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); - return XXH3_hashLong_128b_withSeed((const xxh_u8*)input, len, seed); -} - -XXH_PUBLIC_API XXH128_hash_t -XXH128(const void* input, size_t len, XXH64_hash_t seed) -{ - return XXH3_128bits_withSeed(input, len, seed); -} - - -/* === XXH3 128-bit streaming === */ - -/* all the functions are actually the same as for 64-bit streaming variant, - just the reset one is different (different initial acc values for 0,5,6,7), - and near the end of the digest function */ - -static void -XXH3_128bits_reset_internal(XXH3_state_t* statePtr, - XXH64_hash_t seed, - const xxh_u8* secret, size_t secretSize) -{ - XXH3_64bits_reset_internal(statePtr, seed, secret, secretSize); -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset(XXH3_state_t* statePtr) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_128bits_reset_internal(statePtr, 0, kSecret, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_128bits_reset_internal(statePtr, 0, (const xxh_u8*)secret, secretSize); - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_128bits_reset_internal(statePtr, seed, kSecret, XXH_SECRET_DEFAULT_SIZE); - XXH3_initCustomSecret(statePtr->customSecret, seed); - statePtr->secret = statePtr->customSecret; - return XXH_OK; -} - -XXH_PUBLIC_API 
XXH_errorcode -XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) -{ - return XXH3_update(state, (const xxh_u8*)input, len, XXH3_acc_128bits); -} - -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) -{ - if (state->totalLen > XXH3_MIDSIZE_MAX) { - XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[ACC_NB]; - XXH3_digest_long(acc, state, XXH3_acc_128bits); - XXH_ASSERT(state->secretLimit + STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - { xxh_u64 const low64 = XXH3_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)state->totalLen * PRIME64_1); - xxh_u64 const high64 = XXH3_mergeAccs(acc, state->secret + state->secretLimit + STRIPE_LEN - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((xxh_u64)state->totalLen * PRIME64_2)); - XXH128_hash_t const h128 = { low64, high64 }; - return h128; - } - } - /* len <= XXH3_MIDSIZE_MAX : short code */ - if (state->seed) - return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); - return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), state->secret, state->secretLimit + STRIPE_LEN); -} - -/* 128-bit utility functions */ - -#include /* memcmp */ - -/* return : 1 is equal, 0 if different */ -XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) -{ - /* note : XXH128_hash_t is compact, it has no padding byte */ - return !(memcmp(&h1, &h2, sizeof(h1))); -} - -/* This prototype is compatible with stdlib's qsort(). - * return : >0 if *h128_1 > *h128_2 - * <0 if *h128_1 < *h128_2 - * =0 if *h128_1 == *h128_2 */ -XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) -{ - XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; - XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; - int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); - /* note : bets that, in most cases, hash values are different */ - if (hcmp) return hcmp; - return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); -} - - -/*====== Canonical representation ======*/ -XXH_PUBLIC_API void -XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) { - hash.high64 = XXH_swap64(hash.high64); - hash.low64 = XXH_swap64(hash.low64); - } - memcpy(dst, &hash.high64, sizeof(hash.high64)); - memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); -} - -XXH_PUBLIC_API XXH128_hash_t -XXH128_hashFromCanonical(const XXH128_canonical_t* src) -{ - XXH128_hash_t h; - h.high64 = XXH_readBE64(src); - h.low64 = XXH_readBE64(src->digest + 8); - return h; -} - - - -#endif /* XXH3_H */ diff --git a/lib/mmseqs/lib/xxhash/xxhash.c b/lib/mmseqs/lib/xxhash/xxhash.c new file mode 100644 index 0000000..0fae88c --- /dev/null +++ b/lib/mmseqs/lib/xxhash/xxhash.c @@ -0,0 +1,43 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2020 Yann Collet + * + * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at: + * - xxHash homepage: https://www.xxhash.com + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + + +/* + * xxhash.c instantiates functions defined in xxhash.h + */ + +#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ +#define XXH_IMPLEMENTATION /* access definitions */ + +#include "xxhash.h" diff --git a/lib/mmseqs/lib/xxhash/xxhash.cpp b/lib/mmseqs/lib/xxhash/xxhash.cpp deleted file mode 100644 index 1e4df67..0000000 --- a/lib/mmseqs/lib/xxhash/xxhash.cpp +++ /dev/null @@ -1,5 +0,0 @@ -// -// Created by Martin Steinegger on 2020-01-14. -// - -#include "xxhash.h" diff --git a/lib/mmseqs/lib/xxhash/xxhash.h b/lib/mmseqs/lib/xxhash/xxhash.h index 3593b91..2d56d23 100644 --- a/lib/mmseqs/lib/xxhash/xxhash.h +++ b/lib/mmseqs/lib/xxhash/xxhash.h @@ -1,40 +1,42 @@ /* - xxHash - Extremely Fast Hash algorithm - Header File - Copyright (C) 2012-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
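The new xxhash.c above is only the single-translation-unit instantiation of the header (XXH_STATIC_LINKING_ONLY plus XXH_IMPLEMENTATION before including xxhash.h). As a hedged sketch with a hypothetical main.c, a consumer just includes the plain header and links that one object:

    /* main.c -- illustrative sketch */
    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"          /* declarations only; the code comes from xxhash.c */

    int main(void)
    {
        const char msg[] = "hello xxhash";
        XXH64_hash_t const h = XXH64(msg, strlen(msg), 0 /* seed */);
        printf("XXH64 = %016llx\n", (unsigned long long)h);
        return 0;
    }
    /* assumed build: cc -O2 main.c xxhash.c -o demo */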
- - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (C) 2012-2020 Yann Collet + * + * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at: + * - xxHash homepage: https://www.xxhash.com + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ -/* Notice extracted from xxHash homepage : +/* TODO: update */ +/* Notice extracted from xxHash homepage: -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +xxHash is an extremely fast hash algorithm, running at RAM speed limits. It also successfully passes all tests from the SMHasher suite. Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) @@ -57,10 +59,10 @@ Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. -Note : SMHasher's CRC32 implementation is not the fastest one. +Note: SMHasher's CRC32 implementation is not the fastest one. Other speed-oriented implementations can be faster, -especially in combination with PCLMUL instruction : -http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735 +especially in combination with PCLMUL instruction: +https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735 A 64-bit version, named XXH64, is available since r35. It offers much better speed, but for 64-bit applications only. @@ -69,37 +71,38 @@ XXH64 13.8 GB/s 1.9 GB/s XXH32 6.8 GB/s 6.0 GB/s */ -#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ -#define XXH_IMPLEMENTATION /* access definitions */ - - #if defined (__cplusplus) extern "C" { #endif - -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - /* **************************** - * API modifier + * INLINE mode ******************************/ -/** XXH_INLINE_ALL (and XXH_PRIVATE_API) - * This build macro includes xxhash functions in `static` mode - * in order to inline them, and remove their symbol from the public list. 
- * Inlining offers great performance improvement on small keys, - * and dramatic ones when length is expressed as a compile-time constant. - * See https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html . - * Methodology : +/*! + * XXH_INLINE_ALL (and XXH_PRIVATE_API) + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length is + * expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: * #define XXH_INLINE_ALL * #include "xxhash.h" - * `xxhash.c` is automatically included. - * It's not useful to compile and link it as a separate object. + * + * Do not compile and link xxhash.o as a separate object, as it is not useful. */ -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) @@ -107,10 +110,63 @@ extern "C" { # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else - /* this version may generate warnings for unused static functions */ + /* note: this version may generate warnings for unused static functions */ # define XXH_PUBLIC_API static # endif -#else + + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such + * as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ +# ifdef XXH_NAMESPACE +# error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported" + /* + * Note: Alternative: #undef all symbols (it's a pretty large list). + * Without #error: it compiles, but functions are actually not inlined. + */ +# endif +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, but they must + * still be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and is a more dispersed action. 
+ * Meanwhile, renaming can be achieved in a single block + */ +# define XXH_IPREF(Id) XXH_INLINE_ ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + + + +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +/* specific declaration modes for Windows */ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) # if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) # ifdef XXH_EXPORT # define XXH_PUBLIC_API __declspec(dllexport) @@ -120,23 +176,26 @@ extern "C" { # else # define XXH_PUBLIC_API /* do nothing */ # endif -#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ +#endif -/*! XXH_NAMESPACE, aka Namespace Emulation : - * - * If you want to include _and expose_ xxHash functions from within your own library, - * but also want to avoid symbol collisions with other libraries which may also include xxHash, +/*! + * XXH_NAMESPACE, aka Namespace Emulation: * - * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library - * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of XXH_NAMESPACE + * (therefore, avoid empty or numeric values). * - * Note that no change is required within the calling program as long as it includes `xxhash.h` : - * regular symbol name will be automatically translated by this header. + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. 
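To make the two build modes described above concrete, here is a sketch with hypothetical file names and prefix: one unit inlines everything so no xxHash symbols are exported, the other relies on namespace emulation, which requires compiling xxhash.c with the same XXH_NAMESPACE value:

    /* unit_a.c -- inline mode (sketch): all xxHash code becomes static here */
    #define XXH_INLINE_ALL
    #include "xxhash.h"

    XXH64_hash_t hash_a(const void* p, size_t n)
    {
        return XXH3_64bits(p, n);   /* inlined; no xxHash symbol is exported */
    }

    /* unit_b.c -- namespace emulation (sketch): compile this unit and xxhash.c
     * with -DXXH_NAMESPACE=MMSEQS_ (hypothetical prefix) */
    #include "xxhash.h"

    XXH64_hash_t hash_b(const void* p, size_t n)
    {
        return XXH3_64bits(p, n);   /* the header rewrites this to MMSEQS_XXH3_64bits */
    }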
*/ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* XXH32 */ # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) @@ -146,6 +205,7 @@ extern "C" { # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +/* XXH64 */ # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) @@ -155,6 +215,33 @@ extern "C" { # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +/* XXH3_64bits */ +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) +# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +/* XXH3_128bits */ +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) +# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) +# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) +# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) +# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) +# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) +# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) +# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) +# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) +# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) +# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) +# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) #endif @@ -162,8 +249,8 @@ extern "C" { * Version ***************************************/ #define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 7 -#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_MINOR 8 +#define XXH_VERSION_RELEASE 0 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) XXH_PUBLIC_API unsigned XXH_versionNumber (void); @@ -182,7 +269,7 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && 
(__STDC_VERSION__ >= 199901L) /* C99 */) ) # include -typedef uint32_t XXH32_hash_t; + typedef uint32_t XXH32_hash_t; #else # include # if UINT_MAX == 0xFFFFFFFFUL @@ -191,16 +278,22 @@ typedef uint32_t XXH32_hash_t; # if ULONG_MAX == 0xFFFFFFFFUL typedef unsigned long XXH32_hash_t; # else -# error "unsupported platform : need a 32-bit type" +# error "unsupported platform: need a 32-bit type" # endif # endif #endif -/*! XXH32() : - Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". - The memory between input & input+length must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. - Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +/*! + * XXH32(): + * Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + * The memory between input & input+length must be valid (allocated and read-accessible). + * "seed" can be used to alter the result predictably. + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems, + * and offers true 64/128 bit hash results. It provides a superior level of + * dispersion, and greatly reduces the risks of collisions. + */ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); /******* Streaming *******/ @@ -210,20 +303,22 @@ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_ * This method is slower than single-call functions, due to state management. * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. * - * XXH state must first be allocated, using XXH*_createState() . + * An XXH state must first be allocated using `XXH*_createState()`. * - * Start a new hash by initializing state with a seed, using XXH*_reset(). + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. * - * Then, feed the hash state by calling XXH*_update() as many times as necessary. - * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. * - * Finally, a hash value can be produced anytime, by using XXH*_digest(). + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. * This function returns the nn-bits hash as an int or long long. * - * It's still possible to continue inserting input into the hash state after a digest, - * and generate some new hash values later on, by invoking again XXH*_digest(). + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. * - * When done, release the state, using XXH*_freeState(). + * When done, release the state using `XXH*_freeState()`. */ typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ @@ -237,19 +332,23 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); /******* Canonical representation *******/ -/* Default return values from XXH functions are basic unsigned 32 and 64 bits. +/* + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. * This the simplest and fastest format for further post-processing. 
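A minimal sketch of the streaming cycle just described, feeding a hypothetical message in two chunks:

    #include <string.h>
    #include "xxhash.h"

    /* Sketch of the documented create/reset/update/digest/free cycle. */
    static XXH32_hash_t demo_xxh32_stream(void)
    {
        const char part1[] = "many-against-";
        const char part2[] = "many";
        XXH32_hash_t h = 0;

        XXH32_state_t* st = XXH32_createState();
        if (st == NULL) return 0;

        if (XXH32_reset(st, 0 /* seed */) == XXH_OK) {
            XXH32_update(st, part1, strlen(part1));
            XXH32_update(st, part2, strlen(part2));
            h = XXH32_digest(st);   /* same value as one-shot XXH32() over the
                                       concatenated input; more input may still
                                       be added afterwards */
        }
        XXH32_freeState(st);
        return h;
    }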
- * However, this leaves open the question of what is the order of bytes, - * since little and big endian conventions will write the same number differently. * - * The canonical representation settles this issue, - * by mandating big-endian convention, - * aka, the same convention as human-readable numbers (large digits first). - * When writing hash values to storage, sending them over a network, or printing them, - * it's highly recommended to use the canonical representation, - * to ensure portability across a wider range of systems, present and future. + * However, this leaves open the question of what is the order on the byte level, + * since little and big endian conventions will store the same number differently. * - * The following functions allow transformation of hash values into and from canonical format. + * The canonical representation settles this issue by mandating big-endian + * convention, the same convention as human-readable numbers (large digits first). + * + * When writing hash values to storage, sending them over a network, or printing + * them, it's highly recommended to use the canonical representation to ensure + * portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values to and from + * canonical format. */ typedef struct { unsigned char digest[4]; } XXH32_canonical_t; @@ -265,16 +364,24 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include -typedef uint64_t XXH64_hash_t; + typedef uint64_t XXH64_hash_t; #else -/* the following type must have a width of 64-bit */ + /* the following type must have a width of 64-bit */ typedef unsigned long long XXH64_hash_t; #endif -/*! XXH64() : - * Returns the 64-bit hash of sequence of length @length stored at memory address @input. - * @seed can be used to alter the result predictably. - * This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). +/*! + * XXH64(): + * Returns the 64-bit hash of sequence of length @length stored at memory + * address @input. + * @seed can be used to alter the result predictably. + * + * This function usually runs faster on 64-bit systems, but slower on 32-bit + * systems (see benchmark). + * + * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems, + * and offers true 64/128 bit hash results. It provides a superior level of + * dispersion, and greatly reduces the risks of collisions. 
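And a sketch of the canonical-representation round-trip described above (the output buffer layout is hypothetical):

    #include <string.h>
    #include "xxhash.h"

    /* Convert a hash to its canonical (big-endian) form for storage, then
     * recover the native value on the reading side. */
    static int demo_canonical(XXH32_hash_t h, unsigned char out[4])
    {
        XXH32_canonical_t c;
        XXH32_canonicalFromHash(&c, h);
        memcpy(out, c.digest, sizeof(c.digest));   /* portable on-disk/on-wire bytes */

        /* reader side: */
        {   XXH32_canonical_t in;
            memcpy(in.digest, out, sizeof(in.digest));
            return XXH32_hashFromCanonical(&in) == h;   /* 1 on every platform */
        }
    }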
*/ XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed); @@ -289,262 +396,165 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); /******* Canonical representation *******/ -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); -#endif /* XXH_NO_LONG_LONG */ - -#endif /* XXHASH_H_5627135585666179 */ - - - -#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) -#define XXHASH_H_STATIC_13879238742 -/* ************************************************************************************************ - This section contains declarations which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - These declarations should only be used with static linking. - Never use them in association with dynamic linking ! -*************************************************************************************************** */ - -/* These definitions are only present to allow - * static allocation of XXH state, on stack or in a struct for example. - * Never **ever** use members directly. */ - -struct XXH32_state_s { - XXH32_hash_t total_len_32; - XXH32_hash_t large_len; - XXH32_hash_t v1; - XXH32_hash_t v2; - XXH32_hash_t v3; - XXH32_hash_t v4; - XXH32_hash_t mem32[4]; - XXH32_hash_t memsize; - XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - - -#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ - -struct XXH64_state_s { - XXH64_hash_t total_len; - XXH64_hash_t v1; - XXH64_hash_t v2; - XXH64_hash_t v3; - XXH64_hash_t v4; - XXH64_hash_t mem64[4]; - XXH32_hash_t memsize; - XXH32_hash_t reserved32; /* required for padding anyway */ - XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ - - /*-********************************************************************** -* XXH3 -* New experimental hash +* XXH3 64-bit variant ************************************************************************/ -/* ********************************************* - * XXH3 is a new hash algorithm, - * featuring improved speed performance for both small and large inputs. - * See full speed analysis at : http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html - * In general, expect XXH3 to run about ~2x faster on large inputs, - * and >3x faster on small ones, though exact differences depend on platform. +/* ************************************************************************ + * XXH3 is a new hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: * - * The algorithm is portable, will generate the same hash on all platforms. - * It benefits greatly from vectorization units, but does not require it. 
+ * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * In general, expect XXH3 to run about ~2x faster on large inputs and >3x + * faster on small ones compared to XXH64, though exact differences depend on + * the platform. + * + * The algorithm is portable: Like XXH32 and XXH64, it generates the same hash + * on all platforms. + * + * It benefits greatly from SIMD and 64-bit arithmetic, but does not require it. + * + * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run + * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are + * explained in the implementation. + * + * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8, + * ZVector and scalar targets. This can be controlled with the XXH_VECTOR macro. * * XXH3 offers 2 variants, _64bits and _128bits. - * When only 64 bits are needed, prefer calling the _64bits variant : - * it reduces the amount of mixing, resulting in faster speed on small inputs. + * When only 64 bits are needed, prefer calling the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * * It's also generally simpler to manipulate a scalar return type than a struct. * - * The XXH3 algorithm is still considered experimental. - * Produced results can still change between versions. - * Results produced by v0.7.x are not comparable with results from v0.7.y . - * It's nonetheless possible to use XXH3 for ephemeral data (local sessions), - * but avoid storing values in long-term storage for later reads. + * The 128-bit version adds additional strength, but it is slightly slower. * - * The API supports one-shot hashing, streaming mode, and custom secrets. + * The XXH3 algorithm is still in development. + * The results it produces may still change in future versions. + * + * Results produced by v0.7.x are not comparable with results from v0.7.y. + * However, the API is completely stable, and it can safely be used for + * ephemeral data (local sessions). * - * There are still a number of opened questions that community can influence during the experimental period. - * I'm trying to list a few of them below, though don't consider this list as complete. - * - * - 128-bits output type : currently defined as a structure of two 64-bits fields. - * That's because 128-bit values do not exist in C standard. - * Note that it means that, at byte level, result is not identical depending on endianess. - * However, at field level, they are identical on all platforms. - * The canonical representation solves the issue of identical byte-level representation across platforms, - * which is necessary for serialization. - * Q1 : Would there be a better representation for a 128-bit hash result ? - * Q2 : Are the names of the inner 64-bit fields important ? Should they be changed ? - * - * - Prototype XXH128() : XXH128() uses the same arguments as XXH64(), for consistency. - * It means it maps to XXH3_128bits_withSeed(). - * This variant is slightly slower than XXH3_128bits(), - * because the seed is now part of the algorithm, and can't be simplified. - * Is that a good idea ? - * - * - Seed type for XXH128() : currently, it's a single 64-bit value, like the 64-bit variant. - * It could be argued that it's more logical to offer a 128-bit seed input parameter for a 128-bit hash. - * But 128-bit seed is more difficult to use, since it requires to pass a structure instead of a scalar value. 
- * Such a variant could either replace current one, or become an additional one. - * Farmhash, for example, offers both variants (the 128-bits seed variant is called `doubleSeed`). - * Follow up question : if both 64-bit and 128-bit seeds are allowed, which variant should be called XXH128 ? - * - * - Result for len==0 : Currently, the result of hashing a zero-length input is always `0`. - * It seems okay as a return value when using "default" secret and seed. - * But is it still fine to return `0` when secret or seed are non-default ? - * Are there use cases which could depend on generating a different hash result for zero-length input when the secret is different ? - * - * - Consistency (1) : Streaming XXH128 uses an XXH3 state, which is the same state as XXH3_64bits(). - * It means a 128bit streaming loop must invoke the following symbols : - * XXH3_createState(), XXH3_128bits_reset(), XXH3_128bits_update() (loop), XXH3_128bits_digest(), XXH3_freeState(). - * Is that consistent enough ? - * - * - Consistency (2) : The canonical representation of `XXH3_64bits` is provided by existing functions - * XXH64_canonicalFromHash(), and reverse operation XXH64_hashFromCanonical(). - * As a mirror, canonical functions for XXH128_hash_t results generated by `XXH3_128bits` - * are XXH128_canonicalFromHash() and XXH128_hashFromCanonical(). - * Which means, `XXH3` doesn't appear in the names, because canonical functions operate on a type, - * independently of which algorithm was used to generate that type. - * Is that consistent enough ? + * Avoid storing values in long-term storage until the algorithm is finalized. + * XXH3's return values will be officially finalized upon reaching v0.8.0. + * + * After which, return values of XXH3 and XXH128 will no longer change in + * future versions. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. */ -#ifdef XXH_NAMESPACE -# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) -# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) -# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) - -# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) -# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) -# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) - -# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) -# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) -# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) -# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) -# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) -#endif - -/* XXH3_64bits() : +/* XXH3_64bits(): * default 64-bit variant, using default secret and default seed of 0. * It's the fastest variant. */ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len); -/* XXH3_64bits_withSecret() : +/* + * XXH3_64bits_withSeed(): + * This variant generates a custom secret on the fly + * based on default secret altered using the `seed` value. + * While this operation is decently fast, note that it's not completely free. + * Note: seed==0 produces the same results as XXH3_64bits(). + */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); + +/* + * XXH3_64bits_withSecret(): * It's possible to provide any blob of bytes as a "secret" to generate the hash. 
* This makes it more difficult for an external actor to prepare an intentional collision. - * The secret *must* be large enough (>= XXH3_SECRET_SIZE_MIN). - * It should consist of random bytes. - * Avoid repeating same character, or sequences of bytes, - * and especially avoid swathes of \0. - * Failure to respect these conditions will result in a poor quality hash. + * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * However, the quality of produced hash values depends on secret's entropy. + * Technically, the secret must look like a bunch of random bytes. + * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever unsure about the "randomness" of the blob of bytes, + * consider relabelling it as a "custom seed" instead, + * and employ "XXH3_generateSecret()" (see below) + * to generate a high entropy secret derived from the custom seed. */ #define XXH3_SECRET_SIZE_MIN 136 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); -/* XXH3_64bits_withSeed() : - * This variant generates on the fly a custom secret, - * based on the default secret, altered using the `seed` value. - * While this operation is decently fast, note that it's not completely free. - * note : seed==0 produces same results as XXH3_64bits() */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); - - -/* streaming 64-bit */ - -#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */ -# include -# define XXH_ALIGN(n) alignas(n) -#elif defined(__GNUC__) -# define XXH_ALIGN(n) __attribute__ ((aligned(n))) -#elif defined(_MSC_VER) -# define XXH_ALIGN(n) __declspec(align(n)) -#else -# define XXH_ALIGN(n) /* disabled */ -#endif - -typedef struct XXH3_state_s XXH3_state_t; - -#define XXH3_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ -#define XXH3_INTERNALBUFFER_SIZE 256 -struct XXH3_state_s { - XXH_ALIGN(64) XXH64_hash_t acc[8]; - XXH_ALIGN(64) unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]; /* used to store a custom secret generated from the seed. Makes state larger. Design might change */ - XXH_ALIGN(64) unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]; - XXH32_hash_t bufferedSize; - XXH32_hash_t nbStripesPerBlock; - XXH32_hash_t nbStripesSoFar; - XXH32_hash_t secretLimit; - XXH32_hash_t reserved32; - XXH32_hash_t reserved32_2; - XXH64_hash_t totalLen; - XXH64_hash_t seed; - XXH64_hash_t reserved64; - const unsigned char* secret; /* note : there is some padding after, due to alignment on 64 bytes */ -}; /* typedef'd to XXH3_state_t */ -/* Streaming requires state maintenance. - * This operation costs memory and cpu. +/******* Streaming *******/ +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. * As a consequence, streaming is slower than one-shot hashing. - * For better performance, prefer using one-shot functions whenever possible. */ - + * For better performance, prefer one-shot functions whenever applicable. + */ +typedef struct XXH3_state_s XXH3_state_t; XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void); XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state); - -/* XXH3_64bits_reset() : - * initialize with default parameters. - * result will be equivalent to `XXH3_64bits()`. */ +/* + * XXH3_64bits_reset(): + * Initialize with default parameters. 
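A hedged sketch of the seeded and secret-based one-shot variants documented above; `my_secret` is a hypothetical caller-supplied buffer of high-entropy bytes:

    #include <assert.h>
    #include <string.h>
    #include "xxhash.h"

    /* `my_secret` must hold at least XXH3_SECRET_SIZE_MIN (136) bytes of
     * high-entropy data, e.g. read from an OS RNG. */
    static void demo_xxh3_oneshot(const void* my_secret, size_t my_secret_len)
    {
        const char data[] = "mmseqs2";
        size_t const len = strlen(data);

        XXH64_hash_t const h_default = XXH3_64bits(data, len);
        XXH64_hash_t const h_seeded  = XXH3_64bits_withSeed(data, len, 1234 /* arbitrary */);

        /* documented: seed==0 gives the same result as the default variant */
        assert(XXH3_64bits_withSeed(data, len, 0) == h_default);

        if (my_secret_len >= XXH3_SECRET_SIZE_MIN) {
            XXH64_hash_t const h_secret =
                XXH3_64bits_withSecret(data, len, my_secret, my_secret_len);
            (void)h_secret;
        }
        (void)h_default; (void)h_seeded;
    }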
+ * digest will be equivalent to `XXH3_64bits()`. + */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr); -/* XXH3_64bits_reset_withSeed() : - * generate a custom secret from `seed`, and store it into state. - * digest will be equivalent to `XXH3_64bits_withSeed()`. */ +/* + * XXH3_64bits_reset_withSeed(): + * Generate a custom secret from `seed`, and store it into `statePtr`. + * digest will be equivalent to `XXH3_64bits_withSeed()`. + */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed); -/* XXH3_64bits_reset_withSecret() : - * `secret` is referenced, and must outlive the hash streaming session. - * secretSize must be >= XXH3_SECRET_SIZE_MIN. +/* + * XXH3_64bits_reset_withSecret(): + * `secret` is referenced, it _must outlive_ the hash streaming session. + * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`, + * and the quality of produced hash values depends on secret's entropy + * (secret's content should look like a bunch of random bytes). + * When in doubt about the randomness of a candidate `secret`, + * consider employing `XXH3_generateSecret()` instead (see below). */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize); XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr); +/* note : canonical representation of XXH3 is the same as XXH64 + * since they both produce XXH64_hash_t values */ -/* 128-bit */ - -#ifdef XXH_NAMESPACE -# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) -# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) -# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) -# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) - -# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) -# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) -# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) -# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) -# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) -# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) -# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) -# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) -# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) -#endif +/*-********************************************************************** +* XXH3 128-bit variant +************************************************************************/ typedef struct { - XXH64_hash_t low64; - XXH64_hash_t high64; + XXH64_hash_t low64; + XXH64_hash_t high64; } XXH128_hash_t; -XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len); -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); /* == XXH128() */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); +/******* Streaming *******/ +/* + * Streaming requires state maintenance. 
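For illustration of the streaming API documented above, a minimal sketch comparing the one-shot and streaming 64-bit paths (the payload and its two-chunk split are arbitrary; error handling is abbreviated, and the XXH3 prototypes are assumed visible with a plain include, as laid out in this revision of the header):

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char data[] = "example payload, fed to the hash in two chunks";
        size_t len  = sizeof(data) - 1;
        size_t half = len / 2;

        /* one-shot reference value */
        XXH64_hash_t oneshot = XXH3_64bits(data, len);

        /* streaming with a default reset must match XXH3_64bits() */
        XXH3_state_t* state = XXH3_createState();
        if (state == NULL) return 1;
        if (XXH3_64bits_reset(state) != XXH_OK) return 1;
        XXH3_64bits_update(state, data, half);
        XXH3_64bits_update(state, data + half, len - half);
        XXH64_hash_t streamed = XXH3_64bits_digest(state);
        XXH3_freeState(state);

        printf("one-shot: %016llx\nstreamed: %016llx\n",
               (unsigned long long)oneshot, (unsigned long long)streamed);
        return oneshot == streamed ? 0 : 1;
    }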
+ * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + * + * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits(). + * Use already declared XXH3_createState() and XXH3_freeState(). + * + * All reset and streaming functions have same meaning as their 64-bit counterpart. + */ + XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr); XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed); XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize); @@ -552,104 +562,332 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePt XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr); +/* Following helper functions make it possible to compare XXH128_hast_t values. + * Since XXH128_hash_t is a structure, this capability is not offered by the language. + * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */ -/* Note : for better performance, following functions can be inlined, - * using XXH_INLINE_ALL */ - -/* return : 1 is equal, 0 if different */ +/*! + * XXH128_isEqual(): + * Return: 1 if `h1` and `h2` are equal, 0 if they are not. + */ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); -/* This comparator is compatible with stdlib's qsort(). - * return : >0 if *h128_1 > *h128_2 - * <0 if *h128_1 < *h128_2 - * =0 if *h128_1 == *h128_2 */ +/*! + * XXH128_cmp(): + * + * This comparator is compatible with stdlib's `qsort()`/`bsearch()`. + * + * return: >0 if *h128_1 > *h128_2 + * =0 if *h128_1 == *h128_2 + * <0 if *h128_1 < *h128_2 + */ XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2); /******* Canonical representation *******/ -typedef struct { unsigned char digest[16]; } XXH128_canonical_t; +typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash); XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src); #endif /* XXH_NO_LONG_LONG */ -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# define XXH_IMPLEMENTATION -#endif - -#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ - +#endif /* XXHASH_H_5627135585666179 */ -/*-********************************************************************** -* xxHash implementation -* Functions implementation used to be hosted within xxhash.c . -* However, code inlining requires to place implementation in the header file. -* As a consequence, xxhash.c used to be included within xxhash.h . -* But some build systems don't like *.c inclusions. -* So the implementation is now directly integrated within xxhash.h . -* Another small advantage is that xxhash.c is no longer required in /includes . 
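To make the 128-bit comparison and canonical helpers above concrete, a minimal sketch (the two input strings are arbitrary):

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char a[] = "first input";
        const char b[] = "second input";

        XXH128_hash_t ha = XXH3_128bits(a, sizeof(a) - 1);
        XXH128_hash_t hb = XXH3_128bits(b, sizeof(b) - 1);

        /* structure equality helper: 1 if equal, 0 otherwise */
        printf("equal: %d\n", XXH128_isEqual(ha, hb));

        /* qsort()/bsearch()-compatible ordering on pointers to the values */
        printf("order: %d\n", XXH128_cmp(&ha, &hb));

        /* canonical (big-endian) representation round-trip */
        XXH128_canonical_t canon;
        XXH128_canonicalFromHash(&canon, ha);
        XXH128_hash_t back = XXH128_hashFromCanonical(&canon);
        printf("round-trip ok: %d\n", XXH128_isEqual(ha, back));
        return 0;
    }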
-************************************************************************/ -#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ - || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) -# define XXH_IMPLEM_13a8737387 +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) +#define XXHASH_H_STATIC_13879238742 +/* **************************************************************************** + * This section contains declarations which are not guaranteed to remain stable. + * They may change in future versions, becoming incompatible with a different + * version of the library. + * These declarations should only be used with static linking. + * Never use them in association with dynamic linking! + ***************************************************************************** */ -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) +/* + * These definitions are only present to allow static allocation + * of XXH states, on stack or in a struct, for example. + * Never **ever** access their members directly. */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7))) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. - * When this macro is enabled, xxHash actively checks input for null pointer. - * It it is, result for null input pointers is the same as a null-length input. - */ -#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ -# define XXH_ACCEPT_NULL_INPUT_POINTER 0 -#endif +struct XXH32_state_s { + XXH32_hash_t total_len_32; + XXH32_hash_t large_len; + XXH32_hash_t v1; + XXH32_hash_t v2; + XXH32_hash_t v3; + XXH32_hash_t v4; + XXH32_hash_t mem32[4]; + XXH32_hash_t memsize; + XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. 
- * The check costs one initial branch per hash; - * set it to 0 when the input is guaranteed to be aligned, - * or when alignment doesn't matter for performance. - */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif -/*!XXH_REROLL: +#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + +struct XXH64_state_s { + XXH64_hash_t total_len; + XXH64_hash_t v1; + XXH64_hash_t v2; + XXH64_hash_t v3; + XXH64_hash_t v4; + XXH64_hash_t mem64[4]; + XXH32_hash_t memsize; + XXH32_hash_t reserved32; /* required for padding anyway */ + XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + +#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */ +# include +# define XXH_ALIGN(n) alignas(n) +#elif defined(__GNUC__) +# define XXH_ALIGN(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +# define XXH_ALIGN(n) __declspec(align(n)) +#else +# define XXH_ALIGN(n) /* disabled */ +#endif + +/* Old GCC versions only accept the attribute after the type in structures. */ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && defined(__GNUC__) +# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) +#else +# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type +#endif + +#define XXH3_INTERNALBUFFER_SIZE 256 +#define XXH3_SECRET_DEFAULT_SIZE 192 +struct XXH3_state_s { + XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); + /* used to store a custom secret generated from a seed */ + XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); + XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); + XXH32_hash_t bufferedSize; + XXH32_hash_t reserved32; + size_t nbStripesSoFar; + XXH64_hash_t totalLen; + size_t nbStripesPerBlock; + size_t secretLimit; + XXH64_hash_t seed; + XXH64_hash_t reserved64; + const unsigned char* extSecret; /* reference to external secret; + * if == NULL, use .customSecret instead */ + /* note: there may be some padding at the end due to alignment on 64 bytes */ +}; /* typedef'd to XXH3_state_t */ + +#undef XXH_ALIGN_MEMBER + +/* When the XXH3_state_t structure is merely emplaced on stack, + * it should be initialized with XXH3_INITSTATE() or a memset() + * in case its first reset uses XXH3_NNbits_reset_withSeed(). + * This init can be omitted if the first reset uses default or _withSecret mode. + * This operation isn't necessary when the state is created with XXH3_createState(). + * Note that this doesn't prepare the state for a streaming operation, + * it's still necessary to use XXH3_NNbits_reset*() afterwards. + */ +#define XXH3_INITSTATE(XXH3_state_ptr) { (XXH3_state_ptr)->seed = 0; } + + +/* === Experimental API === */ +/* Symbols defined below must be considered tied to a specific library version. */ + +/* + * XXH3_generateSecret(): + * + * Derive a high-entropy secret from any user-defined content, named customSeed. + * The generated secret can be used in combination with `*_withSecret()` functions. + * The `_withSecret()` variants are useful to provide a higher level of protection than 64-bit seed, + * as it becomes much more difficult for an external actor to guess how to impact the calculation logic. 
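A sketch of the stack-allocated state pattern described above; XXH_STATIC_LINKING_ONLY is needed to expose the struct definition and XXH3_INITSTATE(), and the seed value and message are placeholders:

    #define XXH_STATIC_LINKING_ONLY   /* exposes struct XXH3_state_s and XXH3_INITSTATE() */
    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "stack-allocated streaming state";

        XXH3_state_t state;       /* emplaced on the stack, no XXH3_createState() */
        XXH3_INITSTATE(&state);   /* needed because the first reset below uses a seed */

        if (XXH3_64bits_reset_withSeed(&state, 42) != XXH_OK) return 1;
        XXH3_64bits_update(&state, msg, sizeof(msg) - 1);
        printf("%016llx\n", (unsigned long long)XXH3_64bits_digest(&state));
        return 0;                 /* nothing to free: the state lives on the stack */
    }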
+ * + * The function accepts as input a custom seed of any length and any content, + * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE + * into an already allocated buffer secretBuffer. + * The generated secret is _always_ XXH_SECRET_DEFAULT_SIZE bytes long. + * + * The generated secret can then be used with any `*_withSecret()` variant. + * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`, + * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()` + * are part of this list. They all accept a `secret` parameter + * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN) + * _and_ feature very high entropy (consist of random-looking bytes). + * These conditions can be a high bar to meet, so + * this function can be used to generate a secret of proper quality. + * + * customSeed can be anything. It can have any size, even small ones, + * and its content can be anything, even stupidly "low entropy" source such as a bunch of zeroes. + * The resulting `secret` will nonetheless provide all expected qualities. + * + * Supplying NULL as the customSeed copies the default secret into `secretBuffer`. + * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior. + */ +XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize); + + +/* simple short-cut to pre-selected XXH3_128bits variant */ +XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); + + +#endif /* XXH_NO_LONG_LONG */ + + +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# define XXH_IMPLEMENTATION +#endif + +#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ + + +/* ======================================================================== */ +/* ======================================================================== */ +/* ======================================================================== */ + + +/*-********************************************************************** + * xxHash implementation + *-********************************************************************** + * xxHash's implementation used to be hosted inside xxhash.c. + * + * However, inlining requires implementation to be visible to the compiler, + * hence be included alongside the header. + * Previously, implementation was hosted inside xxhash.c, + * which was then #included when inlining was activated. + * This construction created issues with a few build and install systems, + * as it required xxhash.c to be stored in /include directory. + * + * xxHash implementation is now directly integrated within xxhash.h. + * As a consequence, xxhash.c is no longer needed in /include. + * + * xxhash.c is still available and is still useful. + * In a "normal" setup, when xxhash is not inlined, + * xxhash.h only exposes the prototypes and public symbols, + * while xxhash.c can be built into an object file xxhash.o + * which can then be linked into the final binary. + ************************************************************************/ + +#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ + || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) +# define XXH_IMPLEM_13a8737387 + +/* ************************************* +* Tuning parameters +***************************************/ +/*! + * XXH_FORCE_MEMORY_ACCESS: + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. 
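A minimal sketch of the workflow above: derive a secret from a low-entropy custom seed, then feed it to a `_withSecret()` variant (the seed string and payload are placeholders; the experimental symbols sit behind XXH_STATIC_LINKING_ONLY in this revision):

    #define XXH_STATIC_LINKING_ONLY   /* XXH3_generateSecret() is in the experimental section */
    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        /* any blob is acceptable as customSeed, even a "low entropy" one */
        const char customSeed[] = "application-specific seed material";
        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];

        /* fills `secret` with XXH3_SECRET_DEFAULT_SIZE high-entropy bytes */
        XXH3_generateSecret(secret, customSeed, sizeof(customSeed) - 1);

        /* the generated secret meets the >= XXH3_SECRET_SIZE_MIN requirement */
        const char data[] = "payload";
        XXH64_hash_t h = XXH3_64bits_withSecret(data, sizeof(data) - 1, secret, sizeof(secret));
        printf("%016llx\n", (unsigned long long)h);
        return 0;
    }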
+ * + * Unfortunately, on some target/compiler combinations, the generated assembly + * is sub-optimal. + * + * The below switch allow selection of a different access method + * in the search for improved performance. + * Method 0 (default): + * Use `memcpy()`. Safe and portable. Default. + * Method 1: + * `__attribute__((packed))` statement. It depends on compiler extensions + * and is therefore not portable. + * This method is safe if your compiler supports it, and *generally* as + * fast or faster than `memcpy`. + * Method 2: + * Direct access via cast. This method doesn't depend on the compiler but + * violates the C standard. + * It can generate buggy code on targets which do not support unaligned + * memory accesses. + * But in some circumstances, it's the only known way to get the most + * performance (example: GCC + ARMv6) + * Method 3: + * Byteshift. This can generate the best code on old compilers which don't + * inline small `memcpy()` calls, and it might also be faster on big-endian + * systems which lack a native byteswap instruction. + * See https://stackoverflow.com/a/32095106/646947 for details. + * Prefer these methods in priority order (0 > 1 > 2 > 3) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7))) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*! + * XXH_ACCEPT_NULL_INPUT_POINTER: + * If the input pointer is NULL, xxHash's default behavior is to dereference it, + * triggering a segfault. + * When this macro is enabled, xxHash actively checks the input for a null pointer. + * If it is, the result for null input pointers is the same as a zero-length input. + */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#endif + +/*! + * XXH_FORCE_ALIGN_CHECK: + * This is an important performance trick + * for architectures without decent unaligned memory access performance. + * It checks for input alignment, and when conditions are met, + * uses a "fast path" employing direct 32-bit/64-bit read, + * resulting in _dramatically faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, but not zero. + * Moreover, it's not useful to generate binary for an additional code path + * if memory access uses same instruction for both aligned and unaligned adresses. + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * Align check is automatically disabled on x86, x64 & arm64, + * which are platforms known to offer good unaligned memory accesses performance. + * + * This option does not affect XXH3 (only XXH32 and XXH64). + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + +/*! + * XXH_NO_INLINE_HINTS: + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. 
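The tuning macros above are meant to be defined before the header is seen, typically on the compiler command line or at the top of the including file; a sketch with illustrative values (not recommendations, the auto-detected defaults are usually the right choice):

    /* Illustrative overrides only. */
    #define XXH_FORCE_MEMORY_ACCESS 3   /* method 3: byteshift loads, for old compilers that won't inline memcpy */
    #define XXH_FORCE_ALIGN_CHECK   0   /* inputs known to be aligned: skip the initial branch */
    #define XXH_INLINE_ALL              /* also pulls the implementation into this translation unit */
    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "tuning macros are compile-time only";
        printf("%08x\n", (unsigned)XXH32(msg, sizeof(msg) - 1, 0));
        return 0;
    }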
+ * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using + * -fno-inline with GCC or Clang, this will automatically be defined. + */ +#ifndef XXH_NO_INLINE_HINTS +# if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \ + || defined(__NO_INLINE__) /* -O0, -fno-inline */ +# define XXH_NO_INLINE_HINTS 1 +# else +# define XXH_NO_INLINE_HINTS 0 +# endif +#endif + +/*! + * XXH_REROLL: * Whether to reroll XXH32_finalize, and XXH64_finalize, * instead of using an unrolled jump table/if statement loop. * - * This is automatically defined on -Os/-Oz on GCC and Clang. */ + * This is automatically defined on -Os/-Oz on GCC and Clang. + */ #ifndef XXH_REROLL # if defined(__OPTIMIZE_SIZE__) # define XXH_REROLL 1 @@ -662,14 +900,21 @@ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* /* ************************************* * Includes & Memory related functions ***************************************/ -/*! Modify the local functions below should you wish to use some other memory routines -* for malloc(), free() */ +/*! + * Modify the local functions below should you wish to use + * different memory routines for malloc() and free() + */ #include + static void* XXH_malloc(size_t s) { return malloc(s); } -static void XXH_free (void* p) { free(p); } +static void XXH_free(void* p) { free(p); } + /*! 
and for memcpy() */ #include -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } +static void* XXH_memcpy(void* dest, const void* src, size_t size) +{ + return memcpy(dest,src,size); +} #include /* ULLONG_MAX */ @@ -677,23 +922,31 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp /* ************************************* * Compiler Specific Options ***************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#ifdef _MSC_VER /* Visual Studio warning fix */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + +#if XXH_NO_INLINE_HINTS /* disable inlining hints */ +# if defined(__GNUC__) +# define XXH_FORCE_INLINE static __attribute__((unused)) +# else +# define XXH_FORCE_INLINE static +# endif +# define XXH_NO_INLINE static +/* enable inlining hints */ +#elif defined(_MSC_VER) /* Visual Studio */ # define XXH_FORCE_INLINE static __forceinline # define XXH_NO_INLINE static __declspec(noinline) +#elif defined(__GNUC__) +# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) +# define XXH_NO_INLINE static __attribute__((noinline)) +#elif defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ +# define XXH_FORCE_INLINE static inline +# define XXH_NO_INLINE static #else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define XXH_FORCE_INLINE static inline __attribute__((always_inline)) -# define XXH_NO_INLINE static __attribute__((noinline)) -# else -# define XXH_FORCE_INLINE static inline -# define XXH_NO_INLINE static -# endif -# else -# define XXH_FORCE_INLINE static -# define XXH_NO_INLINE static -# endif /* __STDC_VERSION__ */ +# define XXH_FORCE_INLINE static +# define XXH_NO_INLINE static #endif @@ -701,22 +954,27 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp /* ************************************* * Debug ***************************************/ -/* DEBUGLEVEL is expected to be defined externally, - * typically through compiler command line. - * Value must be a number. */ -#ifndef DEBUGLEVEL -# define DEBUGLEVEL 0 +/* + * XXH_DEBUGLEVEL is expected to be defined externally, typically via the + * compiler's command line options. The value must be a number. 
+ */ +#ifndef XXH_DEBUGLEVEL +# ifdef DEBUGLEVEL /* backwards compat */ +# define XXH_DEBUGLEVEL DEBUGLEVEL +# else +# define XXH_DEBUGLEVEL 0 +# endif #endif -#if (DEBUGLEVEL>=1) -# include /* note : can still be disabled with NDEBUG */ +#if (XXH_DEBUGLEVEL>=1) +# include /* note: can still be disabled with NDEBUG */ # define XXH_ASSERT(c) assert(c) #else # define XXH_ASSERT(c) ((void)0) #endif -/* note : use after variable declarations */ -#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } +/* note: use after variable declarations */ +#define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0) /* ************************************* @@ -726,31 +984,55 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include - typedef uint8_t xxh_u8; + typedef uint8_t xxh_u8; #else - typedef unsigned char xxh_u8; + typedef unsigned char xxh_u8; #endif typedef XXH32_hash_t xxh_u32; +#ifdef XXH_OLD_NAMES +# define BYTE xxh_u8 +# define U8 xxh_u8 +# define U32 xxh_u32 +#endif /* *** Memory access *** */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ +/* + * __pack instructions are safer but compiler specific, hence potentially + * problematic for some compilers. + * + * Currently only defined for GCC and ICC. + */ +#ifdef XXH_OLD_NAMES typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; -static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign; + return ((const xxh_unalign*)ptr)->u32; +} #else -/* portable and safe solution. Generally efficient. - * see : http://stackoverflow.com/a/32095106/646947 +/* + * Portable and safe solution. Generally efficient. + * see: https://stackoverflow.com/a/32095106/646947 */ static xxh_u32 XXH_read32(const void* memPtr) { @@ -765,8 +1047,19 @@ static xxh_u32 XXH_read32(const void* memPtr) /* *** Endianess *** */ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +/*! + * XXH_CPU_LITTLE_ENDIAN: + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, a runtime check (which is usually constant folded) + * is used instead. 
+ */ #ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ # if defined(_WIN32) /* Windows is always little endian */ \ || defined(__LITTLE_ENDIAN__) \ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) @@ -775,9 +1068,16 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) # define XXH_CPU_LITTLE_ENDIAN 0 # else +/* + * runtime test, presumed to simplify to a constant by compiler + */ static int XXH_isLittleEndian(void) { - const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; return one.c[0]; } # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() @@ -792,14 +1092,17 @@ static int XXH_isLittleEndian(void) ******************************************/ #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -#ifndef __has_builtin -# define __has_builtin(x) 0 +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 #endif -#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64) +#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ + && XXH_HAS_BUILTIN(__builtin_rotateleft64) # define XXH_rotl32 __builtin_rotateleft32 # define XXH_rotl64 __builtin_rotateleft64 -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ #elif defined(_MSC_VER) # define XXH_rotl32(x,r) _rotl(x,r) # define XXH_rotl64(x,r) _rotl64(x,r) @@ -828,6 +1131,32 @@ static xxh_u32 XXH_swap32 (xxh_u32 x) *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; +/* + * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. + * + * This is ideal for older compilers which don't inline memcpy. + */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u32)bytePtr[1] << 8) + | ((xxh_u32)bytePtr[2] << 16) + | ((xxh_u32)bytePtr[3] << 24); +} + +XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[3] + | ((xxh_u32)bytePtr[2] << 8) + | ((xxh_u32)bytePtr[1] << 16) + | ((xxh_u32)bytePtr[0] << 24); +} + +#else XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); @@ -837,6 +1166,7 @@ static xxh_u32 XXH_readBE32(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); } +#endif XXH_FORCE_INLINE xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) @@ -858,31 +1188,41 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } /* ******************************************************************* * 32-bit hash functions *********************************************************************/ -static const xxh_u32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */ -static const xxh_u32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */ -static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */ -static const xxh_u32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */ -static const xxh_u32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */ +static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */ +static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */ +static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */ +static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */ +static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { - acc += input * PRIME32_2; + acc += input * XXH_PRIME32_2; acc = XXH_rotl32(acc, 13); - acc *= PRIME32_1; + acc *= XXH_PRIME32_1; #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE) - /* UGLY HACK: + /* + * UGLY HACK: * This inline assembly hack forces acc into a normal register. This is the - * only thing that prevents GCC and Clang from autovectorizing the XXH32 loop - * (pragmas and attributes don't work for some resason) without globally + * only thing that prevents GCC and Clang from autovectorizing the XXH32 + * loop (pragmas and attributes don't work for some resason) without globally * disabling SSE4.1. * * The reason we want to avoid vectorization is because despite working on * 4 integers at a time, there are multiple factors slowing XXH32 down on * SSE4: - * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!) - * making it slightly slower to multiply four integers at once compared to four - * integers independently. Even when pmulld was fastest, Sandy/Ivy Bridge, it is - * still not worth it to go into SSE just to multiply unless doing a long operation. + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. * * - Four instructions are required to rotate, * movqda tmp, v // not required with VEX encoding @@ -893,9 +1233,10 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) * roll v, 13 // reliably fast across the board * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason * - * - Instruction level parallelism is actually more beneficial here because the - * SIMD actually serializes this operation: While v1 is rotating, v2 can load data, - * while v3 can multiply. 
SSE forces them to operate together. + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. * * How this hack works: * __asm__("" // Declare an assembly block but don't declare any instructions @@ -910,7 +1251,8 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) * loads and stores. * * Since the argument has to be in a normal register (not an SSE register), - * each time XXH32_round is called, it is impossible to vectorize. */ + * each time XXH32_round is called, it is impossible to vectorize. + */ __asm__("" : "+r" (acc)); #endif return acc; @@ -920,9 +1262,9 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) static xxh_u32 XXH32_avalanche(xxh_u32 h32) { h32 ^= h32 >> 15; - h32 *= PRIME32_2; + h32 *= XXH_PRIME32_2; h32 ^= h32 >> 13; - h32 *= PRIME32_3; + h32 *= XXH_PRIME32_3; h32 ^= h32 >> 16; return(h32); } @@ -932,64 +1274,66 @@ static xxh_u32 XXH32_avalanche(xxh_u32 h32) static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) { -#define PROCESS1 \ - h32 += (*ptr++) * PRIME32_5; \ - h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; +#define XXH_PROCESS1 do { \ + h32 += (*ptr++) * XXH_PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \ +} while (0) -#define PROCESS4 \ - h32 += XXH_get32bits(ptr) * PRIME32_3; \ - ptr+=4; \ - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; +#define XXH_PROCESS4 do { \ + h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ +} while (0) /* Compact rerolled version */ if (XXH_REROLL) { len &= 15; while (len >= 4) { - PROCESS4; + XXH_PROCESS4; len -= 4; } while (len > 0) { - PROCESS1; + XXH_PROCESS1; --len; } return XXH32_avalanche(h32); } else { switch(len&15) /* or switch(bEnd - p) */ { - case 12: PROCESS4; + case 12: XXH_PROCESS4; /* fallthrough */ - case 8: PROCESS4; + case 8: XXH_PROCESS4; /* fallthrough */ - case 4: PROCESS4; + case 4: XXH_PROCESS4; return XXH32_avalanche(h32); - case 13: PROCESS4; + case 13: XXH_PROCESS4; /* fallthrough */ - case 9: PROCESS4; + case 9: XXH_PROCESS4; /* fallthrough */ - case 5: PROCESS4; - PROCESS1; + case 5: XXH_PROCESS4; + XXH_PROCESS1; return XXH32_avalanche(h32); - case 14: PROCESS4; + case 14: XXH_PROCESS4; /* fallthrough */ - case 10: PROCESS4; + case 10: XXH_PROCESS4; /* fallthrough */ - case 6: PROCESS4; - PROCESS1; - PROCESS1; + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; return XXH32_avalanche(h32); - case 15: PROCESS4; + case 15: XXH_PROCESS4; /* fallthrough */ - case 11: PROCESS4; + case 11: XXH_PROCESS4; /* fallthrough */ - case 7: PROCESS4; + case 7: XXH_PROCESS4; /* fallthrough */ - case 3: PROCESS1; + case 3: XXH_PROCESS1; /* fallthrough */ - case 2: PROCESS1; + case 2: XXH_PROCESS1; /* fallthrough */ - case 1: PROCESS1; + case 1: XXH_PROCESS1; /* fallthrough */ case 0: return XXH32_avalanche(h32); } @@ -998,6 +1342,14 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) } } +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + XXH_FORCE_INLINE xxh_u32 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) { @@ -1013,10 +1365,10 @@ XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment if (len>=16) { const xxh_u8* const limit = bEnd - 15; - xxh_u32 
v1 = seed + PRIME32_1 + PRIME32_2; - xxh_u32 v2 = seed + PRIME32_2; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; xxh_u32 v3 = seed + 0; - xxh_u32 v4 = seed - PRIME32_1; + xxh_u32 v4 = seed - XXH_PRIME32_1; do { v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; @@ -1028,7 +1380,7 @@ XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); } else { - h32 = seed + PRIME32_5; + h32 = seed + XXH_PRIME32_5; } h32 += (xxh_u32)len; @@ -1080,10 +1432,10 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t s { XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; + state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + state.v2 = seed + XXH_PRIME32_2; state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; + state.v4 = seed - XXH_PRIME32_1; /* do not write into reserved, planned to be removed in a future version */ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); return XXH_OK; @@ -1164,7 +1516,7 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); } else { - h32 = state->v3 /* == seed */ + PRIME32_5; + h32 = state->v3 /* == seed */ + XXH_PRIME32_5; } h32 += state->total_len_32; @@ -1175,12 +1527,19 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state) /******* Canonical representation *******/ -/*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, remaining comparable across different systems. -*/ - +/* + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * + * The canonical representation uses big endian convention, the same convention + * as human-readable numbers (large digits first). + * + * This way, hash values can be written into a file or buffer, remaining + * comparable across different systems. + * + * The following functions allow transformation of hash values to and from their + * canonical format. + */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); @@ -1204,20 +1563,26 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src typedef XXH64_hash_t xxh_u64; +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif -/*! XXH_REROLL_XXH64: +/*! + * XXH_REROLL_XXH64: * Whether to reroll the XXH64_finalize() loop. * - * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a performance gain - * on 64-bit hosts, as only one jump is required. + * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a + * performance gain on 64-bit hosts, as only one jump is required. * - * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit registers, - * and 64-bit arithmetic needs to be simulated, it isn't beneficial to unroll. The code becomes - * ridiculously large (the largest function in the binary on i386!), and rerolling it saves - * anywhere from 3kB to 20kB. 
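A sketch of the canonical round-trip just described, with a byte buffer standing in for a file or network message (the input string is arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "hash me";
        XXH32_hash_t h = XXH32(msg, sizeof(msg) - 1, 0);

        /* writer side: big-endian canonical form, portable across systems */
        XXH32_canonical_t canon;
        XXH32_canonicalFromHash(&canon, h);

        unsigned char wire[sizeof(canon.digest)];
        memcpy(wire, canon.digest, sizeof(wire));      /* pretend this crossed a file or socket */

        /* reader side: rebuild the native value and compare */
        XXH32_canonical_t received;
        memcpy(received.digest, wire, sizeof(received.digest));
        printf("match: %d\n", h == XXH32_hashFromCanonical(&received));
        return 0;
    }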
It is also slightly faster because it fits into cache better - * and is more likely to be inlined by the compiler. + * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit + * registers, and 64-bit arithmetic needs to be simulated, it isn't beneficial + * to unroll. The code becomes ridiculously large (the largest function in the + * binary on i386!), and rerolling it saves anywhere from 3kB to 20kB. It is + * also slightly faster because it fits into cache better and is more likely + * to be inlined by the compiler. * - * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. */ + * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. + */ #ifndef XXH_REROLL_XXH64 # if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \ || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \ @@ -1231,24 +1596,39 @@ typedef XXH64_hash_t xxh_u64; # endif #endif /* !defined(XXH_REROLL_XXH64) */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ +/* + * __pack instructions are safer, but compiler specific, hence potentially + * problematic for some compilers. + * + * Currently only defined for GCC and ICC. + */ +#ifdef XXH_OLD_NAMES typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; -static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64; + return ((const xxh_unalign64*)ptr)->u64; +} #else -/* portable and safe solution. Generally efficient. - * see : http://stackoverflow.com/a/32095106/646947 +/* + * Portable and safe solution. Generally efficient. + * see: https://stackoverflow.com/a/32095106/646947 */ - static xxh_u64 XXH_read64(const void* memPtr) { xxh_u64 val; @@ -1276,6 +1656,37 @@ static xxh_u64 XXH_swap64 (xxh_u64 x) } #endif + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. 
*/ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); @@ -1285,6 +1696,7 @@ static xxh_u64 XXH_readBE64(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); } +#endif XXH_FORCE_INLINE xxh_u64 XXH_readLE64_align(const void* ptr, XXH_alignment align) @@ -1298,17 +1710,25 @@ XXH_readLE64_align(const void* ptr, XXH_alignment align) /******* xxh64 *******/ -static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */ -static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */ -static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */ -static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */ -static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */ +static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */ +static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */ +static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */ +static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */ +static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +#ifdef XXH_OLD_NAMES +# define PRIME64_1 XXH_PRIME64_1 +# define PRIME64_2 XXH_PRIME64_2 +# define PRIME64_3 XXH_PRIME64_3 +# define PRIME64_4 XXH_PRIME64_4 +# define PRIME64_5 XXH_PRIME64_5 +#endif static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) { - acc += input * PRIME64_2; + acc += input * XXH_PRIME64_2; acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; + acc *= XXH_PRIME64_1; return acc; } @@ -1316,16 +1736,16 @@ static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) { val = XXH64_round(0, val); acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; return acc; } static xxh_u64 XXH64_avalanche(xxh_u64 h64) { h64 ^= h64 >> 33; - h64 *= PRIME64_2; + h64 *= XXH_PRIME64_2; h64 ^= h64 >> 29; - h64 *= PRIME64_3; + h64 *= XXH_PRIME64_3; h64 ^= h64 >> 32; return h64; } @@ -1336,117 +1756,119 @@ static xxh_u64 XXH64_avalanche(xxh_u64 h64) static xxh_u64 XXH64_finalize(xxh_u64 h64, const 
xxh_u8* ptr, size_t len, XXH_alignment align) { -#define PROCESS1_64 \ - h64 ^= (*ptr++) * PRIME64_5; \ - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - -#define PROCESS4_64 \ - h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \ - ptr+=4; \ - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - -#define PROCESS8_64 { \ +#define XXH_PROCESS1_64 do { \ + h64 ^= (*ptr++) * XXH_PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; \ +} while (0) + +#define XXH_PROCESS4_64 do { \ + h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; \ + ptr += 4; \ + h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; \ +} while (0) + +#define XXH_PROCESS8_64 do { \ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \ - ptr+=8; \ - h64 ^= k1; \ - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ -} + ptr += 8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; \ +} while (0) /* Rerolled version for 32-bit targets is faster and much smaller. */ if (XXH_REROLL || XXH_REROLL_XXH64) { len &= 31; while (len >= 8) { - PROCESS8_64; + XXH_PROCESS8_64; len -= 8; } if (len >= 4) { - PROCESS4_64; + XXH_PROCESS4_64; len -= 4; } while (len > 0) { - PROCESS1_64; + XXH_PROCESS1_64; --len; } return XXH64_avalanche(h64); } else { switch(len & 31) { - case 24: PROCESS8_64; + case 24: XXH_PROCESS8_64; /* fallthrough */ - case 16: PROCESS8_64; + case 16: XXH_PROCESS8_64; /* fallthrough */ - case 8: PROCESS8_64; + case 8: XXH_PROCESS8_64; return XXH64_avalanche(h64); - case 28: PROCESS8_64; + case 28: XXH_PROCESS8_64; /* fallthrough */ - case 20: PROCESS8_64; + case 20: XXH_PROCESS8_64; /* fallthrough */ - case 12: PROCESS8_64; + case 12: XXH_PROCESS8_64; /* fallthrough */ - case 4: PROCESS4_64; + case 4: XXH_PROCESS4_64; return XXH64_avalanche(h64); - case 25: PROCESS8_64; + case 25: XXH_PROCESS8_64; /* fallthrough */ - case 17: PROCESS8_64; + case 17: XXH_PROCESS8_64; /* fallthrough */ - case 9: PROCESS8_64; - PROCESS1_64; + case 9: XXH_PROCESS8_64; + XXH_PROCESS1_64; return XXH64_avalanche(h64); - case 29: PROCESS8_64; + case 29: XXH_PROCESS8_64; /* fallthrough */ - case 21: PROCESS8_64; + case 21: XXH_PROCESS8_64; /* fallthrough */ - case 13: PROCESS8_64; + case 13: XXH_PROCESS8_64; /* fallthrough */ - case 5: PROCESS4_64; - PROCESS1_64; + case 5: XXH_PROCESS4_64; + XXH_PROCESS1_64; return XXH64_avalanche(h64); - case 26: PROCESS8_64; + case 26: XXH_PROCESS8_64; /* fallthrough */ - case 18: PROCESS8_64; + case 18: XXH_PROCESS8_64; /* fallthrough */ - case 10: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; + case 10: XXH_PROCESS8_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; return XXH64_avalanche(h64); - case 30: PROCESS8_64; + case 30: XXH_PROCESS8_64; /* fallthrough */ - case 22: PROCESS8_64; + case 22: XXH_PROCESS8_64; /* fallthrough */ - case 14: PROCESS8_64; + case 14: XXH_PROCESS8_64; /* fallthrough */ - case 6: PROCESS4_64; - PROCESS1_64; - PROCESS1_64; + case 6: XXH_PROCESS4_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; return XXH64_avalanche(h64); - case 27: PROCESS8_64; + case 27: XXH_PROCESS8_64; /* fallthrough */ - case 19: PROCESS8_64; + case 19: XXH_PROCESS8_64; /* fallthrough */ - case 11: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - PROCESS1_64; + case 11: XXH_PROCESS8_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; return XXH64_avalanche(h64); - case 31: PROCESS8_64; + case 31: XXH_PROCESS8_64; /* fallthrough */ - case 23: PROCESS8_64; + case 23: XXH_PROCESS8_64; /* fallthrough */ - case 15: PROCESS8_64; + case 15: XXH_PROCESS8_64; /* fallthrough */ - case 7: 
PROCESS4_64; + case 7: XXH_PROCESS4_64; /* fallthrough */ - case 3: PROCESS1_64; + case 3: XXH_PROCESS1_64; /* fallthrough */ - case 2: PROCESS1_64; + case 2: XXH_PROCESS1_64; /* fallthrough */ - case 1: PROCESS1_64; + case 1: XXH_PROCESS1_64; /* fallthrough */ case 0: return XXH64_avalanche(h64); } @@ -1456,6 +1878,16 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) return 0; /* unreachable, but some compilers complain without it */ } +#ifdef XXH_OLD_NAMES +# define PROCESS1_64 XXH_PROCESS1_64 +# define PROCESS4_64 XXH_PROCESS4_64 +# define PROCESS8_64 XXH_PROCESS8_64 +#else +# undef XXH_PROCESS1_64 +# undef XXH_PROCESS4_64 +# undef XXH_PROCESS8_64 +#endif + XXH_FORCE_INLINE xxh_u64 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) { @@ -1471,10 +1903,10 @@ XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment if (len>=32) { const xxh_u8* const limit = bEnd - 32; - xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2; - xxh_u64 v2 = seed + PRIME64_2; + xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + xxh_u64 v2 = seed + XXH_PRIME64_2; xxh_u64 v3 = seed + 0; - xxh_u64 v4 = seed - PRIME64_1; + xxh_u64 v4 = seed - XXH_PRIME64_1; do { v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; @@ -1490,7 +1922,7 @@ XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment h64 = XXH64_mergeRound(h64, v4); } else { - h64 = seed + PRIME64_5; + h64 = seed + XXH_PRIME64_5; } h64 += (xxh_u64) len; @@ -1539,12 +1971,12 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed) { - XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + XXH64_state_t state; /* use a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; + state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + state.v2 = seed + XXH_PRIME64_2; state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; + state.v4 = seed - XXH_PRIME64_1; /* do not write into reserved64, might be removed in a future version */ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64)); return XXH_OK; @@ -1627,7 +2059,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state) h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { - h64 = state->v3 /*seed*/ + PRIME64_5; + h64 = state->v3 /*seed*/ + XXH_PRIME64_5; } h64 += (xxh_u64) state->total_len; @@ -1657,8 +2089,2671 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src * New generation hash designed for speed on small keys and vectorization ************************************************************************ */ -#include "xxh3.h" +/* === Compiler specifics === */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ +# define XXH_RESTRICT restrict +#else +/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */ +# define XXH_RESTRICT /* disable */ +#endif + +#if (defined(__GNUC__) && (__GNUC__ >= 3)) \ + || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \ + || defined(__clang__) +# define XXH_likely(x) __builtin_expect(x, 1) +# define XXH_unlikely(x) __builtin_expect(x, 0) +#else +# define XXH_likely(x) (x) +# define XXH_unlikely(x) (x) +#endif + +#if defined(__GNUC__) +# if 
defined(__AVX2__) +# include +# elif defined(__SSE2__) +# include +# elif defined(__ARM_NEON__) || defined(__ARM_NEON) +# define inline __inline__ /* circumvent a clang bug */ +# include +# undef inline +# endif +#elif defined(_MSC_VER) +# include +#endif + +/* + * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while + * remaining a true 64-bit/128-bit hash function. + * + * This is done by prioritizing a subset of 64-bit operations that can be + * emulated without too many steps on the average 32-bit machine. + * + * For example, these two lines seem similar, and run equally fast on 64-bit: + * + * xxh_u64 x; + * x ^= (x >> 47); // good + * x ^= (x >> 13); // bad + * + * However, to a 32-bit machine, there is a major difference. + * + * x ^= (x >> 47) looks like this: + * + * x.lo ^= (x.hi >> (47 - 32)); + * + * while x ^= (x >> 13) looks like this: + * + * // note: funnel shifts are not usually cheap. + * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); + * x.hi ^= (x.hi >> 13); + * + * The first one is significantly faster than the second, simply because the + * shift is larger than 32. This means: + * - All the bits we need are in the upper 32 bits, so we can ignore the lower + * 32 bits in the shift. + * - The shift result will always fit in the lower 32 bits, and therefore, + * we can ignore the upper 32 bits in the xor. + * + * Thanks to this optimization, XXH3 only requires these features to be efficient: + * + * - Usable unaligned access + * - A 32-bit or 64-bit ALU + * - If 32-bit, a decent ADC instruction + * - A 32 or 64-bit multiply with a 64-bit result + * - For the 128-bit variant, a decent byteswap helps short inputs. + * + * The first two are already required by XXH32, and almost all 32-bit and 64-bit + * platforms which can run XXH32 can run XXH3 efficiently. + * + * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one + * notable exception. + * + * First of all, Thumb-1 lacks support for the UMULL instruction which + * performs the important long multiply. This means numerous __aeabi_lmul + * calls. + * + * Second of all, the 8 functional registers are just not enough. + * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." 
+#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ +#define XXH_SCALAR 0 /* Portable scalar version */ +#define XXH_SSE2 1 /* SSE2 for Pentium 4 and all x86_64 */ +#define XXH_AVX2 2 /* AVX2 for Haswell and Bulldozer */ +#define XXH_AVX512 3 /* AVX512 for Skylake and Icelake */ +#define XXH_NEON 4 /* NEON for most ARMv7-A and all AArch64 */ +#define XXH_VSX 5 /* VSX and ZVector for POWER8/z13 */ + +#ifndef XXH_VECTOR /* can be defined on command line */ +# if defined(__AVX512F__) +# define XXH_VECTOR XXH_AVX512 +# elif defined(__AVX2__) +# define XXH_VECTOR XXH_AVX2 +# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) +# define XXH_VECTOR XXH_SSE2 +# elif defined(__GNUC__) /* msvc support maybe later */ \ + && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \ + && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) +# define XXH_VECTOR XXH_NEON +# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ + || (defined(__s390x__) && defined(__VEC__)) \ + && defined(__GNUC__) /* TODO: IBM XL */ +# define XXH_VECTOR XXH_VSX +# else +# define XXH_VECTOR XXH_SCALAR +# endif +#endif + +/* + * Controls the alignment of the accumulator, + * for compatibility with aligned vector loads, which are usually faster. + */ +#ifndef XXH_ACC_ALIGN +# if defined(XXH_X86DISPATCH) +# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ +# elif XXH_VECTOR == XXH_SCALAR /* scalar */ +# define XXH_ACC_ALIGN 8 +# elif XXH_VECTOR == XXH_SSE2 /* sse2 */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_AVX2 /* avx2 */ +# define XXH_ACC_ALIGN 32 +# elif XXH_VECTOR == XXH_NEON /* neon */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_VSX /* vsx */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_AVX512 /* avx512 */ +# define XXH_ACC_ALIGN 64 +# endif +#endif +#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ + || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 +# define XXH_SEC_ALIGN XXH_ACC_ALIGN +#else +# define XXH_SEC_ALIGN 8 +#endif + +/* + * UGLY HACK: + * GCC usually generates the best code with -O3 for xxHash. + * + * However, when targeting AVX2, it is overzealous in its unrolling resulting + * in code roughly 3/4 the speed of Clang. + * + * There are other issues, such as GCC splitting _mm256_loadu_si256 into + * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which + * only applies to Sandy and Ivy Bridge... which don't even support AVX2. + * + * That is why when compiling the AVX2 version, it is recommended to use either + * -O2 -mavx2 -march=haswell + * or + * -O2 -mavx2 -mno-avx256-split-unaligned-load + * for decent performance, or to use Clang instead. + * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + + +#if XXH_VECTOR == XXH_NEON +/* + * NEON's setup for vmlal_u32 is a little more complicated than it is on + * SSE2, AVX2, and VSX. 
+ * + * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast. + * + * To do the same operation, the 128-bit 'Q' register needs to be split into + * two 64-bit 'D' registers, performing this operation:: + * + * [ a | b ] + * | '---------. .--------' | + * | x | + * | .---------' '--------. | + * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ] + * + * Due to significant changes in aarch64, the fastest method for aarch64 is + * completely different than the fastest method for ARMv7-A. + * + * ARMv7-A treats D registers as unions overlaying Q registers, so modifying + * D11 will modify the high half of Q5. This is similar to how modifying AH + * will only affect bits 8-15 of AX on x86. + * + * VZIP takes two registers, and puts even lanes in one register and odd lanes + * in the other. + * + * On ARMv7-A, this strangely modifies both parameters in place instead of + * taking the usual 3-operand form. + * + * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the + * lower and upper halves of the Q register to end up with the high and low + * halves where we want - all in one instruction. + * + * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] } + * + * Unfortunately we need inline assembly for this: Instructions modifying two + * registers at once is not possible in GCC or Clang's IR, and they have to + * create a copy. + * + * aarch64 requires a different approach. + * + * In order to make it easier to write a decent compiler for aarch64, many + * quirks were removed, such as conditional execution. + * + * NEON was also affected by this. + * + * aarch64 cannot access the high bits of a Q-form register, and writes to a + * D-form register zero the high bits, similar to how writes to W-form scalar + * registers (or DWORD registers on x86_64) work. + * + * The formerly free vget_high intrinsics now require a vext (with a few + * exceptions) + * + * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent + * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one + * operand. + * + * The equivalent of the VZIP.32 on the lower and upper halves would be this + * mess: + * + * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] } + * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] } + * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] } + * + * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN): + * + * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32); + * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF); + * + * This is available on ARMv7-A, but is less efficient than a single VZIP.32. 
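+ *
+ * For reference, the same split written with intrinsics (illustrative only;
+ * the real code goes through the XXH_SPLIT_IN_PLACE macro defined below):
+ *
+ *     uint32x2_t hi = vshrn_n_u64(in, 32);  // SHRN: top 32 bits of each lane
+ *     uint32x2_t lo = vmovn_u64(in);        // XTN:  bottom 32 bits of each lane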
+ */ + +/* + * Function-like macro: + * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi) + * { + * outLo = (uint32x2_t)(in & 0xFFFFFFFF); + * outHi = (uint32x2_t)(in >> 32); + * in = UNDEFINED; + * } + */ +# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \ + && defined(__GNUC__) \ + && !defined(__aarch64__) && !defined(__arm64__) +# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ + do { \ + /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \ + /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \ + /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \ + __asm__("vzip.32 %e0, %f0" : "+w" (in)); \ + (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \ + (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \ + } while (0) +# else +# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ + do { \ + (outLo) = vmovn_u64 (in); \ + (outHi) = vshrn_n_u64 ((in), 32); \ + } while (0) +# endif +#endif /* XXH_VECTOR == XXH_NEON */ + +/* + * VSX and Z Vector helpers. + * + * This is very messy, and any pull requests to clean this up are welcome. + * + * There are a lot of problems with supporting VSX and s390x, due to + * inconsistent intrinsics, spotty coverage, and multiple endiannesses. + */ +#if XXH_VECTOR == XXH_VSX +# if defined(__s390x__) +# include <s390intrin.h> +# else +/* gcc's altivec.h can have the unwanted consequence to unconditionally + * #define bool, vector, and pixel keywords, + * with bad consequences for programs already using these keywords for other purposes. + * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined. + * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler, + * but it seems that, in some cases, it isn't. + * Force the build macro to be defined, so that keywords are not altered. + */ +# if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__) +# define __APPLE_ALTIVEC__ +# endif +# include <altivec.h> +# endif + +typedef __vector unsigned long long xxh_u64x2; +typedef __vector unsigned char xxh_u8x16; +typedef __vector unsigned xxh_u32x4; + +# ifndef XXH_VSX_BE +# if defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_VSX_BE 1 +# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ +# warning "-maltivec=be is not recommended. Please use native endianness." +# define XXH_VSX_BE 1 +# else +# define XXH_VSX_BE 0 +# endif +# endif /* !defined(XXH_VSX_BE) */ + +# if XXH_VSX_BE +/* A wrapper for POWER9's vec_revb. */ +# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) +# define XXH_vec_revb vec_revb +# else +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) +{ + xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, + 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; + return vec_perm(val, val, vByteSwap); +} +# endif +# endif /* XXH_VSX_BE */ + +/* + * Performs an unaligned load and byte swaps it on big endian. + */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) +{ + xxh_u64x2 ret; + memcpy(&ret, ptr, sizeof(xxh_u64x2)); +# if XXH_VSX_BE + ret = XXH_vec_revb(ret); +# endif + return ret; +} + +/* + * vec_mulo and vec_mule are very problematic intrinsics on PowerPC + * + * These intrinsics weren't added until GCC 8, despite existing for a while, + * and they are endian dependent. Also, their meanings swap depending on the version.
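+ *
+ * Conceptually, for 32-bit lanes a[0..3] and b[0..3], they compute
+ *
+ *     vec_mule(a, b) = { (xxh_u64)a[0]*b[0], (xxh_u64)a[2]*b[2] }  // "even" lanes
+ *     vec_mulo(a, b) = { (xxh_u64)a[1]*b[1], (xxh_u64)a[3]*b[3] }  // "odd" lanes
+ *
+ * and which lane indices count as "even" is precisely the endian-dependent part.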
+ * */ +# if defined(__s390x__) + /* s390x is always big endian, no issue on this platform */ +# define XXH_vec_mulo vec_mulo +# define XXH_vec_mule vec_mule +# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) +/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ +# define XXH_vec_mulo __builtin_altivec_vmulouw +# define XXH_vec_mule __builtin_altivec_vmuleuw +# else +/* gcc needs inline assembly */ +/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) +{ + xxh_u64x2 result; + __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) +{ + xxh_u64x2 result; + __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +# endif /* XXH_vec_mulo, XXH_vec_mule */ +#endif /* XXH_VECTOR == XXH_VSX */ + + +/* prefetch + * can be disabled, by declaring XXH_NO_PREFETCH build macro */ +#if defined(XXH_NO_PREFETCH) +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +#else +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) +# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +# else +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +# endif +#endif /* XXH_NO_PREFETCH */ + + +/* ========================================== + * XXH3 default settings + * ========================================== */ + +#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ + +#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/* Pseudorandom secret taken directly from FARSH */ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +/* + * Calculates a 32-bit to 64-bit long multiply. 
+ * + * Wraps __emulu on MSVC x86 because it tends to call __allmul when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit MULL, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) + * { + * return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); + * } + */ +#if defined(_MSC_VER) && defined(_M_IX86) +# include +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/* + * Calculates a 64->128-bit long multiply. + * + * Uses __uint128_t and _umul128 if available, otherwise uses a scalar version. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. + * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if defined(__GNUC__) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif defined(_M_X64) || defined(_M_IA64) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. 
Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. */ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/* + * Does a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/* Seems to produce slightly better code on GCC for some reason. */ +XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= 0x165667919E3779F9ULL; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= 0x9FB21C651E98DF25ULL; + h64 ^= (h64 >> 35) + len ; + h64 *= 0x9FB21C651E98DF25ULL; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. 
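+ *
+ * For instance, the 0-16 byte path further below is nothing more than a length
+ * dispatch (paraphrased from XXH3_len_0to16_64b(); every branch runs in
+ * constant time):
+ *
+ *     if (len > 8)  return XXH3_len_9to16_64b(input, len, secret, seed);
+ *     if (len >= 4) return XXH3_len_4to8_64b (input, len, secret, seed);
+ *     if (len)      return XXH3_len_1to3_64b (input, len, secret, seed);
+ *     return XXH64_avalanche(seed ^ bitflip);  // len == 0; bitflip is derived from the secret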
+ * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len < 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(8 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. 
+ * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. + * + * This is not too bad for a non-cryptographic hash function, especially with + * only 64 bit outputs. + * + * The 128-bit variant (which trades some speed for strength) is NOT affected + * by this, although it is always a good idea to use a proper seed if you care + * about strength. + */ +XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) +{ +#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ + /* + * UGLY HACK: + * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in + * slower code. + * + * By forcing seed64 into a register, we disrupt the cost model and + * cause it to scalarize. See `XXH32_round()` + * + * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, + * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on + * GCC 9.2, despite both emitting scalar code. + * + * GCC generates much better scalar code than Clang for the rest of XXH3, + * which is why finding a more optimal codepath is an interest. + */ + __asm__ ("" : "+r" (seed64)); +#endif + { xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input+8); + return XXH3_mul128_fold64( + input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret+8) - seed64) + ); + } +} + +/* For mid range keys, XXH3 uses a Mum-hash variant. 
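+ * ("Mum" meaning: multiply two 64-bit words into a 128-bit product, then mix
+ * the product back down to 64 bits; in pseudo-code:
+ *     fold64(a, b) = lo64((u128)a * b) ^ hi64((u128)a * b)
+ * which is XXH3_mul128_fold64() above, fed by XXH3_mix16B() with 16 bytes of
+ * input XORed against seed-adjusted secret words.)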
*/ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { xxh_u64 acc = len * XXH_PRIME64_1; + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc += XXH3_mix16B(input+48, secret+96, seed); + acc += XXH3_mix16B(input+len-64, secret+112, seed); + } + acc += XXH3_mix16B(input+32, secret+64, seed); + acc += XXH3_mix16B(input+len-48, secret+80, seed); + } + acc += XXH3_mix16B(input+16, secret+32, seed); + acc += XXH3_mix16B(input+len-32, secret+48, seed); + } + acc += XXH3_mix16B(input+0, secret+0, seed); + acc += XXH3_mix16B(input+len-16, secret+16, seed); + + return XXH3_avalanche(acc); + } +} + +#define XXH3_MIDSIZE_MAX 240 + +XXH_NO_INLINE XXH64_hash_t +XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + #define XXH3_MIDSIZE_STARTOFFSET 3 + #define XXH3_MIDSIZE_LASTOFFSET 17 + + { xxh_u64 acc = len * XXH_PRIME64_1; + int const nbRounds = (int)len / 16; + int i; + for (i=0; i<8; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); + } + acc = XXH3_avalanche(acc); + XXH_ASSERT(nbRounds >= 8); +#if defined(__clang__) /* Clang */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. + * In everywhere else, it uses scalar code. + * + * For 64->128-bit multiplies, even if the NEON was 100% optimal, it + * would still be slower than UMAAL (see XXH_mult64to128). + * + * Unfortunately, Clang doesn't handle the long multiplies properly and + * converts them to the nonexistent "vmulq_u64" intrinsic, which is then + * scalarized into an ugly mess of VMOV.32 instructions. + * + * This mess is difficult to avoid without turning autovectorization + * off completely, but they are usually relatively minor and/or not + * worth it to fix. + * + * This loop is the easiest to fix, as unlike XXH32, this pragma + * _actually works_ because it is a loop vectorization instead of an + * SLP vectorization. + */ + #pragma clang loop vectorize(disable) +#endif + for (i=8 ; i < nbRounds; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + } + /* last bytes */ + acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); + return XXH3_avalanche(acc); + } +} + + +/* ======= Long Keys ======= */ + +#define XXH_STRIPE_LEN 64 +#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ +#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + +#ifdef XXH_OLD_NAMES +# define STRIPE_LEN XXH_STRIPE_LEN +# define ACC_NB XXH_ACC_NB +#endif + +XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) +{ + if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); + memcpy(dst, &v64, sizeof(v64)); +} + +/* Several intrinsic functions below are supposed to accept __int64 as argument, + * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . + * However, several environments do not define __int64 type, + * requiring a workaround. 
+ */ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) + typedef int64_t xxh_i64; +#else + /* the following type must have a width of 64-bit */ + typedef long long xxh_i64; +#endif + +/* + * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. + * + * It is a hardened version of UMAC, based off of FARSH's implementation. + * + * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD + * implementations, and it is ridiculously fast. + * + * We harden it by mixing the original input to the accumulators as well as the product. + * + * This means that in the (relatively likely) case of a multiply by zero, the + * original input is preserved. + * + * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve + * cross-pollination, as otherwise the upper and lower halves would be + * essentially independent. + * + * This doesn't matter on 64-bit hashes since they all get merged together in + * the end, so we skip the extra step. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. + */ + +#if (XXH_VECTOR == XXH_AVX512) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ALIGN(64) __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
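+ *
+ * In scalar terms, each 64-bit lane is scrambled like this (roughly what
+ * XXH3_scrambleAcc_scalar() further below does; the SIMD variants apply the
+ * same three steps to several lanes at once):
+ *
+ *     acc[i] ^= acc[i] >> 47;    // xorshift
+ *     acc[i] ^= secret[i];       // mix in the key
+ *     acc[i] *= XXH_PRIME32_1;   // scramble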
+ */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { XXH_ALIGN(64) __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64); + + XXH_ALIGN(64) const __m512i* const src = (const __m512i*) XXH3_kSecret; + XXH_ALIGN(64) __m512i* const dest = ( __m512i*) customSecret; + int i; + for (i=0; i < nbRounds; ++i) { + /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*', + * this will warn "discards ‘const’ qualifier". */ + union { + XXH_ALIGN(64) const __m512i* cp; + XXH_ALIGN(64) void* p; + } remote_const_void; + remote_const_void.cp = src + i; + dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { XXH_ALIGN(32) __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ + const __m256i* const xsecret = (const __m256i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* data_vec = xinput[i]; */ + __m256i const data_vec = _mm256_loadu_si256 (xinput+i); + /* key_vec = xsecret[i]; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm256_add_epi64(product, sum); + } } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xsecret = (const __m256i *) secret; + const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); + (void)(&XXH_writeLE64); + XXH_PREFETCH(customSecret); + { __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64, -(xxh_i64)seed64, (xxh_i64)seed64); + + XXH_ALIGN(64) const __m256i* const src = (const __m256i*) XXH3_kSecret; + XXH_ALIGN(64) __m256i* dest = ( __m256i*) customSecret; + +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + * The asm hack causes Clang to assume that XXH3_kSecretPtr aliases with + * customSecret, and on aarch64, this prevented LDP from merging two + * loads together for free. Putting the loads together before the stores + * properly generates LDP. 
+ */ + __asm__("" : "+r" (dest)); +# endif + + /* GCC -O2 need unroll loop manually */ + dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed); + dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed); + dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed); + dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed); + dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed); + dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed); + } +} + +#endif + +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { XXH_ALIGN(16) __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ + const __m128i* const xsecret = (const __m128i *) secret; + const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); + __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); + /* xacc[i] ^= xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); + +# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 + // MSVC 32bit mode does not support _mm_set_epi64x before 2015 + XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, -(xxh_i64)seed64 }; + __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); +# else + __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64); +# endif + int i; + + XXH_ALIGN(64) const float* const src = (float const*) XXH3_kSecret; + XXH_ALIGN(XXH_SEC_ALIGN) __m128i* dest = (__m128i*) customSecret; +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + __asm__("" : "+r" (dest)); +# endif + + for (i=0; i < nbRounds; ++i) { + dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src+i*4)), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_NEON) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { + XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. 
*/ + uint8_t const* const xinput = (const uint8_t *) input; + uint8_t const* const xsecret = (const uint8_t *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) { + /* data_vec = xinput[i]; */ + uint8x16_t data_vec = vld1q_u8(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); + uint64x2_t data_key; + uint32x2_t data_key_lo, data_key_hi; + /* xacc[i] += swap(data_vec); */ + uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); + uint64x2_t const swapped = vextq_u64(data64, data64, 1); + xacc[i] = vaddq_u64 (xacc[i], swapped); + /* data_key = data_vec ^ key_vec; */ + data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); + /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); + * data_key_hi = (uint32x2_t) (data_key >> 32); + * data_key = UNDEFINED; */ + XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); + /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ + xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi); + + } + } +} + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { uint64x2_t* xacc = (uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47); + uint64x2_t data_vec = veorq_u64 (acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec)); + + /* xacc[i] *= XXH_PRIME32_1 */ + uint32x2_t data_key_lo, data_key_hi; + /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF); + * data_key_hi = (uint32x2_t) (xacc[i] >> 32); + * xacc[i] = UNDEFINED; */ + XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); + { /* + * prod_hi = (data_key >> 32) * XXH_PRIME32_1; + * + * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will + * incorrectly "optimize" this: + * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b)); + * shifted = vshll_n_u32(tmp, 32); + * to this: + * tmp = "vmulq_u64"(a, b); // no such thing! + * shifted = vshlq_n_u64(tmp, 32); + * + * However, unlike SSE, Clang lacks a 64-bit multiply routine + * for NEON, and it scalarizes two 64-bit multiplies instead. + * + * vmull_u32 has the same timing as vmul_u32, and it avoids + * this bug completely. 
+ * See https://bugs.llvm.org/show_bug.cgi?id=39967 + */ + uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime); + /* xacc[i] = prod_hi << 32; */ + xacc[i] = vshlq_n_u64(prod_hi, 32); + /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime); + } + } } +} + +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + xxh_u64x2* const xacc = (xxh_u64x2*) acc; /* presumed aligned */ + xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */ + xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + xacc[i] += product; + + /* swap high and low halves */ +#ifdef __s390x__ + xacc[i] += vec_permi(data_vec, data_vec, 2); +#else + xacc[i] += vec_xxpermdi(data_vec, data_vec, 2); +#endif + } +} + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_u64x2* const xacc = (xxh_u64x2*) acc; + const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +/* scalar variants - universal */ + +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ + const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */ + const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ + size_t i; + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + for (i=0; i < XXH_ACC_NB; i++) { + xxh_u64 const data_val = XXH_readLE64(xinput + 8*i); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8); + xacc[i ^ 1] += data_val; /* swap adjacent lanes */ + xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32); + } +} + +XXH_FORCE_INLINE 
void +XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ + const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ + size_t i; + XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); + for (i=0; i < XXH_ACC_NB; i++) { + xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i); + xxh_u64 acc64 = xacc[i]; + acc64 = XXH_xorshift64(acc64, 47); + acc64 ^= key64; + acc64 *= XXH_PRIME32_1; + xacc[i] = acc64; + } +} + +XXH_FORCE_INLINE void +XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + /* + * We need a separate pointer for the hack below, + * which requires a non-const pointer. + * Any decent compiler will optimize this out otherwise. + */ + const xxh_u8* kSecretPtr = XXH3_kSecret; + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + +#if defined(__clang__) && defined(__aarch64__) + /* + * UGLY HACK: + * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are + * placed sequentially, in order, at the top of the unrolled loop. + * + * While MOVK is great for generating constants (2 cycles for a 64-bit + * constant compared to 4 cycles for LDR), long MOVK chains stall the + * integer pipelines: + * I L S + * MOVK + * MOVK + * MOVK + * MOVK + * ADD + * SUB STR + * STR + * By forcing loads from memory (as the asm line causes Clang to assume + * that XXH3_kSecretPtr has been changed), the pipelines are used more + * efficiently: + * I L S + * LDR + * ADD LDR + * SUB STR + * STR + * XXH3_64bits_withSeed, len == 256, Snapdragon 835 + * without hack: 2654.4 MB/s + * with hack: 3202.9 MB/s + */ + __asm__("" : "+r" (kSecretPtr)); +#endif + /* + * Note: in debug mode, this overrides the asm optimization + * and Clang will emit MOVK chains again. + */ + XXH_ASSERT(kSecretPtr == XXH3_kSecret); + + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; + int i; + for (i=0; i < nbRounds; i++) { + /* + * The asm hack causes Clang to assume that kSecretPtr aliases with + * customSecret, and on aarch64, this prevented LDP from merging two + * loads together for free. Putting the loads together before the stores + * properly generates LDP. 
+ */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + + + +#ifndef XXH_PREFETCH_DIST +# ifdef __clang__ +# define XXH_PREFETCH_DIST 320 +# else +# if (XXH_VECTOR == XXH_AVX512) +# define XXH_PREFETCH_DIST 512 +# else +# define XXH_PREFETCH_DIST 384 +# endif +# endif /* __clang__ */ +#endif /* XXH_PREFETCH_DIST */ + +/* + * XXH3_accumulate() + * Loops over XXH3_accumulate_512(). 
+ * Assumption: nbStripes will not overflow the secret size + */ +XXH_FORCE_INLINE void +XXH3_accumulate( xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes, + XXH3_f_accumulate_512 f_acc512) +{ + size_t n; + for (n = 0; n < nbStripes; n++ ) { + const xxh_u8* const in = input + n*XXH_STRIPE_LEN; + XXH_PREFETCH(in + XXH_PREFETCH_DIST); + f_acc512(acc, + in, + secret + n*XXH_SECRET_CONSUME_RATE); + } +} + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); + } } +} + +XXH_FORCE_INLINE xxh_u64 +XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) +{ + return XXH3_mul128_fold64( + acc[0] ^ XXH_readLE64(secret), + acc[1] ^ XXH_readLE64(secret+8) ); +} + +static XXH64_hash_t +XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) +{ + xxh_u64 result64 = start; + size_t i = 0; + + for (i = 0; i < 4; i++) { + result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); +#if defined(__clang__) /* Clang */ \ + && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Prevent autovectorization on Clang ARMv7-a. Exact same problem as + * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. 
+ * XXH3_64bits, len == 256, Snapdragon 835: + * without hack: 2063.7 MB/s + * with hack: 2560.7 MB/s + */ + __asm__("" : "+r" (result64)); +#endif + } + + return XXH3_avalanche(result64); +} + +#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ + XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, + const void* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + /* do not align on 8, so that the secret is different from the accumulator */ +#define XXH_SECRET_MERGEACCS_START 11 + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1); +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + * Since the function is not inlined, the compiler may not be able to understand that, + * in some scenarios, its `secret` argument is actually a compile time constant. + * This variant enforces that the compiler can detect that, + * and uses this opportunity to streamline the generated code for better performance. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on alteration of default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc512, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc512, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. 
+ */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* input, size_t len, + XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len) +{ + return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); +} + + +/* === XXH3 streaming === */ + +/* + * Malloc's a pointer that is always aligned to align. + * + * This must be freed with `XXH_alignedFree()`. + * + * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte + * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2 + * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON. + * + * This underalignment previously caused a rather obvious crash which went + * completely unnoticed due to XXH3_createState() not actually being tested. + * Credit to RedSpah for noticing this bug. + * + * The alignment is done manually: Functions like posix_memalign or _mm_malloc + * are avoided: To maintain portability, we would have to write a fallback + * like this anyways, and besides, testing for the existence of library + * functions without relying on external build tools is impossible. + * + * The method is simple: Overallocate, manually align, and store the offset + * to the original behind the returned pointer. + * + * Align must be a power of 2 and 8 <= align <= 128. 
+ */ +static void* XXH_alignedMalloc(size_t s, size_t align) +{ + XXH_ASSERT(align <= 128 && align >= 8); /* range check */ + XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ + XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ + { /* Overallocate to make room for manual realignment and an offset byte */ + xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); + if (base != NULL) { + /* + * Get the offset needed to align this pointer. + * + * Even if the returned pointer is aligned, there will always be + * at least one byte to store the offset to the original pointer. + */ + size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ + /* Add the offset for the now-aligned pointer */ + xxh_u8* ptr = base + offset; + + XXH_ASSERT((size_t)ptr % align == 0); + + /* Store the offset immediately before the returned pointer. */ + ptr[-1] = (xxh_u8)offset; + return ptr; + } + return NULL; + } +} +/* + * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass + * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. + */ +static void XXH_alignedFree(void* p) +{ + if (p != NULL) { + xxh_u8* ptr = (xxh_u8*)p; + /* Get the offset byte we added in XXH_malloc. */ + xxh_u8 offset = ptr[-1]; + /* Free the original malloc'd pointer */ + xxh_u8* base = ptr - offset; + XXH_free(base); + } +} +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) +{ + XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); + if (state==NULL) return NULL; + XXH3_INITSTATE(state); + return state; +} + +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) +{ + XXH_alignedFree(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void +XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state) +{ + memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3_64bits_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char*)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->extSecret = (const unsigned char*)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_64bits_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_64bits_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode 
+XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_64bits_reset(statePtr); + if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_64bits_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/* Note : when XXH3_consumeStripes() is invoked, + * there must be a guarantee that at least one more byte must be consumed from input + * so that the function can blindly consume all stripes using the "normal" secret segment */ +XXH_FORCE_INLINE void +XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, + size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, + const xxh_u8* XXH_RESTRICT input, size_t nbStripes, + const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */ + XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); + if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) { + /* need a scrambling operation */ + size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr; + size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock; + XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512); + f_scramble(acc, secret + secretLimit); + XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512); + *nbStripesSoFarPtr = nbStripesAfterBlock; + } else { + XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512); + *nbStripesSoFarPtr += nbStripes; + } +} + +/* + * Both XXH3_64bits_update and XXH3_128bits_update use this routine. + */ +XXH_FORCE_INLINE XXH_errorcode +XXH3_update(XXH3_state_t* state, + const xxh_u8* input, size_t len, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const xxh_u8* const bEnd = input + len; + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + + state->totalLen += len; + + if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */ + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. 
+     */
+    if (state->bufferedSize) {
+        size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
+        XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
+        input += loadSize;
+        XXH3_consumeStripes(state->acc,
+                           &state->nbStripesSoFar, state->nbStripesPerBlock,
+                            state->buffer, XXH3_INTERNALBUFFER_STRIPES,
+                            secret, state->secretLimit,
+                            f_acc512, f_scramble);
+        state->bufferedSize = 0;
+    }
+    XXH_ASSERT(input < bEnd);
+
+    /* Consume input by a multiple of internal buffer size */
+    if (input+XXH3_INTERNALBUFFER_SIZE < bEnd) {
+        const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
+        do {
+            XXH3_consumeStripes(state->acc,
+                               &state->nbStripesSoFar, state->nbStripesPerBlock,
+                                input, XXH3_INTERNALBUFFER_STRIPES,
+                                secret, state->secretLimit,
+                                f_acc512, f_scramble);
+            input += XXH3_INTERNALBUFFER_SIZE;
+        } while (input<limit);
+        /* for last partial stripe */
+        memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+    }
+    XXH_ASSERT(input < bEnd);
+
+    /* Some remaining input (always) : buffer it */
+    XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
+    state->bufferedSize = (XXH32_hash_t)(bEnd-input);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
+{
+    return XXH3_update(state, (const xxh_u8*)input, len,
+                       XXH3_accumulate_512, XXH3_scrambleAcc);
+}
+
+
+XXH_FORCE_INLINE void
+XXH3_digest_long (XXH64_hash_t* acc,
+                  const XXH3_state_t* state,
+                  const unsigned char* secret)
+{
+    /*
+     * Digest on a local copy. This way, the state remains unaltered, and it can
+     * continue ingesting more input afterwards.
+     */
+    memcpy(acc, state->acc, sizeof(state->acc));
+    if (state->bufferedSize >= XXH_STRIPE_LEN) {
+        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
+        size_t nbStripesSoFar = state->nbStripesSoFar;
+        XXH3_consumeStripes(acc,
+                           &nbStripesSoFar, state->nbStripesPerBlock,
+                            state->buffer, nbStripes,
+                            secret, state->secretLimit,
+                            XXH3_accumulate_512, XXH3_scrambleAcc);
+        /* last stripe */
+        XXH3_accumulate_512(acc,
+                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
+                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+    } else { /* bufferedSize < XXH_STRIPE_LEN */
+        xxh_u8 lastStripe[XXH_STRIPE_LEN];
+        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
+        XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
+        memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
+        memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
+        XXH3_accumulate_512(acc,
+                            lastStripe,
+                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+    }
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
+{
+    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+    if (state->totalLen > XXH3_MIDSIZE_MAX) {
+        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+        XXH3_digest_long(acc, state, secret);
+        return XXH3_mergeAccs(acc,
+                              secret + XXH_SECRET_MERGEACCS_START,
+                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
+    }
+    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
+    if (state->seed)
+        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                  secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+
+
+#define XXH_MIN(x, y) (((x) > (y)) ?
(y) : (x)) + +XXH_PUBLIC_API void +XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize) +{ + XXH_ASSERT(secretBuffer != NULL); + if (customSeedSize == 0) { + memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return; + } + XXH_ASSERT(customSeed != NULL); + + { size_t const segmentSize = sizeof(XXH128_hash_t); + size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize; + XXH128_canonical_t scrambler; + XXH64_hash_t seeds[12]; + size_t segnb; + XXH_ASSERT(nbSegments == 12); + XXH_ASSERT(segmentSize * nbSegments == XXH_SECRET_DEFAULT_SIZE); /* exact multiple */ + XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); + + /* + * Copy customSeed to seeds[], truncating or repeating as necessary. + */ + { size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds)); + size_t filled = toFill; + memcpy(seeds, customSeed, toFill); + while (filled < sizeof(seeds)) { + toFill = XXH_MIN(filled, sizeof(seeds) - filled); + memcpy((char*)seeds + filled, seeds, toFill); + filled += toFill; + } } + + /* generate secret */ + memcpy(secretBuffer, &scrambler, sizeof(scrambler)); + for (segnb=1; segnb < nbSegments; segnb++) { + size_t const segmentStart = segnb * segmentSize; + XXH128_canonical_t segment; + XXH128_canonicalFromHash(&segment, + XXH128(&scrambler, sizeof(scrambler), XXH_readLE64(seeds + segnb) + segnb) ); + memcpy((char*)secretBuffer + segmentStart, &segment, sizeof(segment)); + } } +} + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). + */ + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. 
*/ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= 0x9FB21C651E98DF25ULL; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; + xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. + */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + } else { + /* + * 64-bit optimized (albeit more confusing) version. 
+ * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; + + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + } } +} + +/* + * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t h128; + xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); + xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche( seed ^ bitfliph); + return h128; + } } +} + +/* + * A bit slower than XXH3_mix16B, but handles multiply by zero better. 
+ */ +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, + const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); + } + acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); + } + acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); + } + acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); + { XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + } +} + +XXH_NO_INLINE XXH128_hash_t +XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + { XXH128_hash_t acc; + int const nbRounds = (int)len / 32; + int i; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + for (i=0; i<4; i++) { + acc = XXH128_mix32B(acc, + input + (32 * i), + input + (32 * i) + 16, + secret + (32 * i), + seed); + } + acc.low64 = XXH3_avalanche(acc.low64); + acc.high64 = XXH3_avalanche(acc.high64); + XXH_ASSERT(nbRounds >= 4); + for (i=4 ; i < nbRounds; i++) { + acc = XXH128_mix32B(acc, + input + (32 * i), + input + (32 * i) + 16, + secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)), + seed); + } + /* last bytes */ + acc = XXH128_mix32B(acc, + input + len - 16, + input + len - 32, + secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, + 0ULL - seed); + + { XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)len * XXH_PRIME64_1); + h128.high64 = XXH3_mergeAccs(acc, + secret + secretSize + - sizeof(acc) - XXH_SECRET_MERGEACCS_START, + 
~((xxh_u64)len * XXH_PRIME64_2)); + return h128; + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, + XXH3_accumulate_512, XXH3_scrambleAcc); +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc512, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), + f_acc512, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const void* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_128bits_internal(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. 
+ */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} + + +/* === Public XXH128 API === */ + +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) +{ + return XXH3_128bits_internal(input, len, 0, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +{ + return XXH3_128bits_internal(input, len, 0, + (const xxh_u8*)secret, secretSize, + XXH3_hashLong_128b_withSecret); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_internal(input, len, seed, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH128(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ + +/* + * All the functions are actually the same as for 64-bit streaming variant. + * The only difference is the finalizatiom routine. + */ + +static void +XXH3_128bits_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + XXH3_64bits_reset_internal(statePtr, seed, secret, secretSize); +} + +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_128bits_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_128bits_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_128bits_reset(statePtr); + if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_128bits_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate_512, XXH3_scrambleAcc); +} + +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret;
+    if (state->totalLen > XXH3_MIDSIZE_MAX) {
+        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+        XXH3_digest_long(acc, state, secret);
+        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+        {   XXH128_hash_t h128;
+            h128.low64 = XXH3_mergeAccs(acc,
+                                        secret + XXH_SECRET_MERGEACCS_START,
+                                        (xxh_u64)state->totalLen * XXH_PRIME64_1);
+            h128.high64 = XXH3_mergeAccs(acc,
+                                         secret + state->secretLimit + XXH_STRIPE_LEN
+                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+            return h128;
+        }
+    }
+    /* len <= XXH3_MIDSIZE_MAX : short code */
+    if (state->seed)
+        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                   secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+
+/* 128-bit utility functions */
+
+#include <string.h> /* memcmp, memcpy */
+
+/* return : 1 is equal, 0 if different */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+    /* note : XXH128_hash_t is compact, it has no padding byte */
+    return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * return : >0 if *h128_1 > *h128_2
+ *          <0 if *h128_1 < *h128_2
+ *          =0 if *h128_1 == *h128_2 */
+XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
+{
+    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+    /* note : bets that, in most cases, hash values are different */
+    if (hcmp) return hcmp;
+    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
+
+
+/*====== Canonical representation ======*/
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) {
+        hash.high64 = XXH_swap64(hash.high64);
+        hash.low64  = XXH_swap64(hash.low64);
+    }
+    memcpy(dst, &hash.high64, sizeof(hash.high64));
+    memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(const XXH128_canonical_t* src)
+{
+    XXH128_hash_t h;
+    h.high64 = XXH_readBE64(src);
+    h.low64  = XXH_readBE64(src->digest + 8);
+    return h;
+}
+
+/* Pop our optimization override from above */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
+# pragma GCC pop_options
+#endif
 #endif /* XXH_NO_LONG_LONG */
diff --git a/lib/mmseqs/src/CMakeLists.txt b/lib/mmseqs/src/CMakeLists.txt
index c2a763b..27f6a69 100644
--- a/lib/mmseqs/src/CMakeLists.txt
+++ b/lib/mmseqs/src/CMakeLists.txt
@@ -130,7 +130,7 @@ else ()
     message("-- OMPTL sorting fallback")
 endif ()
 
-target_link_libraries(mmseqs-framework tinyexpr libzstd_static microtar)
+target_link_libraries(mmseqs-framework tinyexpr ${ZSTD_LIBRARIES} microtar)
 if (CYGWIN)
     target_link_libraries(mmseqs-framework nedmalloc)
 endif ()
diff --git a/lib/mmseqs/src/CommandDeclarations.h b/lib/mmseqs/src/CommandDeclarations.h
index 0f089a0..8620465 100644
--- a/lib/mmseqs/src/CommandDeclarations.h
+++ b/lib/mmseqs/src/CommandDeclarations.h
@@ -85,6 +85,7 @@ extern int profile2repseq(int argc, const char **argv, const Command& command);
 extern int proteinaln2nucl(int argc, const char
**argv, const Command& command); extern int rescorediagonal(int argc, const char **argv, const Command& command); extern int ungappedprefilter(int argc, const char **argv, const Command& command); +extern int unpackdb(int argc, const char **argv, const Command& command); extern int rbh(int argc, const char **argv, const Command& command); extern int result2flat(int argc, const char **argv, const Command& command); extern int result2msa(int argc, const char **argv, const Command& command); diff --git a/lib/mmseqs/src/MMseqsBase.cpp b/lib/mmseqs/src/MMseqsBase.cpp index 071e297..abb0456 100644 --- a/lib/mmseqs/src/MMseqsBase.cpp +++ b/lib/mmseqs/src/MMseqsBase.cpp @@ -84,13 +84,16 @@ std::vector baseCommands = { " - result_report: kraken style report\n" "# Download a sequence database with taxonomy information\n" "mmseqs databases UniProtKB/Swiss-Prot swissprotDB tmp\n\n" - "# Assign taxonomy based on top hit\n" + "# Assign taxonomy based on 2bLCA hit\n" "mmseqs easy-taxonomy examples/DB.fasta swissprotDB result tmp\n\n" - "# Assign taxonomy based on 2bLCA\n" - "mmseqs easy-taxonomy examples/DB.fasta swissprotDB result tmp --lca-mode 2\n", + "# Assign taxonomy based on top hit\n" + "mmseqs easy-taxonomy examples/DB.fasta swissprotDB result tmp --lca-mode 4\n\n" + "# Assign taxonomy without ORF prefilter\n" + "# Classifies higher percentage for short nucleotide input (e.g. short reads) at the cost of speed\n" + "mmseqs easy-taxonomy queryNuclDB swissprotDB result tmp --orf-filter 0\n", "Martin Steinegger ", " ... ", - CITATION_MMSEQS2, {{"queryFastaFile[.gz]", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_DATA|DbType::VARIADIC, &DbValidator::flatfileAndStdin }, + CITATION_TAXONOMY|CITATION_MMSEQS2, {{"queryFastaFile[.gz]", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_DATA|DbType::VARIADIC, &DbValidator::flatfileAndStdin }, {"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_HEADER|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"taxReports", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::flatfile }, {"tmpDir", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::directory }}}, @@ -109,7 +112,7 @@ std::vector baseCommands = { NULL, "Milot Mirdita ", " ", - CITATION_MMSEQS2, {{"selection", 0, DbType::ZERO_OR_ALL, &DbValidator::empty }, + CITATION_TAXONOMY|CITATION_MMSEQS2, {{"selection", 0, DbType::ZERO_OR_ALL, &DbValidator::empty }, {"sequenceDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, {"tmpDir", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::directory }}}, {"createdb", createdb, &par.createdb, COMMAND_DATABASE_CREATION, @@ -286,15 +289,18 @@ std::vector baseCommands = { "Taxonomic classification", "# Download a sequence database with taxonomy information\n" "mmseqs databases UniProtKB/Swiss-Prot swissprotDB tmp\n\n" - "# Assign taxonomy based on top hit\n" - "mmseqs taxonomy queryDB swissprotDB result tmp\n\n" "# Assign taxonomy based on 2bLCA\n" - "mmseqs taxonomy queryDB swissprotDB result tmp --lca-mode 2\n\n" + "mmseqs taxonomy queryDB swissprotDB result tmp\n\n" + "# Assign taxonomy based on top hit\n" + "mmseqs taxonomy queryDB swissprotDB result tmp --lca-mode 4\n\n" + "# Assign taxonomy without ORF prefilter\n" + "# Classifies higher percentage for short nucleotide input (e.g. 
short reads) at the cost of speed\n" + "mmseqs taxonomy queryNuclDB swissprotDB result tmp --orf-filter 0\n\n" "# Create a Krona report\n" "mmseqs taxonomyreport swissprotDB result report.html --report-mode 1\n", "Milot Mirdita & Martin Steinegger & Eli Levy Karin ", " ", - CITATION_MMSEQS2, {{"queryDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, + CITATION_TAXONOMY|CITATION_MMSEQS2, {{"queryDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, {"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"taxaDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::taxResult }, {"tmpDir", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::directory }}}, @@ -365,14 +371,14 @@ std::vector baseCommands = { NULL, "Martin Steinegger ", " ", - CITATION_MMSEQS2, {{"sequenceDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, + CITATION_TAXONOMY, {{"sequenceDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, {"tmpDir", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::directory }}}, {"createbintaxonomy", createbintaxonomy, &par.onlyverbosity, COMMAND_TAXONOMY | COMMAND_EXPERT, "Create binary taxonomy from NCBI input", NULL, "Milot Mirdita ", " ", - CITATION_MMSEQS2, {{"names.dmp", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::flatfile }, + CITATION_TAXONOMY, {{"names.dmp", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::flatfile }, {"nodes.dmp", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::flatfile }, {"merged.dmp", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::flatfile }, {"taxonomyFile", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::flatfile }}}, @@ -381,16 +387,16 @@ std::vector baseCommands = { NULL, "Martin Steinegger ", " ", - CITATION_MMSEQS2, {{"targetDB", DbType::ACCESS_MODE_INPUT|DbType::NEED_TAXONOMY, DbType::NEED_DATA, &DbValidator::taxSequenceDb }, + CITATION_TAXONOMY, {{"targetDB", DbType::ACCESS_MODE_INPUT|DbType::NEED_TAXONOMY, DbType::NEED_DATA, &DbValidator::taxSequenceDb }, {"resultDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::allDb }, {"resultDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::allDb }}}, {"taxonomyreport", taxonomyreport, &par.taxonomyreport, COMMAND_TAXONOMY | COMMAND_FORMAT_CONVERSION, "Create a taxonomy report in Kraken or Krona format", NULL, "Milot Mirdita & Florian Breitwieser ", - " ", - CITATION_MMSEQS2, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, - {"resultDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA | DbType::VARIADIC, &DbValidator::taxResult }, + " ", + CITATION_TAXONOMY, {{"seqTaxDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, + {"taxResultDB/resultDB/sequenceDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA | DbType::VARIADIC, &DbValidator::taxonomyReportInput }, {"taxonomyReport", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::flatfile }}}, {"filtertaxdb", filtertaxdb, &par.filtertaxdb, COMMAND_TAXONOMY, "Filter taxonomy result database", @@ -407,7 +413,7 @@ std::vector baseCommands = { "mmseqs filtertaxdb swissprotDB taxDB filteredTaxDB --taxon-list '9606||810'\n", "Martin Steinegger ", " ", - CITATION_MMSEQS2, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, 
&DbValidator::taxSequenceDb }, + CITATION_TAXONOMY, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"resultDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::taxResult }, {"taxDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::taxResult }}}, // TODO make consistent with seqTaxDB -> taxSeqDb in Wiki @@ -423,7 +429,7 @@ std::vector baseCommands = { "mmseqs filtertaxseqdb swissprotDB swissprotDB_human_and_chlamydia --taxon-list '9606||810'\n\n", "Eli Levy Karin & Martin Steinegger ", " ", - CITATION_MMSEQS2, {{"taxSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, + CITATION_TAXONOMY, {{"taxSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"taxSeqDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::taxSequenceDb }}}, {"aggregatetax", aggregatetax, &par.aggregatetax, COMMAND_TAXONOMY, "Aggregate multiple taxon labels to a single label", @@ -439,7 +445,7 @@ std::vector baseCommands = { "mmseqs aggregatetax swissprotDB orfsAaDb_h taxPerOrf taxPerContig --majority 0.5\n\n", "Eli Levy Karin ", " ", - CITATION_MMSEQS2, {{"taxSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, + CITATION_TAXONOMY, {{"taxSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"setToSeqMap", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::allDb }, {"taxResPerSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::taxResult }, {"taxResPerSetDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::taxResult }}}, @@ -457,7 +463,7 @@ std::vector baseCommands = { "mmseqs aggregatetaxweights swissprotDB orfsAaDb_h taxPerOrf taxPerOrf_aln taxPerContig --majority 0.5\n\n", "Eli Levy Karin ", " ", - CITATION_MMSEQS2, {{"taxSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, + CITATION_TAXONOMY, {{"taxSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"setToSeqMap", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::allDb }, {"taxResPerSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::taxResult }, {"taxAlnResPerSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::resultDb }, @@ -467,7 +473,7 @@ std::vector baseCommands = { NULL, "Milot Mirdita ", " ", - CITATION_MMSEQS2, {{"queryDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, + CITATION_TAXONOMY, {{"queryDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, {"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::taxSequenceDb }, {"resultDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::resultDb }, {"alignmentDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::alignmentDb }}}, @@ -476,7 +482,7 @@ std::vector baseCommands = { NULL, "Milot Mirdita ", " ", - CITATION_MMSEQS2, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, + CITATION_TAXONOMY|CITATION_MMSEQS2, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"resultDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::resultDb }, {"taxDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, 
&DbValidator::taxResult }}}, {"majoritylca", majoritylca, &par.majoritylca, COMMAND_TAXONOMY | COMMAND_EXPERT, @@ -484,7 +490,7 @@ std::vector baseCommands = { NULL, "Milot Mirdita ", " ", - CITATION_MMSEQS2, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, + CITATION_TAXONOMY, {{"targetDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::NEED_TAXONOMY, &DbValidator::taxSequenceDb }, {"resultDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::resultDb }, {"taxDB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::taxResult }}}, @@ -692,6 +698,13 @@ std::vector baseCommands = { " ", CITATION_MMSEQS2, {{"DB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, NULL }, {"DB", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::allDb }}}, + {"unpackdb", unpackdb, &par.onlyverbosity, COMMAND_STORAGE, + "Unpack a DB into separate files", + NULL, + "Milot Mirdita ", + " ", + CITATION_MMSEQS2, {{"DB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, NULL }, + {"outDir", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::directory }}}, {"touchdb", touchdb, &par.onlythreads, COMMAND_STORAGE, "Preload DB into memory (page cache)", NULL, @@ -1166,9 +1179,9 @@ std::vector baseCommands = { NULL, "Milot Mirdita ", " ... ", - CITATION_MMSEQS2|CITATION_UNICLUST, {{"ncbiTaxDir", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::VARIADIC, &DbValidator::flatfileAndStdin }, - {"nrSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, - {"mappingTSV", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::flatfile }}}, + CITATION_TAXONOMY, {{"ncbiTaxDir", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA|DbType::VARIADIC, &DbValidator::flatfileAndStdin }, + {"nrSeqDB", DbType::ACCESS_MODE_INPUT, DbType::NEED_DATA, &DbValidator::sequenceDb }, + {"mappingTSV", DbType::ACCESS_MODE_OUTPUT, DbType::NEED_DATA, &DbValidator::flatfile }}}, {"extractdomains", extractdomains, &par.extractdomains, COMMAND_SPECIAL, "Extract highest scoring alignment regions for each sequence from BLAST-tab file", NULL, diff --git a/lib/mmseqs/src/alignment/Alignment.cpp b/lib/mmseqs/src/alignment/Alignment.cpp index 93f3d6a..8306c83 100644 --- a/lib/mmseqs/src/alignment/Alignment.cpp +++ b/lib/mmseqs/src/alignment/Alignment.cpp @@ -34,6 +34,11 @@ Alignment::Alignment(const std::string &querySeqDB, const std::string &targetSeq if (addBacktrace == true) { alignmentMode = Parameters::ALIGNMENT_MODE_SCORE_COV_SEQID; } + outputClusterFormat = false; + if (par.alignmentMode == Parameters::ALIGNMENT_MODE_CLUSTER) { + alignmentMode = Parameters::ALIGNMENT_MODE_SCORE_ONLY; + outputClusterFormat = true; + } if (lcaAlign == true) { lcaSwMode = initSWMode(std::max(alignmentMode, (unsigned int)Parameters::ALIGNMENT_MODE_SCORE_ONLY), 0.0f, 0.0f); @@ -251,7 +256,11 @@ void Alignment::run() { } void Alignment::run(const std::string &outDB, const std::string &outDBIndex, const size_t dbFrom, const size_t dbSize, bool merge) { - DBWriter dbw(outDB.c_str(), outDBIndex.c_str(), threads, compressed, Parameters::DBTYPE_ALIGNMENT_RES); + int dbtype = Parameters::DBTYPE_ALIGNMENT_RES; + if(outputClusterFormat){ + dbtype = Parameters::DBTYPE_CLUSTER_RES; + } + DBWriter dbw(outDB.c_str(), outDBIndex.c_str(), threads, compressed, dbtype); dbw.open(); // handle no alignment case early, below would divide by 0 otherwise @@ -284,7 +293,7 @@ void Alignment::run(const std::string &outDB, const std::string &outDBIndex, con #endif 
std::string alnResultsOutString; alnResultsOutString.reserve(1024*1024); - char buffer[1024+32768]; + char buffer[1024 + 32768*4]; Sequence qSeq(maxSeqLen, querySeqType, m, 0, false, compBiasCorrection); Sequence dbSeq(maxSeqLen, targetSeqType, m, 0, false, compBiasCorrection); @@ -497,10 +506,16 @@ void Alignment::run(const std::string &outDB, const std::string &outDBIndex, con returnRes = &swRealignResults; } - - for (size_t result = 0; result < returnRes->size(); result++) { - size_t len = Matcher::resultToBuffer(buffer, (*returnRes)[result], addBacktrace); - alnResultsOutString.append(buffer, len); + if(outputClusterFormat) { + for (size_t result = 0; result < returnRes->size(); result++) { + alnResultsOutString.append(SSTR((*returnRes)[result].dbKey)); + alnResultsOutString.push_back('\n'); + } + }else{ + for (size_t result = 0; result < returnRes->size(); result++) { + size_t len = Matcher::resultToBuffer(buffer, (*returnRes)[result], addBacktrace); + alnResultsOutString.append(buffer, len); + } } dbw.writeData(alnResultsOutString.c_str(), alnResultsOutString.length(), queryDbKey, thread_idx); alnResultsOutString.clear(); diff --git a/lib/mmseqs/src/alignment/Alignment.h b/lib/mmseqs/src/alignment/Alignment.h index 819f204..4b6d0e9 100644 --- a/lib/mmseqs/src/alignment/Alignment.h +++ b/lib/mmseqs/src/alignment/Alignment.h @@ -85,6 +85,8 @@ class Alignment { int targetSeqType; bool compBiasCorrection; + bool outputClusterFormat; + int altAlignment; const unsigned int maxAccept; diff --git a/lib/mmseqs/src/alignment/rescorediagonal.cpp b/lib/mmseqs/src/alignment/rescorediagonal.cpp index cde0d99..434081c 100644 --- a/lib/mmseqs/src/alignment/rescorediagonal.cpp +++ b/lib/mmseqs/src/alignment/rescorediagonal.cpp @@ -124,7 +124,7 @@ int doRescorediagonal(Parameters &par, #ifdef OPENMP thread_idx = (unsigned int) omp_get_thread_num(); #endif - char buffer[1024 + 32768]; + char buffer[1024 + 32768*4]; std::string resultBuffer; resultBuffer.reserve(1000000); std::string queryBuffer; diff --git a/lib/mmseqs/src/clustering/AlignmentSymmetry.cpp b/lib/mmseqs/src/clustering/AlignmentSymmetry.cpp index 32db92a..acaeefa 100644 --- a/lib/mmseqs/src/clustering/AlignmentSymmetry.cpp +++ b/lib/mmseqs/src/clustering/AlignmentSymmetry.cpp @@ -57,6 +57,8 @@ void AlignmentSymmetry::readInData(DBReader*alnDbr, DBReader*alnDbr, DBReader0 ? 
sim : -sim); } + else if (Parameters::isEqualDbtype(alnType, Parameters::DBTYPE_CLUSTER_RES)) { + elementScoreTable[i][writePos] = (unsigned short) (USHRT_MAX); + } else { Debug(Debug::ERROR) << "Alignment format is not supported!\n"; EXIT(EXIT_FAILURE); diff --git a/lib/mmseqs/src/commons/BaseMatrix.cpp b/lib/mmseqs/src/commons/BaseMatrix.cpp index 86901b5..72a3a0c 100644 --- a/lib/mmseqs/src/commons/BaseMatrix.cpp +++ b/lib/mmseqs/src/commons/BaseMatrix.cpp @@ -164,20 +164,22 @@ double BaseMatrix::getBackgroundProb(size_t) { EXIT(EXIT_FAILURE); } -size_t BaseMatrix::memorySize(BaseMatrix *pMatrix){ - size_t matrixDataSize = pMatrix->matrixData.size() * sizeof(char); - size_t matrixNameSize = pMatrix->matrixName.size() * sizeof(char); +size_t BaseMatrix::memorySize(std::string & matrixName, std::string & matrixData){ + size_t matrixDataSize = matrixData.size() * sizeof(char); + size_t matrixNameSize = matrixName.size() * sizeof(char); return matrixDataSize + 1 + matrixNameSize; } -char * BaseMatrix::serialize(BaseMatrix *pMatrix) { - char* data = (char*) malloc(memorySize(pMatrix)); +char * BaseMatrix::serialize(std::string &matrixName, std::string &matrixData ) { + char* data = (char*) malloc(memorySize(matrixName, matrixData) + 1); char* p = data; - memcpy(p, pMatrix->matrixName.c_str(), pMatrix->matrixName.size() * sizeof(char)); - p += (pMatrix->matrixName.size() * sizeof(char)); + memcpy(p, matrixName.c_str(), matrixName.size() * sizeof(char)); + p += (matrixName.size() * sizeof(char)); memcpy(p, ":", 1); p += 1; - memcpy(p, pMatrix->matrixData.c_str(), pMatrix->matrixData.size() * sizeof(char)); + memcpy(p, matrixData.c_str(), matrixData.size() * sizeof(char)); + p += (matrixData.size() * sizeof(char));; + memcpy(p, "\0", 1); return data; } @@ -207,3 +209,20 @@ std::pair BaseMatrix::unserialize(const char * data){ } return std::make_pair(matrixName, matrixData); } + +std::string BaseMatrix::unserializeName(const char * data) { + size_t len = 0; + while(data[len] != '\0'){ + len++; + } + for (size_t pos = 0; pos < std::max(len, (size_t) 4) - 4; pos++) { + if (data[pos] == '.' 
+ && data[pos + 1] == 'o' + && data[pos + 2] == 'u' + && data[pos + 3] == 't' + && data[pos + 4] == ':') { + return std::string(data, pos + 4); + } + } + return data; +} diff --git a/lib/mmseqs/src/commons/BaseMatrix.h b/lib/mmseqs/src/commons/BaseMatrix.h index 0ec8aae..308e9a3 100644 --- a/lib/mmseqs/src/commons/BaseMatrix.h +++ b/lib/mmseqs/src/commons/BaseMatrix.h @@ -70,9 +70,10 @@ class BaseMatrix{ static void computeBackground(double **probMat, double *pBack, int alphabetSize, bool containsX); - static size_t memorySize(BaseMatrix *pMatrix); + static size_t memorySize(std::string & matrixName , std::string & matrixData); static std::pair unserialize(const char * data); - static char * serialize(BaseMatrix *pMatrix); + static char * serialize(std::string &matrixName, std::string &matrixData ); + static std::string unserializeName(const char * data); }; diff --git a/lib/mmseqs/src/commons/CMakeLists.txt b/lib/mmseqs/src/commons/CMakeLists.txt index 2b41892..4ab6269 100644 --- a/lib/mmseqs/src/commons/CMakeLists.txt +++ b/lib/mmseqs/src/commons/CMakeLists.txt @@ -33,7 +33,6 @@ set(commons_header_files commons/ScoreMatrix.h commons/Sequence.h commons/StringBlock.h - commons/StringBlock.cpp commons/SubstitutionMatrix.h commons/SubstitutionMatrixProfileStates.h commons/tantan.h diff --git a/lib/mmseqs/src/commons/Command.cpp b/lib/mmseqs/src/commons/Command.cpp index 3c8714e..8dc24fa 100644 --- a/lib/mmseqs/src/commons/Command.cpp +++ b/lib/mmseqs/src/commons/Command.cpp @@ -54,4 +54,5 @@ std::vector DbValidator::flatfile = {Parameters::DBTYPE_FLATFILE}; std::vector DbValidator::flatfileAndStdin = {Parameters::DBTYPE_FLATFILE, Parameters::DBTYPE_STDIN}; std::vector DbValidator::flatfileStdinAndGeneric = {Parameters::DBTYPE_FLATFILE, Parameters::DBTYPE_STDIN, Parameters::DBTYPE_GENERIC_DB}; std::vector DbValidator::resultDb = {Parameters::DBTYPE_ALIGNMENT_RES, Parameters::DBTYPE_PREFILTER_RES, Parameters::DBTYPE_PREFILTER_REV_RES, Parameters::DBTYPE_CLUSTER_RES}; +std::vector DbValidator::taxonomyReportInput = {Parameters::DBTYPE_ALIGNMENT_RES, Parameters::DBTYPE_PREFILTER_RES, Parameters::DBTYPE_PREFILTER_REV_RES, Parameters::DBTYPE_CLUSTER_RES, Parameters::DBTYPE_TAXONOMICAL_RESULT, Parameters::DBTYPE_NUCLEOTIDES, Parameters::DBTYPE_HMM_PROFILE, Parameters::DBTYPE_AMINO_ACIDS}; std::vector DbValidator::empty = {}; diff --git a/lib/mmseqs/src/commons/Command.h b/lib/mmseqs/src/commons/Command.h index a43d81f..8c3c1fd 100644 --- a/lib/mmseqs/src/commons/Command.h +++ b/lib/mmseqs/src/commons/Command.h @@ -9,10 +9,11 @@ const unsigned int CITATION_UNICLUST = 1U << 2; const unsigned int CITATION_LINCLUST = 1U << 3; const unsigned int CITATION_PLASS = 1U << 4; const unsigned int CITATION_SERVER = 1U << 5; +const unsigned int CITATION_TAXONOMY = 1U << 6; // Make sure this is always the last bit // citations from inheriting modules will start from here -const unsigned int CITATION_END = CITATION_SERVER << 1; +const unsigned int CITATION_END = CITATION_TAXONOMY << 1; struct MMseqsParameter; @@ -61,6 +62,7 @@ struct DbValidator { static std::vector allDb; static std::vector allDbAndFlat; static std::vector taxResult; + static std::vector taxonomyReportInput; static std::vector directory; static std::vector flatfile; static std::vector flatfileAndStdin; diff --git a/lib/mmseqs/src/commons/DBReader.cpp b/lib/mmseqs/src/commons/DBReader.cpp index 120169b..c8290e9 100644 --- a/lib/mmseqs/src/commons/DBReader.cpp +++ b/lib/mmseqs/src/commons/DBReader.cpp @@ -175,9 +175,8 @@ template bool 
DBReader::open(int accessType){ size = Util::ompCountLines(indexDataChar, indexDataSize, threads); index = new(std::nothrow) Index[this->size]; - incrementMemory(sizeof(Index) * size); - Util::checkAllocation(index, "Can not allocate index memory in DBReader"); + incrementMemory(sizeof(Index) * size); bool isSortedById = readIndex(indexDataChar, indexDataSize, index, dataSize); indexData.close(); @@ -436,8 +435,9 @@ template char* DBReader::mmapData(FILE * file, size_t *dataSize) } } else { ret = static_cast(malloc(*dataSize)); - incrementMemory(*dataSize); Util::checkAllocation(ret, "Not enough system memory to read in the whole data file."); + incrementMemory(*dataSize); + size_t result = fread(ret, 1, *dataSize, file); if (result != *dataSize) { Debug(Debug::ERROR) << "Failed to read in datafile (" << dataFileName << "). Error " << errno << "\n"; diff --git a/lib/mmseqs/src/commons/DBWriter.cpp b/lib/mmseqs/src/commons/DBWriter.cpp index 4419a16..ac007e6 100644 --- a/lib/mmseqs/src/commons/DBWriter.cpp +++ b/lib/mmseqs/src/commons/DBWriter.cpp @@ -142,8 +142,8 @@ void DBWriter::open(size_t bufferSize) { } dataFilesBuffer[i] = new(std::nothrow) char[bufferSize]; - incrementMemory(bufferSize); Util::checkAllocation(dataFilesBuffer[i], "Cannot allocate buffer for DBWriter"); + incrementMemory(bufferSize); this->bufferSize = bufferSize; // set buffer to 64 diff --git a/lib/mmseqs/src/commons/FileUtil.cpp b/lib/mmseqs/src/commons/FileUtil.cpp index fa23e8c..953c366 100644 --- a/lib/mmseqs/src/commons/FileUtil.cpp +++ b/lib/mmseqs/src/commons/FileUtil.cpp @@ -215,7 +215,11 @@ void FileUtil::symlinkAlias(const std::string &file, const std::string &alias) { std::string FileUtil::getCurrentWorkingDirectory() { // CWD can be larger than PATH_MAX and allocating enough memory is somewhat tricky char* wd = NULL; +#ifdef PATH_MAX size_t bufferSize = PATH_MAX; +#else + size_t bufferSize = 1024; +#endif do { if (wd != NULL) { free(wd); diff --git a/lib/mmseqs/src/commons/MathUtil.h b/lib/mmseqs/src/commons/MathUtil.h index 3392b2c..7cfd599 100644 --- a/lib/mmseqs/src/commons/MathUtil.h +++ b/lib/mmseqs/src/commons/MathUtil.h @@ -13,12 +13,15 @@ #define M_PI (3.14159265358979323846264338327950288) #endif -#ifndef __has_attribute -#define __has_attribute(x) 0 +#if defined(__has_attribute) +# define HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define HAS_ATTRIBUTE(x) (0) #endif + #ifndef MAY_ALIAS -#if defined(__GNUC__) || __has_attribute(__may_alias__) +#if HAS_ATTRIBUTE(__may_alias__) # define MAY_ALIAS(x) x __attribute__((__may_alias__)) #else # define MAY_ALIAS(x) x diff --git a/lib/mmseqs/src/commons/MultiParam.cpp b/lib/mmseqs/src/commons/MultiParam.cpp index d3b8029..046cbd5 100644 --- a/lib/mmseqs/src/commons/MultiParam.cpp +++ b/lib/mmseqs/src/commons/MultiParam.cpp @@ -69,13 +69,17 @@ MultiParam::MultiParam(const char* aminoacids, const char* nucleotides) } MultiParam::MultiParam(const char* filename) { - if (strchr(filename, ',') != NULL) { - size_t len = strlen(filename); - aminoacids = (char*) malloc(len * sizeof(char)); - nucleotides = (char*) malloc(len * sizeof(char)); - if (sscanf(filename, "aa:%[^,],nucl:%s", aminoacids, nucleotides) != 2 && sscanf(filename, "nucl:%[^,],aa:%s", nucleotides, aminoacids) != 2) { - free((char*)nucleotides); - free((char*)aminoacids); + const char *split; + if ((split = strchr(filename, ',')) != NULL) { + const char* first = filename; + const char* second = split + 1; + if (strncmp("aa:", first, strlen("aa:")) == 0 && strncmp("nucl:", second, 
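The DBReader, DBWriter, and (further below) CacheFriendlyOperations hunks only swap two statements so that the new(std::nothrow)/malloc result is verified before it is counted by the memory tracker or touched. A self-contained illustration of that ordering, not MMseqs2 code, with fprintf standing in for Util::checkAllocation:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <new>

    int main() {
        size_t bufferSize = 64 * 1024 * 1024;
        char* buffer = new (std::nothrow) char[bufferSize];
        if (buffer == NULL) {              // verify the allocation first ...
            fprintf(stderr, "Cannot allocate buffer\n");
            return EXIT_FAILURE;
        }
        memset(buffer, 0, bufferSize);     // ... only then touch and account for it
        delete[] buffer;
        return EXIT_SUCCESS;
    }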
strlen("nucl:")) == 0) { + aminoacids = strndup(first + 3, split - first - 5); + nucleotides = strdup(second + 5); + } else if (strncmp("nucl:", first, strlen("nucl:")) == 0 && strncmp("aa:", second, strlen("aa:")) == 0) { + nucleotides = strndup(first + 5, split - first - 5); + aminoacids = strdup(second + 3); + } else { nucleotides = strdup("INVALID"); aminoacids = strdup("INVALID"); } diff --git a/lib/mmseqs/src/commons/Parameters.cpp b/lib/mmseqs/src/commons/Parameters.cpp index ab76357..9ddc9ac 100644 --- a/lib/mmseqs/src/commons/Parameters.cpp +++ b/lib/mmseqs/src/commons/Parameters.cpp @@ -11,6 +11,13 @@ #include #include +#include "blosum62.out.h" +#include "PAM30.out.h" +#include "VTML80.out.h" +#include "VTML40.out.h" +#include "nucleotide.out.h" +#include "base64/base64.h" + #ifdef __CYGWIN__ #include #endif @@ -56,7 +63,7 @@ Parameters::Parameters(): PARAM_SPACED_KMER_PATTERN(PARAM_SPACED_KMER_PATTERN_ID, "--spaced-kmer-pattern", "Spaced k-mer pattern", "User-specified spaced k-mer pattern", typeid(std::string), (void *) &spacedKmerPattern, "^1[01]*1$", MMseqsParameter::COMMAND_PREFILTER | MMseqsParameter::COMMAND_EXPERT), PARAM_LOCAL_TMP(PARAM_LOCAL_TMP_ID, "--local-tmp", "Local temporary path", "Path where some of the temporary files will be created", typeid(std::string), (void *) &localTmp, "", MMseqsParameter::COMMAND_PREFILTER | MMseqsParameter::COMMAND_EXPERT), // alignment - PARAM_ALIGNMENT_MODE(PARAM_ALIGNMENT_MODE_ID, "--alignment-mode", "Alignment mode", "How to compute the alignment:\n0: automatic\n1: only score and end_pos\n2: also start_pos and cov\n3: also seq.id\n4: only ungapped alignment", typeid(int), (void *) &alignmentMode, "^[0-4]{1}$", MMseqsParameter::COMMAND_ALIGN), + PARAM_ALIGNMENT_MODE(PARAM_ALIGNMENT_MODE_ID, "--alignment-mode", "Alignment mode", "How to compute the alignment:\n0: automatic\n1: only score and end_pos\n2: also start_pos and cov\n3: also seq.id\n4: only ungapped alignment\n5: score only (output) cluster format", typeid(int), (void *) &alignmentMode, "^[0-5]{1}$", MMseqsParameter::COMMAND_ALIGN), PARAM_E(PARAM_E_ID, "-e", "E-value threshold", "List matches below this E-value (range 0.0-inf)", typeid(double), (void *) &evalThr, "^([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)|[0-9]*(\\.[0-9]+)?$", MMseqsParameter::COMMAND_ALIGN), PARAM_C(PARAM_C_ID, "-c", "Coverage threshold", "List matches above this fraction of aligned (covered) residues (see --cov-mode)", typeid(float), (void *) &covThr, "^0(\\.[0-9]+)?|^1(\\.0+)?$", MMseqsParameter::COMMAND_ALIGN | MMseqsParameter::COMMAND_CLUSTLINEAR), PARAM_COV_MODE(PARAM_COV_MODE_ID, "--cov-mode", "Coverage mode", "0: coverage of query and target\n1: coverage of target\n2: coverage of query\n3: target seq. length has to be at least x% of query length\n4: query seq. length has to be at least x% of target length\n5: short seq. needs to be at least x% of the other seq. 
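The rewritten MultiParam constructor above parses values of the form "aa:<matrix>,nucl:<matrix>" (in either order) and falls back to the sentinel pair "INVALID"/"INVALID", which the parameter parser checks for later. A simplified std::string version of the same parsing rule, as a sketch rather than the class itself; names and values are illustrative:

    #include <string>
    #include <utility>

    static std::pair<std::string, std::string> splitMultiParam(const std::string& value) {
        size_t comma = value.find(',');
        std::string first = value.substr(0, comma);
        std::string second = (comma == std::string::npos) ? "" : value.substr(comma + 1);
        std::string aa = "INVALID", nucl = "INVALID";
        if (first.compare(0, 3, "aa:") == 0 && second.compare(0, 5, "nucl:") == 0) {
            aa = first.substr(3);
            nucl = second.substr(5);
        } else if (first.compare(0, 5, "nucl:") == 0 && second.compare(0, 3, "aa:") == 0) {
            nucl = first.substr(5);
            aa = second.substr(3);
        }
        return std::make_pair(aa, nucl);  // e.g. {"blosum62.out", "nucleotide.out"}
    }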
length", typeid(int), (void *) &covMode, "^[0-5]{1}$", MMseqsParameter::COMMAND_ALIGN), @@ -143,11 +150,13 @@ Parameters::Parameters(): PARAM_NUM_ITERATIONS(PARAM_NUM_ITERATIONS_ID, "--num-iterations", "Search iterations", "Number of iterative profile search iterations", typeid(int), (void *) &numIterations, "^[1-9]{1}[0-9]*$", MMseqsParameter::COMMAND_PROFILE), PARAM_START_SENS(PARAM_START_SENS_ID, "--start-sens", "Start sensitivity", "Start sensitivity", typeid(float), (void *) &startSens, "^[0-9]*(\\.[0-9]+)?$"), PARAM_SENS_STEPS(PARAM_SENS_STEPS_ID, "--sens-steps", "Search steps", "Number of search steps performed from --start-sens to -s", typeid(int), (void *) &sensSteps, "^[1-9]{1}$"), - PARAM_SLICE_SEARCH(PARAM_SLICE_SEARCH_ID, "--slice-search", "Slice search mode", "For bigger profile DB, run iteratively the search by greedily swapping the search results", typeid(bool), (void *) &sliceSearch, "", MMseqsParameter::COMMAND_PROFILE | MMseqsParameter::COMMAND_EXPERT), + PARAM_EXHAUSTIVE_SEARCH(PARAM_EXHAUSTIVE_SEARCH_ID, "--exhaustive-search", "Exhaustive search mode", "For bigger profile DB, run iteratively the search by greedily swapping the search results", typeid(bool), (void *) &exhaustiveSearch, "", MMseqsParameter::COMMAND_PROFILE | MMseqsParameter::COMMAND_EXPERT), + PARAM_EXHAUSTIVE_SEARCH_FILTER(PARAM_EXHAUSTIVE_SEARCH_FILTER_ID, "--exhaustive-search-filter", "Filter results during exhaustive search", "Filter result during search: 0: do not filter, 1: filter", typeid(int), (void *) &exhaustiveFilterMsa, "^[0-1]{1}$", MMseqsParameter::COMMAND_ALIGN | MMseqsParameter::COMMAND_EXPERT), + PARAM_STRAND(PARAM_STRAND_ID, "--strand", "Strand selection", "Strand selection only works for DNA/DNA search 0: reverse, 1: forward, 2: both", typeid(int), (void *) &strand, "^[0-2]{1}$", MMseqsParameter::COMMAND_EXPERT), - PARAM_ORF_FILTER(PARAM_ORF_FILTER_ID, "--orf-filter", "ORF filter", "Prefilter query ORFs with non-selective before search", typeid(int), (void *) &orfFilter, "^[0-1]{1}$", MMseqsParameter::COMMAND_HIDDEN), - PARAM_ORF_FILTER_S(PARAM_ORF_FILTER_S_ID, "--orf-filter-s", "ORF filter sensitivity", "Sensitivity used for query ORF prefiltering", typeid(float), (void *) &orfFilterSens, "^[0-9]*(\\.[0-9]+)?$", MMseqsParameter::COMMAND_HIDDEN), - PARAM_ORF_FILTER_E(PARAM_ORF_FILTER_E_ID, "--orf-filter-e", "ORF filter e-value", "E-value threshold used for query ORF prefiltering", typeid(double), (void *) &orfFilterEval, "^([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)|[0-9]*(\\.[0-9]+)?$", MMseqsParameter::COMMAND_HIDDEN), + PARAM_ORF_FILTER(PARAM_ORF_FILTER_ID, "--orf-filter", "ORF filter", "Prefilter query ORFs with non-selective search\nOnly used during nucleotide-vs-protein classification\nNOTE: Consider disabling when classifying short reads", typeid(int), (void *) &orfFilter, "^[0-1]{1}$"), + PARAM_ORF_FILTER_S(PARAM_ORF_FILTER_S_ID, "--orf-filter-s", "ORF filter sensitivity", "Sensitivity used for query ORF prefiltering", typeid(float), (void *) &orfFilterSens, "^[0-9]*(\\.[0-9]+)?$"), + PARAM_ORF_FILTER_E(PARAM_ORF_FILTER_E_ID, "--orf-filter-e", "ORF filter e-value", "E-value threshold used for query ORF prefiltering", typeid(double), (void *) &orfFilterEval, "^([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)|[0-9]*(\\.[0-9]+)?$"), PARAM_LCA_SEARCH(PARAM_LCA_SEARCH_ID, "--lca-search", "LCA search mode", "Efficient search for LCA candidates", typeid(bool), (void *) &lcaSearch, "", MMseqsParameter::COMMAND_PROFILE | MMseqsParameter::COMMAND_EXPERT), // easysearch 
PARAM_GREEDY_BEST_HITS(PARAM_GREEDY_BEST_HITS_ID, "--greedy-best-hits", "Greedy best hits", "Choose the best hits greedily to cover the query", typeid(bool), (void *) &greedyBestHits, ""), @@ -258,12 +267,15 @@ Parameters::Parameters(): // expandaln PARAM_EXPANSION_MODE(PARAM_EXPANSION_MODE_ID, "--expansion-mode", "Expansion mode", "Update score, E-value, and sequence identity by 0: input alignment 1: rescoring the inferred backtrace", typeid(int), (void *) &expansionMode, "^[0-2]{1}$"), // taxonomy - PARAM_LCA_MODE(PARAM_LCA_MODE_ID, "--lca-mode", "LCA mode", "LCA Mode 1: single search LCA , 2/3: accelerated 2bLCA, 4: top hit", typeid(int), (void *) &taxonomySearchMode, "^[1-4]{1}$"), - PARAM_TAX_OUTPUT_MODE(PARAM_TAX_OUTPUT_MODE_ID, "--tax-output-mode", "Taxonomy output mode", "0: output LCA, 1: output alignment 2: output both", typeid(int), (void *) &taxonomyOutpuMode, "^[0-2]{1}$"), + PARAM_LCA_MODE(PARAM_LCA_MODE_ID, "--lca-mode", "LCA mode", "LCA Mode 1: single search LCA , 2/3: approximate 2bLCA, 4: top hit", typeid(int), (void *) &taxonomySearchMode, "^[1-4]{1}$"), + PARAM_TAX_OUTPUT_MODE(PARAM_TAX_OUTPUT_MODE_ID, "--tax-output-mode", "Taxonomy output mode", "0: output LCA, 1: output alignment 2: output both", typeid(int), (void *) &taxonomyOutputMode, "^[0-2]{1}$"), // createsubdb, filtertaxseqdb PARAM_SUBDB_MODE(PARAM_SUBDB_MODE_ID, "--subdb-mode", "Subdb mode", "Subdb mode 0: copy data 1: soft link data and write index", typeid(int), (void *) &subDbMode, "^[0-1]{1}$"), PARAM_TAR_INCLUDE(PARAM_TAR_INCLUDE_ID, "--tar-include", "Tar Inclusion Regex", "Include file names based on this regex", typeid(std::string), (void *) &tarInclude, "^.*$"), PARAM_TAR_EXCLUDE(PARAM_TAR_EXCLUDE_ID, "--tar-exclude", "Tar Exclusion Regex", "Exclude file names based on this regex", typeid(std::string), (void *) &tarExclude, "^.*$"), + // unpackdb + PARAM_UNPACK_SUFFIX(PARAM_UNPACK_SUFFIX_ID, "--unpack-suffix", "Unpack suffix", "File suffix for unpacked files", typeid(std::string), (void *) &unpackSuffix, "^.*$"), + PARAM_UNPACK_NAME_MODE(PARAM_UNPACK_NAME_MODE_ID, "--unpack-name-mode", "Unpack name mode", "Name unpacked files by 0: DB key, 1: accession (through .lookup)", typeid(int), (void *) &unpackNameMode, "^[0-1]{1}$"), // for modules that should handle -h themselves PARAM_HELP(PARAM_HELP_ID, "-h", "Help", "Help", typeid(bool), (void *) &help, "", MMseqsParameter::COMMAND_HIDDEN), PARAM_HELP_LONG(PARAM_HELP_LONG_ID, "--help", "Help", "Help", typeid(bool), (void *) &help, "", MMseqsParameter::COMMAND_HIDDEN) @@ -274,7 +286,8 @@ Parameters::Parameters(): } instance = this; - // onlyverbosity + + // onlyverbosity onlyverbosity.push_back(&PARAM_V); // verbandcompression @@ -584,6 +597,7 @@ Parameters::Parameters(): msa2profile.push_back(&PARAM_FILTER_NDIFF); msa2profile.push_back(&PARAM_GAP_OPEN); msa2profile.push_back(&PARAM_GAP_EXTEND); + msa2profile.push_back(&PARAM_SKIP_QUERY); msa2profile.push_back(&PARAM_THREADS); msa2profile.push_back(&PARAM_COMPRESSED); msa2profile.push_back(&PARAM_V); @@ -1113,11 +1127,9 @@ Parameters::Parameters(): searchworkflow.push_back(&PARAM_NUM_ITERATIONS); searchworkflow.push_back(&PARAM_START_SENS); searchworkflow.push_back(&PARAM_SENS_STEPS); - searchworkflow.push_back(&PARAM_SLICE_SEARCH); + searchworkflow.push_back(&PARAM_EXHAUSTIVE_SEARCH); + searchworkflow.push_back(&PARAM_EXHAUSTIVE_SEARCH_FILTER); searchworkflow.push_back(&PARAM_STRAND); - searchworkflow.push_back(&PARAM_ORF_FILTER); - searchworkflow.push_back(&PARAM_ORF_FILTER_E); - 
searchworkflow.push_back(&PARAM_ORF_FILTER_S); searchworkflow.push_back(&PARAM_LCA_SEARCH); searchworkflow.push_back(&PARAM_DISK_SPACE_LIMIT); searchworkflow.push_back(&PARAM_RUNNER); @@ -1182,9 +1194,13 @@ Parameters::Parameters(): easyclusterworkflow = combineList(clusterworkflow, createdb); // taxonomy - taxonomy = combineList(searchworkflow, lca); + taxonomy.push_back(&PARAM_ORF_FILTER); + taxonomy.push_back(&PARAM_ORF_FILTER_E); + taxonomy.push_back(&PARAM_ORF_FILTER_S); taxonomy.push_back(&PARAM_LCA_MODE); taxonomy.push_back(&PARAM_TAX_OUTPUT_MODE); + taxonomy = combineList(taxonomy, lca); + taxonomy = combineList(taxonomy, searchworkflow); // taxpercontig taxpercontig = combineList(taxonomy, aggregatetax); @@ -1193,9 +1209,12 @@ Parameters::Parameters(): // easy taxonomy easytaxonomy = combineList(taxonomy, addtaxonomy); + easytaxonomy = combineList(easytaxonomy, taxonomyreport); easytaxonomy = combineList(easytaxonomy, convertalignments); easytaxonomy = combineList(easytaxonomy, createtsv); easytaxonomy = combineList(easytaxonomy, createdb); + easytaxonomy = removeParameter(easytaxonomy, PARAM_TAX_OUTPUT_MODE); + easytaxonomy = removeParameter(easytaxonomy, PARAM_PICK_ID_FROM); // multi hit db multihitdb = combineList(createdb, extractorfs); @@ -1512,7 +1531,11 @@ void Parameters::parseParameters(int argc, const char *pargv[], const Command &c } argIdx++; } else if (typeid(MultiParam) == par[parIdx]->type) { - MultiParam value = MultiParam(pargv[argIdx+1]); + std::string val(pargv[argIdx+1]); + if (Util::startWith("b64:", val)) { + val = base64_decode(val.c_str() + 4, val.size() - 4); + } + MultiParam value = MultiParam(val); if (value == MultiParam("INVALID", "INVALID")) { printUsageMessage(command, 0xFFFFFFFF); Debug(Debug::ERROR) << "Error in value parsing " << par[parIdx]->name << "\n"; @@ -1574,8 +1597,12 @@ void Parameters::parseParameters(int argc, const char *pargv[], const Command &c } argIdx++; } else if (typeid(std::string) == par[parIdx]->type) { + std::string val(pargv[argIdx+1]); + if (Util::startWith("b64:", val)) { + val = base64_decode(val.c_str() + 4, val.size() - 4); + } std::string* currVal = (std::string*)par[parIdx]->value; - currVal->assign(pargv[argIdx+1]); + currVal->assign(val); par[parIdx]->wasSet = true; argIdx++; } else if (typeid(bool) == par[parIdx]->type) { @@ -1599,21 +1626,23 @@ void Parameters::parseParameters(int argc, const char *pargv[], const Command &c if (hasUnrecognizedParameter) { printUsageMessage(command, 0xFFFFFFFF); - Debug(Debug::INFO) << "Unrecognized parameter " << parameter << "\n"; // Suggest some parameter that the user might have meant std::vector::const_iterator index = par.end(); int maxDistance = 0; for (std::vector::const_iterator it = par.begin(); it != par.end(); ++it) { int distance = DistanceCalculator::localLevenshteinDistance(parameter, (*it)->name); - if(distance > maxDistance) { + if (distance > maxDistance) { maxDistance = distance; index = it; } } - if(index != par.end()) { - Debug(Debug::WARNING) << "Did you mean \"" << (*index)->name << "\"?\n"; + Debug(Debug::ERROR) << "Unrecognized parameter \"" << parameter << "\""; + if (index != par.end()) { + Debug(Debug::ERROR) << ". 
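The parser change above transparently decodes option values prefixed with "b64:"; createParameterString() further below is the producing side, encoding any value that contains whitespace so it survives being passed through the workflow shell scripts. A sketch of that round trip, assuming the bundled base64 helpers return std::string, as their use in Parameters.cpp suggests:

    #include <string>
    #include "base64/base64.h"  // bundled encoder/decoder used by Parameters.cpp

    // Producing side: mask values that contain whitespace before they are
    // written into a workflow parameter string.
    static std::string maskWhitespace(const std::string& value) {
        if (value.find_first_of(" \n\t") != std::string::npos) {
            return "b64:" + base64_encode(value.c_str(), value.size());
        }
        return value;
    }

    // Consuming side: parseParameters/printParameters strip the prefix again.
    static std::string unmaskWhitespace(const std::string& value) {
        if (value.compare(0, 4, "b64:") == 0) {
            return base64_decode(value.c_str() + 4, value.size() - 4);
        }
        return value;
    }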
Did you mean \"" << (*index)->name << "\" (" << (*index)->display << ")?\n"; + } else { + Debug(Debug::ERROR) << "\n"; } EXIT(EXIT_FAILURE); @@ -1786,93 +1815,118 @@ void Parameters::parseParameters(int argc, const char *pargv[], const Command &c if (parseFlags & PARSE_ALLOW_EMPTY) break; printUsageMessage(command, outputFlags); - Debug(Debug::ERROR) << "Unrecognized parameters!" << "\n"; printParameters(command.cmd, argc, pargv, par); + Debug(Debug::ERROR) << "Unrecognized parameters!" << "\n"; EXIT(EXIT_FAILURE); } + + // set up substituionMatrix + for(size_t i = 0 ; i < substitutionMatrices.size(); i++) { + bool isAminoAcid = (strcmp(scoringMatrixFile.aminoacids, substitutionMatrices[i].name.c_str()) == 0); + bool isNucleotide = (strcmp(scoringMatrixFile.nucleotides, substitutionMatrices[i].name.c_str()) == 0); + bool isSeedAminoAcid = (strcmp(seedScoringMatrixFile.aminoacids, substitutionMatrices[i].name.c_str()) == 0); + bool isSeedNucleotide = (strcmp(seedScoringMatrixFile.nucleotides, substitutionMatrices[i].name.c_str()) == 0); + if (isAminoAcid || isNucleotide|| isSeedAminoAcid|| isSeedNucleotide) { + std::string matrixData((const char *)substitutionMatrices[i].subMatData, substitutionMatrices[i].subMatDataLen); + std::string matrixName = substitutionMatrices[i].name; + if(isAminoAcid) { + free(scoringMatrixFile.aminoacids); + scoringMatrixFile.aminoacids = BaseMatrix::serialize(matrixName, matrixData); + } + if(isNucleotide) { + free(scoringMatrixFile.nucleotides); + scoringMatrixFile.nucleotides = BaseMatrix::serialize(matrixName, matrixData); + } + if(isSeedAminoAcid) { + free(seedScoringMatrixFile.aminoacids); + seedScoringMatrixFile.aminoacids = BaseMatrix::serialize(matrixName, matrixData); + } + if(isSeedNucleotide) { + free(seedScoringMatrixFile.nucleotides); + seedScoringMatrixFile.nucleotides = BaseMatrix::serialize(matrixName, matrixData); + } + } + } + if (ignorePathCountChecks == false) { - checkIfDatabaseIsValid(command, isStartVar, isMiddleVar, isEndVar); + checkIfDatabaseIsValid(command, argc, pargv, isStartVar, isMiddleVar, isEndVar); } - if(printPar == true) { + if (printPar == true) { printParameters(command.cmd, argc, pargv, par); } + } -void Parameters::checkIfTaxDbIsComplete(std::string & filename){ +std::vector Parameters::findMissingTaxDbFiles(const std::string &filename) { + std::vector missingFiles; if (FileUtil::fileExists((filename + "_mapping").c_str()) == false) { - Debug(Debug::ERROR) << "Database " << filename << " need taxonomical information.\n" - << "The " << filename << "_mapping is missing.\n"; - EXIT(EXIT_FAILURE); + missingFiles.emplace_back(filename + "_mapping"); + } else if (FileUtil::fileExists((filename + "_taxonomy").c_str()) == true) { + return missingFiles; } - if (FileUtil::fileExists((filename + "_taxonomy").c_str()) == true) { - return; - } - if (FileUtil::fileExists((filename + "_nodes.dmp").c_str()) == false) { - Debug(Debug::ERROR) << "Database " << filename << " need taxonomical information.\n" - << "The " << filename << "_nodes.dmp is missing.\n"; - EXIT(EXIT_FAILURE); - } - if (FileUtil::fileExists((filename + "_names.dmp").c_str()) == false) { - Debug(Debug::ERROR) << "Database " << filename << " need taxonomical information.\n" - << "The " << filename << "_names.dmp is missing.\n"; - EXIT(EXIT_FAILURE); + const std::vector suffices = {"_nodes.dmp", "_names.dmp", "_merged.dmp"}; + for (size_t i = 0; i < suffices.size(); ++i) { + if (FileUtil::fileExists((filename + suffices[i]).c_str()) == false) { + 
missingFiles.emplace_back(filename + suffices[i]); + } } - if (FileUtil::fileExists((filename + "_merged.dmp").c_str()) == false) { - Debug(Debug::ERROR) << "Database " << filename << " need taxonomical information.\n" - << "The " << filename << "_merged.dmp is missing.\n"; - EXIT(EXIT_FAILURE); + return missingFiles; +} + +void Parameters::printTaxDbError(const std::string &filename, const std::vector& missingFiles) { + Debug(Debug::ERROR) << "Input taxonomy database \"" << filename << "\" is missing files:\n"; + for (size_t i = 0; i < missingFiles.size(); ++i) { + Debug(Debug::ERROR) << "- " << missingFiles[i] << "\n"; } } -void Parameters::checkIfDatabaseIsValid(const Command& command, bool isStartVar, bool isMiddleVar, bool isEndVar) { +void Parameters::checkIfDatabaseIsValid(const Command& command, int argc, const char** argv, bool isStartVar, bool isMiddleVar, bool isEndVar) { size_t fileIdx = 0; for (size_t dbIdx = 0; dbIdx < command.databases.size(); dbIdx++) { const DbType &db = command.databases[dbIdx]; - // special checks + // special checks if (db.accessMode == db.ACCESS_MODE_INPUT) { size_t argumentDist = 0; - if(dbIdx == 0 && isStartVar){ + if (dbIdx == 0 && isStartVar) { argumentDist = (filenames.size() - command.databases.size()); - }else if(dbIdx == command.databases.size() - 1 && isEndVar){ + } else if (dbIdx == command.databases.size() - 1 && isEndVar) { argumentDist = (filenames.size() - command.databases.size()); - }else if((command.databases[dbIdx].specialType & DbType::VARIADIC) && isMiddleVar){ + } else if ((command.databases[dbIdx].specialType & DbType::VARIADIC) && isMiddleVar) { argumentDist = (filenames.size() - command.databases.size()); } - size_t currFileIdx = fileIdx; - for(; fileIdx <= currFileIdx+argumentDist; fileIdx++){ + for (; fileIdx <= currFileIdx + argumentDist; fileIdx++) { if (db.validator == NULL) { continue; } - std::string dbTypeFile = std::string(filenames[fileIdx]) + ".dbtype"; - // check if file exists - // if file is not a - if (FileUtil::fileExists((filenames[fileIdx]).c_str()) == false && FileUtil::fileExists(dbTypeFile.c_str()) == false && filenames[fileIdx] != "stdin" ) { - Debug(Debug::ERROR) << "Input " << filenames[fileIdx] << " does not exist.\n"; + if (filenames[fileIdx] != "stdin" && FileUtil::fileExists((filenames[fileIdx]).c_str()) == false && FileUtil::fileExists((filenames[fileIdx] + ".dbtype").c_str()) == false) { + printParameters(command.cmd, argc, argv, *command.params); + Debug(Debug::ERROR) << "Input " << filenames[fileIdx] << " does not exist\n"; EXIT(EXIT_FAILURE); } int dbtype = FileUtil::parseDbType(filenames[fileIdx].c_str()); - if (db.specialType & DbType::NEED_HEADER) { - if (FileUtil::fileExists((filenames[fileIdx] + "_h.dbtype").c_str()) == false && Parameters::isEqualDbtype(dbtype, Parameters::DBTYPE_INDEX_DB)==false) { - Debug(Debug::ERROR) << "Database " << filenames[fileIdx] << " needs header information.\n" - << filenames[fileIdx] << "_h is missing.\n"; - EXIT(EXIT_FAILURE); - } + if (db.specialType & DbType::NEED_HEADER && FileUtil::fileExists((filenames[fileIdx] + "_h.dbtype").c_str()) == false && Parameters::isEqualDbtype(dbtype, Parameters::DBTYPE_INDEX_DB) == false) { + printParameters(command.cmd, argc, argv, *command.params); + Debug(Debug::ERROR) << "Database " << filenames[fileIdx] << " needs header information\n"; + EXIT(EXIT_FAILURE); } if (db.specialType & DbType::NEED_TAXONOMY) { - checkIfTaxDbIsComplete(filenames[fileIdx]); - } - if (db.specialType & DbType::NEED_LOOKUP) { - if 
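checkIfTaxDbIsComplete() is split above into findMissingTaxDbFiles(), which collects every missing file, and printTaxDbError(), which reports them in one message. The check itself, restated as a standalone sketch in which POSIX stat() stands in for FileUtil::fileExists:

    #include <string>
    #include <vector>
    #include <sys/stat.h>

    static bool exists(const std::string& path) {
        struct stat st;
        return stat(path.c_str(), &st) == 0;
    }

    // A taxonomy database needs either the pre-serialized "_taxonomy" file or
    // the three NCBI dump files, in addition to the "_mapping" file.
    static std::vector<std::string> missingTaxFiles(const std::string& db) {
        std::vector<std::string> missing;
        if (!exists(db + "_mapping")) {
            missing.push_back(db + "_mapping");
        } else if (exists(db + "_taxonomy")) {
            return missing;  // bundled taxonomy present, nothing else required
        }
        const char* suffices[] = { "_nodes.dmp", "_names.dmp", "_merged.dmp" };
        for (size_t i = 0; i < 3; ++i) {
            if (!exists(db + suffices[i])) {
                missing.push_back(db + suffices[i]);
            }
        }
        return missing;
    }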
(FileUtil::fileExists((filenames[fileIdx] + ".lookup").c_str()) == false) { - Debug(Debug::ERROR) << "Database " << filenames[fileIdx] << " needs a lookup file.\n" - << filenames[fileIdx] << ".lookup is missing.\n"; + std::vector missingFiles = findMissingTaxDbFiles(filenames[fileIdx]); + if (missingFiles.empty() == false) { + printParameters(command.cmd, argc, argv, *command.params); + printTaxDbError(filenames[fileIdx], missingFiles); EXIT(EXIT_FAILURE); } } + if (db.specialType & DbType::NEED_LOOKUP && FileUtil::fileExists((filenames[fileIdx] + ".lookup").c_str()) == false) { + printParameters(command.cmd, argc, argv, *command.params); + Debug(Debug::ERROR) << "Database " << filenames[fileIdx] << " needs a lookup file\n"; + EXIT(EXIT_FAILURE); + } bool dbtypeFound = false; for (size_t i = 0; i < db.validator->size() && dbtypeFound == false; i++) { int validatorDbtype = db.validator->at(i); @@ -1888,48 +1942,32 @@ void Parameters::checkIfDatabaseIsValid(const Command& command, bool isStartVar, } } if (dbtypeFound == false) { - Debug(Debug::ERROR) << "Input database \"" << filenames[fileIdx] << "\" is wrong!" << "\n" - << "Current input: " << Parameters::getDbTypeName(dbtype) << ". Allowed input: "; - for (size_t i = 0; i < db.validator->size() && dbtypeFound == false; i++) { - Debug(Debug::ERROR) << Parameters::getDbTypeName(db.validator->at(i)); - if (i != db.validator->size() - 1) { - Debug(Debug::ERROR) << ", "; - } + printParameters(command.cmd, argc, argv, *command.params); + Debug(Debug::ERROR) << "Input database \"" << filenames[fileIdx] << "\" has the wrong type (" + << Parameters::getDbTypeName(dbtype) << ")\nAllowed input:\n"; + for (size_t i = 0; i < db.validator->size(); ++i) { + Debug(Debug::ERROR) << "- " << Parameters::getDbTypeName(db.validator->at(i)) << "\n"; } - Debug(Debug::ERROR) << "\n"; EXIT(EXIT_FAILURE); } } } else if (db.accessMode == db.ACCESS_MODE_OUTPUT) { if (db.validator == &DbValidator::directory) { if (FileUtil::directoryExists(filenames[fileIdx].c_str()) == false) { - Debug(Debug::WARNING) << "Tmp " << filenames[dbIdx] - << " folder does not exist or is not a directory.\n"; if (FileUtil::makeDir(filenames[fileIdx].c_str()) == false) { - Debug(Debug::ERROR) << "Can not create tmp folder " << filenames[dbIdx] << ".\n"; + printParameters(command.cmd, argc, argv, *command.params); + Debug(Debug::ERROR) << "Cannot create temporary directory " << filenames[dbIdx] << "\n"; EXIT(EXIT_FAILURE); } else { - Debug(Debug::INFO) << "Create dir " << filenames[dbIdx] << "\n"; + Debug(Debug::INFO) << "Create directory " << filenames[dbIdx] << "\n"; } } fileIdx++; } else { if (FileUtil::fileExists(filenames[fileIdx].c_str()) == true) { - Debug(Debug::WARNING) << filenames[fileIdx] << " exists and will be overwritten.\n"; + Debug(Debug::WARNING) << filenames[fileIdx] << " exists and will be overwritten\n"; } fileIdx++; -// FILE *fp = fopen(filenames[dbIdx].c_str(), "a"); -// if (fp == NULL) { -// if (errno == EACCES) { -// Debug(Debug::ERROR) << "No permission to write file " << filenames[dbIdx] << ".\n"; -// EXIT(EXIT_FAILURE); -// } else { -// Debug(Debug::ERROR) << "Error while writing file " << filenames[dbIdx] << ".\n"; -// EXIT(EXIT_FAILURE); -// } -// } -// fclose(fp); -// FileUtil::remove(filenames[dbIdx].c_str()); } } else { fileIdx++; @@ -1946,7 +1984,12 @@ void Parameters::printParameters(const std::string &module, int argc, const char Debug(Debug::INFO) << module << " "; for (int i = 0; i < argc; i++) { - Debug(Debug::INFO) << pargv[i] << " "; + // don't expose 
users to the internal b64 masking of whitespace characters + if (strncmp("b64:", pargv[i], 4) == 0) { + Debug(Debug::INFO) << "'" << base64_decode(pargv[i] + 4, strlen(pargv[i]) - 4) << "' "; + } else { + Debug(Debug::INFO) << pargv[i] << " "; + } } Debug(Debug::INFO) << "\n\n"; @@ -1974,6 +2017,12 @@ void Parameters::printParameters(const std::string &module, int argc, const char ss << *((int *)par[i]->value); } else if(typeid(ByteParser) == par[i]->type) { ss << ByteParser::format(*((size_t *)par[i]->value), 'a', 'h'); + } else if(PARAM_SUB_MAT.uniqid == par[i]->uniqid || + PARAM_SEED_SUB_MAT.uniqid == par[i]->uniqid) { + MultiParam * param = ((MultiParam *) par[i]->value); + MultiParam tmpPar(BaseMatrix::unserializeName(param->aminoacids).c_str(), + BaseMatrix::unserializeName(param->nucleotides).c_str()); + ss << MultiParam::format(tmpPar); } else if(typeid(MultiParam) == par[i]->type) { ss << MultiParam::format(*((MultiParam *)par[i]->value)); } else if(typeid(MultiParam) == par[i]->type) { @@ -2022,7 +2071,8 @@ void Parameters::setDefaults() { numIterations = 1; startSens = 4; sensSteps = 1; - sliceSearch = false; + exhaustiveSearch = false; + exhaustiveFilterMsa = 0; strand = 1; orfFilter = 0; orfFilterSens = 2.0; @@ -2280,13 +2330,17 @@ void Parameters::setDefaults() { tarInclude = ".*"; tarExclude = "^$"; + // unpackdb + unpackSuffix = ""; + unpackNameMode = Parameters::UNPACK_NAME_ACCESSION; + lcaRanks = ""; showTaxLineage = 0; // bin for all unclassified sequences // https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=12908 // other sequences (plasmids, etc) // https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=28384 - blacklist = "12908,28384"; + blacklist = "12908:unclassified sequences,28384:other sequences"; // aggregatetax majorityThr = 0.5; @@ -2299,8 +2353,17 @@ void Parameters::setDefaults() { expansionMode = EXPAND_TRANSFER_EVALUE; // taxonomy - taxonomySearchMode = Parameters::TAXONOMY_ACCEL_2BLCA; - taxonomyOutpuMode = Parameters::TAXONOMY_OUTPUT_LCA; + taxonomySearchMode = Parameters::TAXONOMY_APPROX_2BLCA; + taxonomyOutputMode = Parameters::TAXONOMY_OUTPUT_LCA; + + // substitution matrix + substitutionMatrices = { + {"nucleotide.out", nucleotide_out, nucleotide_out_len }, + {"blosum62.out", blosum62_out, blosum62_out_len }, + {"VTML80.out", VTML80_out, VTML80_out_len}, + {"VTML40.out", VTML40_out, VTML40_out_len}, + {"PAM30.out", PAM30_out, PAM30_out_len} + }; citations = { { CITATION_MMSEQS1, "Hauser M, Steinegger M, Soding J: MMseqs software suite for fast and deep clustering and searching of large protein sequence sets. Bioinformatics, 32(9), 1323-1330 (2016)" }, @@ -2309,6 +2372,7 @@ void Parameters::setDefaults() { { CITATION_LINCLUST, "Steinegger M, Soding J: Clustering huge protein sequence sets in linear time. Nature Communications, 9(1), 2542 (2018)" }, { CITATION_PLASS, "Steinegger M, Mirdita M, Soding J: Protein-level assembly increases protein sequence recovery from metagenomic samples manyfold. Nature Methods, 16(7), 603-606 (2019)" }, { CITATION_SERVER, "Mirdita M, Steinegger M, Soding J: MMseqs2 desktop and local web server app for fast, interactive sequence searches. Bioinformatics, 35(16), 2856–2858 (2019)" }, + { CITATION_TAXONOMY, "Mirdita M, Steinegger M, Breitwieser F, Soding J, Levy Karin E: Fast and sensitive taxonomic assignment to metagenomic contigs. 
bioRxiv, 2020.11.27.401018 (2020)" }, }; } @@ -2414,10 +2478,28 @@ std::string Parameters::createParameterString(const std::vectortype){ ss << par[i]->name << " "; ss << *((double *)par[i]->value) << " "; + } else if (PARAM_SUB_MAT.uniqid == par[i]->uniqid || + PARAM_SEED_SUB_MAT.uniqid == par[i]->uniqid){ + MultiParam * param = ((MultiParam *) par[i]->value); + MultiParam tmpPar(BaseMatrix::unserializeName(param->aminoacids).c_str(), + BaseMatrix::unserializeName(param->nucleotides).c_str()); + std::string value = MultiParam::format(tmpPar); + // encode parameters as base64 if it contains whitespaces + // whitespaces break parameters in the workflow shell scripts + if (value.find_first_of(" \n\t") != std::string::npos) { + ss << par[i]->name << " b64:" << base64_encode(value.c_str(), value.size()) << " "; + } else { + ss << par[i]->name << " " << value << " "; + } } else if (typeid(std::string) == par[i]->type){ - if (*((std::string *) par[i]->value) != "") { - ss << par[i]->name << " "; - ss << *((std::string *) par[i]->value) << " "; + std::string& value = *((std::string *) par[i]->value); + if (value != "") { + // see above + if (value.find_first_of(" \n\t") != std::string::npos) { + ss << par[i]->name << " b64:" << base64_encode(value.c_str(), value.size()) << " "; + } else { + ss << par[i]->name << " " << value << " "; + } } } else if (typeid(bool) == par[i]->type){ bool val = *((bool *)(par[i]->value)); @@ -2428,7 +2510,13 @@ std::string Parameters::createParameterString(const std::vector) == par[i]->type) { ss << par[i]->name << " "; - ss << MultiParam::format(*((MultiParam *) par[i]->value)) << " "; + std::string value = MultiParam::format(*((MultiParam *) par[i]->value)); + // see above + if (value.find_first_of(" \n\t") != std::string::npos) { + ss << par[i]->name << " b64:" << base64_encode(value.c_str(), value.size()) << " "; + } else { + ss << par[i]->name << " " << value << " "; + } } else if (typeid(MultiParam) == par[i]->type) { ss << par[i]->name << " "; ss << MultiParam::format(*((MultiParam *) par[i]->value)) << " "; diff --git a/lib/mmseqs/src/commons/Parameters.h b/lib/mmseqs/src/commons/Parameters.h index 2d12965..e1d6113 100644 --- a/lib/mmseqs/src/commons/Parameters.h +++ b/lib/mmseqs/src/commons/Parameters.h @@ -106,6 +106,7 @@ class Parameters { static const unsigned int ALIGNMENT_MODE_SCORE_COV = 2; static const unsigned int ALIGNMENT_MODE_SCORE_COV_SEQID = 3; static const unsigned int ALIGNMENT_MODE_UNGAPPED = 4; + static const unsigned int ALIGNMENT_MODE_CLUSTER = 5; static const unsigned int EXPAND_TRANSFER_EVALUE = 0; static const unsigned int EXPAND_RESCORE_BACKTRACE = 1; @@ -202,7 +203,7 @@ class Parameters { // taxonomy search strategy static const int TAXONOMY_SINGLE_SEARCH = 1; static const int TAXONOMY_2BLCA = 2; - static const int TAXONOMY_ACCEL_2BLCA = 3; + static const int TAXONOMY_APPROX_2BLCA = 3; static const int TAXONOMY_TOP_HIT = 4; static const int PARSE_VARIADIC = 1; @@ -277,6 +278,10 @@ class Parameters { static const int SUBDB_MODE_HARD = 0; static const int SUBDB_MODE_SOFT = 1; + // unpackdb + static const int UNPACK_NAME_KEY = 0; + static const int UNPACK_NAME_ACCESSION = 1; + // result direction static const int PARAM_RESULT_DIRECTION_QUERY = 0; static const int PARAM_RESULT_DIRECTION_TARGET = 1; @@ -409,7 +414,8 @@ class Parameters { int numIterations; float startSens; int sensSteps; - bool sliceSearch; + bool exhaustiveSearch; + int exhaustiveFilterMsa; int strand; int orfFilter; float orfFilterSens; @@ -622,7 +628,7 @@ class 
Parameters { // taxonomy int taxonomySearchMode; - int taxonomyOutpuMode; + int taxonomyOutputMode; // createsubdb int subDbMode; @@ -631,6 +637,10 @@ class Parameters { std::string tarInclude; std::string tarExclude; + // unpackdb + std::string unpackSuffix; + int unpackNameMode; + // for modules that should handle -h themselves bool help; @@ -655,7 +665,7 @@ class Parameters { void printParameters(const std::string &module, int argc, const char* pargv[], const std::vector &par); - void checkIfDatabaseIsValid(const Command& command, bool isStartVar, bool isMiddleVar, bool isEndVar); + void checkIfDatabaseIsValid(const Command& command, int argc, const char** argv, bool isStartVar, bool isMiddleVar, bool isEndVar); std::vector removeParameter(const std::vector& par, const MMseqsParameter& x); @@ -791,7 +801,8 @@ class Parameters { PARAMETER(PARAM_NUM_ITERATIONS) PARAMETER(PARAM_START_SENS) PARAMETER(PARAM_SENS_STEPS) - PARAMETER(PARAM_SLICE_SEARCH) + PARAMETER(PARAM_EXHAUSTIVE_SEARCH) + PARAMETER(PARAM_EXHAUSTIVE_SEARCH_FILTER) PARAMETER(PARAM_STRAND) PARAMETER(PARAM_ORF_FILTER) PARAMETER(PARAM_ORF_FILTER_S) @@ -949,10 +960,23 @@ class Parameters { PARAMETER(PARAM_TAR_INCLUDE) PARAMETER(PARAM_TAR_EXCLUDE) + // unpackdb + PARAMETER(PARAM_UNPACK_SUFFIX) + PARAMETER(PARAM_UNPACK_NAME_MODE) + // for modules that should handle -h themselves PARAMETER(PARAM_HELP) PARAMETER(PARAM_HELP_LONG) + struct PredefinedSubstitutionMatrix{ + std::string name; + const unsigned char * subMatData; + unsigned int subMatDataLen; + PredefinedSubstitutionMatrix(const char * name, const unsigned char * subMatData, const unsigned int subMatDataLen) + : name(name), subMatData(subMatData), subMatDataLen(subMatDataLen) {} + + }; + std::vector substitutionMatrices; std::vector empty; std::vector onlyverbosity; @@ -1066,7 +1090,8 @@ class Parameters { void overrideParameterDescription(MMseqsParameter& par, const char *description, const char *regex = NULL, int category = 0); - static void checkIfTaxDbIsComplete(std::string & filename); + static std::vector findMissingTaxDbFiles(const std::string &filename); + static void printTaxDbError(const std::string &filename, const std::vector& missingFiles); static bool isEqualDbtype(const int type1, const int type2) { return ((type1 & 0x3FFFFFFF) == (type2 & 0x3FFFFFFF)); diff --git a/lib/mmseqs/src/commons/Sequence.cpp b/lib/mmseqs/src/commons/Sequence.cpp index c2c2419..51aec29 100644 --- a/lib/mmseqs/src/commons/Sequence.cpp +++ b/lib/mmseqs/src/commons/Sequence.cpp @@ -20,10 +20,10 @@ Sequence::Sequence(size_t maxLen, int seqType, const BaseMatrix *subMat, const u this->spaced = spaced; this->seqType = seqType; std::pair spacedKmerInformation; - if (userSpacedKmerPattern.empty()) { - spacedKmerInformation = getSpacedPattern(spaced, kmerSize); - } else { + if (spaced == true && userSpacedKmerPattern.empty() == false) { spacedKmerInformation = parseSpacedPattern(kmerSize, spaced, userSpacedKmerPattern); + } else { + spacedKmerInformation = getSpacedPattern(spaced, kmerSize); } this->spacedPattern = spacedKmerInformation.first; this->spacedPatternSize = spacedKmerInformation.second; diff --git a/lib/mmseqs/src/commons/StringBlock.cpp b/lib/mmseqs/src/commons/StringBlock.cpp deleted file mode 100644 index 6d7e534..0000000 --- a/lib/mmseqs/src/commons/StringBlock.cpp +++ /dev/null @@ -1,40 +0,0 @@ -#include "StringBlock.h" -#include "FastSort.h" -#include - -void StringBlock::compact() { - size_t* indices = new size_t[entryCount]; - std::iota(indices, indices + entryCount, 0); - 
SORT_SERIAL(indices, indices + entryCount, SortBlockByIndex(data, offsets)); - size_t unique = 1; - size_t totalLength = strlen(getString(indices[0])); - for (size_t i = 1; i < entryCount; ++i) { - if (strcmp(getString(indices[i]), getString(indices[i - 1])) == 0) { - offsets[indices[i]] = offsets[indices[i - 1]]; - } else { - unique++; - totalLength += strlen(getString(indices[i])); - } - } - char* newData = (char*)malloc((totalLength + unique) * sizeof(char)); - size_t* newOffsets = (size_t*)malloc(entryCount * sizeof(size_t)); - size_t offset = 0; - for (size_t i = 0; i < entryCount; ++i) { - if (i != 0 && strcmp(getString(indices[i]), getString(indices[i - 1])) == 0) { - newOffsets[indices[i]] = newOffsets[indices[i - 1]]; - } else { - newOffsets[indices[i]] = offset; - size_t length = strlen(getString(indices[i])); - memcpy(newData + offset, getString(indices[i]), length); - newData[offset + length] = '\0'; - offset += length + 1; - } - } - free(data); - data = newData; - free(offsets); - offsets = newOffsets; - entryCapacity = entryCount; - byteCapacity = (totalLength + unique) * sizeof(char); - delete[] indices; -} diff --git a/lib/mmseqs/src/commons/StringBlock.h b/lib/mmseqs/src/commons/StringBlock.h index b5ed55e..ab1da29 100644 --- a/lib/mmseqs/src/commons/StringBlock.h +++ b/lib/mmseqs/src/commons/StringBlock.h @@ -4,15 +4,19 @@ #include #include #include +#include +#include "FastSort.h" + +template class StringBlock { public: - StringBlock(size_t byteCapacity_ = 32 * 1024, size_t entryCapacity_ = 1024) { + StringBlock(size_t byteCapacity_ = 32 * 1024, T entryCapacity_ = 1024) { byteCapacity = byteCapacity_; data = (char*)malloc(byteCapacity * sizeof(char)); entryCapacity = entryCapacity_; - offsets = (size_t*)malloc(entryCapacity * sizeof(size_t)); + offsets = (T*)malloc(entryCapacity * sizeof(T)); offsets[0] = 0; entryCount = 0; @@ -26,7 +30,7 @@ class StringBlock { } } - const char* getString(size_t idx) const { + const char* getString(T idx) const { if (idx >= entryCount) { return NULL; } @@ -34,7 +38,7 @@ class StringBlock { } size_t append(const char* string, size_t length) { - size_t nextOffset = offsets[entryCount]; + T nextOffset = offsets[entryCount]; if (nextOffset + length >= byteCapacity) { byteCapacity = (nextOffset + length + 1) * 1.5; data = (char*)realloc(data, byteCapacity * sizeof(char)); @@ -45,16 +49,51 @@ class StringBlock { if (entryCount >= entryCapacity) { entryCapacity = (entryCount + 1) * 1.5; - offsets = (size_t*)realloc(offsets, entryCapacity * sizeof(size_t)); + offsets = (T*)realloc(offsets, entryCapacity * sizeof(T)); } offsets[entryCount] = nextOffset + length + 1; return entryCount - 1; } - void compact(); + void compact() { + T* indices = new T[entryCount]; + std::iota(indices, indices + entryCount, 0); + SORT_SERIAL(indices, indices + entryCount, SortBlockByIndex(data, offsets)); + size_t unique = 1; + size_t totalLength = strlen(getString(indices[0])); + for (size_t i = 1; i < entryCount; ++i) { + if (strcmp(getString(indices[i]), getString(indices[i - 1])) == 0) { + offsets[indices[i]] = offsets[indices[i - 1]]; + } else { + unique++; + totalLength += strlen(getString(indices[i])); + } + } + char* newData = (char*)malloc((totalLength + unique) * sizeof(char)); + T* newOffsets = (T*)malloc(entryCount * sizeof(T)); + T offset = 0; + for (T i = 0; i < entryCount; ++i) { + if (i != 0 && strcmp(getString(indices[i]), getString(indices[i - 1])) == 0) { + newOffsets[indices[i]] = newOffsets[indices[i - 1]]; + } else { + newOffsets[indices[i]] = 
offset; + size_t length = strlen(getString(indices[i])); + memcpy(newData + offset, getString(indices[i]), length); + newData[offset + length] = '\0'; + offset += length + 1; + } + } + free(data); + data = newData; + free(offsets); + offsets = newOffsets; + entryCapacity = entryCount; + byteCapacity = (totalLength + unique) * sizeof(char); + delete[] indices; + } static size_t memorySize(const StringBlock& block) { - return 3 * sizeof(size_t) + block.byteCapacity * sizeof(char) + block.entryCapacity * sizeof(size_t); + return sizeof(size_t) + 2 * sizeof(T) + block.byteCapacity * sizeof(char) + block.entryCapacity * sizeof(T); } static char* serialize(const StringBlock &block) { @@ -62,14 +101,14 @@ class StringBlock { char* p = mem; memcpy(p, &block.byteCapacity, sizeof(size_t)); p += sizeof(size_t); - memcpy(p, &block.entryCapacity, sizeof(size_t)); - p += sizeof(size_t); - memcpy(p, &block.entryCount, sizeof(size_t)); - p += sizeof(size_t); + memcpy(p, &block.entryCapacity, sizeof(T)); + p += sizeof(T); + memcpy(p, &block.entryCount, sizeof(T)); + p += sizeof(T); memcpy(p, block.data, block.byteCapacity * sizeof(char)); p += block.byteCapacity * sizeof(char); - memcpy(p, block.offsets, block.entryCapacity * sizeof(size_t)); - p += block.entryCapacity * sizeof(size_t); + memcpy(p, block.offsets, block.entryCapacity * sizeof(T)); + p += block.entryCapacity * sizeof(T); return mem; } @@ -77,35 +116,35 @@ class StringBlock { const char* p = mem; size_t byteCapacity = *((size_t*)p); p += sizeof(size_t); - size_t entryCapacity = *((size_t*)p); - p += sizeof(size_t); - size_t entryCount = *((size_t*)p); - p += sizeof(size_t); + size_t entryCapacity = *((T*)p); + p += sizeof(T); + size_t entryCount = *((T*)p); + p += sizeof(T); char* data = (char*)p; p += byteCapacity * sizeof(char); - size_t* offsets = (size_t*)p; - p += entryCapacity * sizeof(size_t); - return new StringBlock(byteCapacity, entryCapacity, entryCount, data, offsets); + T* offsets = (T*)p; + p += entryCapacity * sizeof(T); + return new StringBlock(byteCapacity, entryCapacity, entryCount, data, offsets); } private: - StringBlock(size_t byteCapacity, size_t entryCapacity, size_t entryCount, char* data, size_t* offsets) + StringBlock(size_t byteCapacity, T entryCapacity, T entryCount, char* data, T* offsets) : byteCapacity(byteCapacity), entryCapacity(entryCapacity), entryCount(entryCount), data(data), offsets(offsets), externalData(true) {}; size_t byteCapacity; - size_t entryCapacity; - size_t entryCount; + T entryCapacity; + T entryCount; char* data; - size_t* offsets; + T* offsets; bool externalData; struct SortBlockByIndex { - SortBlockByIndex(char* data, size_t* offsets) : data(data), offsets(offsets) {} - bool operator() (size_t i, size_t j) const { + SortBlockByIndex(char* data, T* offsets) : data(data), offsets(offsets) {} + bool operator() (T i, T j) const { return strcmp(data + offsets[i], data + offsets[j]) < 0; } char* data; - size_t* offsets; + T* offsets; }; }; diff --git a/lib/mmseqs/src/commons/SubstitutionMatrix.cpp b/lib/mmseqs/src/commons/SubstitutionMatrix.cpp index 5ba72bd..e89a4e4 100644 --- a/lib/mmseqs/src/commons/SubstitutionMatrix.cpp +++ b/lib/mmseqs/src/commons/SubstitutionMatrix.cpp @@ -3,12 +3,6 @@ #include "Debug.h" #include "lambda_calculator.h" -#include "blosum62.out.h" -#include "PAM30.out.h" -#include "VTML80.out.h" -#include "VTML40.out.h" - -#include "nucleotide.out.h" #include #include @@ -18,22 +12,7 @@ SubstitutionMatrix::SubstitutionMatrix(const char *filename, float bitFactor, float 
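StringBlock's compact() logic moves into the header above because the class becomes a template over its offset type; NcbiTaxonomy (further below) instantiates StringBlock<unsigned int> to shrink its offset table. A small usage sketch, assuming StringBlock.h is on the include path and using made-up example strings:

    #include "StringBlock.h"
    #include <cassert>
    #include <cstring>

    int main() {
        StringBlock<unsigned int> names;
        size_t a = names.append("Bacteria", strlen("Bacteria"));
        size_t b = names.append("Bacteria", strlen("Bacteria"));
        size_t c = names.append("Archaea", strlen("Archaea"));
        names.compact();  // duplicate entries now share a single copy of the bytes
        assert(strcmp(names.getString(a), names.getString(b)) == 0);
        assert(strcmp(names.getString(c), "Archaea") == 0);
        return 0;
    }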
scoreBias) : bitFactor(bitFactor) { std::pair parsedMatrix = BaseMatrix::unserialize(filename); - if (strcmp(parsedMatrix.first.c_str(), "nucleotide.out") == 0) { - matrixData = std::string((const char *)nucleotide_out, nucleotide_out_len); - matrixName = "nucleotide.out"; - } else if (strcmp(parsedMatrix.first.c_str(), "blosum62.out") == 0) { - matrixData = std::string((const char *) blosum62_out, blosum62_out_len); - matrixName = "blosum62.out"; - } else if (strcmp(parsedMatrix.first.c_str(), "VTML80.out") == 0) { - matrixData = std::string((const char *)VTML80_out, VTML80_out_len); - matrixName = "VTML80.out"; - } else if (strcmp(parsedMatrix.first.c_str(), "VTML40.out") == 0) { - matrixData = std::string((const char *)VTML40_out, VTML40_out_len); - matrixName = "VTML40.out"; - } else if (strcmp(parsedMatrix.first.c_str(), "PAM30.out") == 0) { - matrixData = std::string((const char *)PAM30_out, PAM30_out_len); - matrixName = "PAM30.out"; - } else if(parsedMatrix.second != "") { + if(parsedMatrix.second != "") { // the filename can contain the substituion matrix // SUBMATNAME.out:DATA // this is used for index databases diff --git a/lib/mmseqs/src/commons/Util.h b/lib/mmseqs/src/commons/Util.h index 8dd15cd..32fc706 100644 --- a/lib/mmseqs/src/commons/Util.h +++ b/lib/mmseqs/src/commons/Util.h @@ -47,23 +47,27 @@ template<> std::string SSTR(float); #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) -#ifndef __has_builtin -#define __has_builtin(x) 0 +#if defined(__has_builtin) +# define HAS_BUILTIN(x) __has_builtin(x) +#else +# define HAS_BUILTIN(x) (0) #endif -#if defined(__GNUC__) || __has_builtin(__builtin_expect) -#define LIKELY(x) __builtin_expect((x),1) -#define UNLIKELY(x) __builtin_expect((x),0) +#if HAS_BUILTIN(__builtin_expect) +# define LIKELY(x) __builtin_expect((x),1) +# define UNLIKELY(x) __builtin_expect((x),0) #else -#define LIKELY(x) (x) -#define UNLIKELY(x) (x) +# define LIKELY(x) (x) +# define UNLIKELY(x) (x) #endif -#ifndef __has_attribute -#define __has_attribute(x) 0 +#if defined(__has_attribute) +# define HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define HAS_ATTRIBUTE(x) (0) #endif -#if defined(__GNUC__) || __has_attribute(__unused__) +#if HAS_ATTRIBUTE(__unused__) # define MAYBE_UNUSED(x) x __attribute__((__unused__)) #else # define MAYBE_UNUSED(x) x diff --git a/lib/mmseqs/src/linclust/LinsearchIndexReader.cpp b/lib/mmseqs/src/linclust/LinsearchIndexReader.cpp index be7d1ee..085745a 100644 --- a/lib/mmseqs/src/linclust/LinsearchIndexReader.cpp +++ b/lib/mmseqs/src/linclust/LinsearchIndexReader.cpp @@ -269,7 +269,8 @@ std::string LinsearchIndexReader::findIncompatibleParameter(DBReadermatrixName, subMat->matrixData); + dbw.writeData(subData, BaseMatrix::memorySize(subMat->matrixName, subMat->matrixData), PrefilteringIndexReader::SCOREMATRIXNAME, 0); dbw.alignToPageSize(); free(subData); diff --git a/lib/mmseqs/src/linclust/kmermatcher.cpp b/lib/mmseqs/src/linclust/kmermatcher.cpp index a5cc154..6c2a16d 100644 --- a/lib/mmseqs/src/linclust/kmermatcher.cpp +++ b/lib/mmseqs/src/linclust/kmermatcher.cpp @@ -1,34 +1,27 @@ +// include xxhash early to avoid incompatibilites with SIMDe +#define XXH_INLINE_ALL +#include "xxhash.h" + #include "kmermatcher.h" +#include "Debug.h" #include "Indexer.h" -#include "ReducedMatrix.h" -#include "DBWriter.h" #include "SubstitutionMatrix.h" -#include "Util.h" -#include "Parameters.h" -#include "Matcher.h" -#include "Debug.h" -#include "MemoryTracker.h" -#include "DBReader.h" -#include "MathUtil.h" -#include "FileUtil.h" +#include 
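The kmermatcher include reshuffle above pins down one ordering requirement: xxhash has to be included first, with XXH_INLINE_ALL defined, to avoid the SIMDe incompatibility mentioned in the new comment. Any translation unit mixing the two needs the same ordering; a minimal, self-contained example of calling the inlined API (the k-mer string and seed here are just placeholders):

    // xxhash is included first and inlined so that SIMDe-related headers
    // included later in the same translation unit cannot clash with it.
    #define XXH_INLINE_ALL
    #include "xxhash.h"

    #include <cstdio>

    int main() {
        const char kmer[] = "ACGTACGTACGTACGTA";
        unsigned long long hash = XXH64(kmer, sizeof(kmer) - 1, 0);  // data, length, seed
        printf("%016llx\n", hash);
        return 0;
    }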
"ReducedMatrix.h" +#include "ExtendedSubstitutionMatrix.h" #include "NucleotideMatrix.h" -#include "QueryMatcher.h" -#include "FileUtil.h" -#include "Timer.h" #include "tantan.h" -#include "ExtendedSubstitutionMatrix.h" +#include "QueryMatcher.h" #include "KmerGenerator.h" #include "MarkovKmerScore.h" -#include "xxhash.h" -#include -#include -#include -#include -#include +#include "FileUtil.h" +#include "FastSort.h" + +#include #include #include -#include -#include "FastSort.h" + +#include +#include #ifdef OPENMP #include @@ -1191,8 +1184,8 @@ void setKmerLengthAndAlphabet(Parameters ¶meters, size_t aaDbSize, int seqTy if(Parameters::isEqualDbtype(seqTyp, Parameters::DBTYPE_NUCLEOTIDES)){ if(parameters.kmerSize == 0) { parameters.kmerSize = std::max(17, static_cast(log(static_cast(aaDbSize))/log(4))); + parameters.spacedKmerPattern = ""; parameters.alphabetSize.nucleotides = 5; - } if(parameters.kmersPerSequence == 0){ parameters.kmersPerSequence = 60; @@ -1209,6 +1202,7 @@ void setKmerLengthAndAlphabet(Parameters ¶meters, size_t aaDbSize, int seqTy parameters.kmerSize = std::max(10, static_cast(log(static_cast(aaDbSize))/log(8.7))); parameters.alphabetSize.aminoacids = 13; } + parameters.spacedKmerPattern = ""; } if(parameters.kmersPerSequence == 0){ parameters.kmersPerSequence = 20; diff --git a/lib/mmseqs/src/linclust/kmermatcher.h b/lib/mmseqs/src/linclust/kmermatcher.h index d342208..43a7413 100644 --- a/lib/mmseqs/src/linclust/kmermatcher.h +++ b/lib/mmseqs/src/linclust/kmermatcher.h @@ -1,12 +1,11 @@ #ifndef MMSEQS_KMERMATCHER_H #define MMSEQS_KMERMATCHER_H -#include -#include "DBWriter.h" -#include "Util.h" #include "DBReader.h" +#include "DBWriter.h" #include "Parameters.h" #include "BaseMatrix.h" +#include struct SequencePosition{ unsigned short score; diff --git a/lib/mmseqs/src/prefiltering/CacheFriendlyOperations.cpp b/lib/mmseqs/src/prefiltering/CacheFriendlyOperations.cpp index 7eb00b9..165e81a 100644 --- a/lib/mmseqs/src/prefiltering/CacheFriendlyOperations.cpp +++ b/lib/mmseqs/src/prefiltering/CacheFriendlyOperations.cpp @@ -252,14 +252,14 @@ bool CacheFriendlyOperations::checkForOverflowAndResizeArray(bool inclu delete[] binDataFrame; binDataFrame = new(std::nothrow) CounterResult[BINCOUNT * binSize]; - memset(binDataFrame, 0, sizeof(CounterResult) * binSize * BINCOUNT); Util::checkAllocation(binDataFrame, "Cannot reallocate reallocBinMemory in CacheFriendlyOperations"); + memset(binDataFrame, 0, sizeof(CounterResult) * binSize * BINCOUNT); if (includeTmpResult) { delete[] tmpElementBuffer; tmpElementBuffer = new(std::nothrow) TmpResult[binSize]; - memset(tmpElementBuffer, 0, sizeof(TmpResult) * binSize); Util::checkAllocation(tmpElementBuffer, "Cannot reallocate tmpElementBuffer in CacheFriendlyOperations"); + memset(tmpElementBuffer, 0, sizeof(TmpResult) * binSize); } return true; } diff --git a/lib/mmseqs/src/prefiltering/PrefilteringIndexReader.cpp b/lib/mmseqs/src/prefiltering/PrefilteringIndexReader.cpp index 5294321..201f1da 100644 --- a/lib/mmseqs/src/prefiltering/PrefilteringIndexReader.cpp +++ b/lib/mmseqs/src/prefiltering/PrefilteringIndexReader.cpp @@ -103,8 +103,8 @@ void PrefilteringIndexReader::createIndexFile(const std::string &outDB, } Debug(Debug::INFO) << "Write SCOREMATRIXNAME (" << SCOREMATRIXNAME << ")\n"; - char* subData = BaseMatrix::serialize(subMat); - writer.writeData(subData, BaseMatrix::memorySize(subMat), SCOREMATRIXNAME, SPLIT_META); + char* subData = BaseMatrix::serialize(subMat->matrixName, subMat->matrixData); + writer.writeData(subData, 
BaseMatrix::memorySize(subMat->matrixName, subMat->matrixData), SCOREMATRIXNAME, SPLIT_META); writer.alignToPageSize(SPLIT_META); free(subData); diff --git a/lib/mmseqs/src/taxonomy/NcbiTaxonomy.cpp b/lib/mmseqs/src/taxonomy/NcbiTaxonomy.cpp index 5f84bc8..a035d17 100644 --- a/lib/mmseqs/src/taxonomy/NcbiTaxonomy.cpp +++ b/lib/mmseqs/src/taxonomy/NcbiTaxonomy.cpp @@ -13,7 +13,7 @@ #include #include -const int NcbiTaxonomy::SERIALIZATION_VERSION = 1; +const int NcbiTaxonomy::SERIALIZATION_VERSION = 2; int **makeMatrix(size_t maxNodes) { size_t dimension = maxNodes * 2; @@ -33,7 +33,7 @@ void deleteMatrix(int** M) { } NcbiTaxonomy::NcbiTaxonomy(const std::string &namesFile, const std::string &nodesFile, const std::string &mergedFile) : externalData(false) { - block = new StringBlock(); + block = new StringBlock(); std::vector tmpNodes; loadNodes(tmpNodes, nodesFile); loadMerged(mergedFile); @@ -266,12 +266,10 @@ bool NcbiTaxonomy::IsAncestor(TaxID ancestor, TaxID child) { } if (!nodeExists(child)) { - Debug(Debug::WARNING) << "No node for taxID " << child << ".\n"; return false; } if (!nodeExists(ancestor)) { - Debug(Debug::WARNING) << "No node for taxID " << ancestor << ".\n"; return false; } @@ -400,7 +398,7 @@ int NcbiTaxonomy::nodeId(TaxID taxonId) const { } bool NcbiTaxonomy::nodeExists(TaxID taxonId) const { - return D[taxonId] != -1; + return taxonId <= maxTaxID && D[taxonId] != -1; } TaxonNode const * NcbiTaxonomy::taxonNode(TaxID taxonId, bool fail) const { @@ -708,7 +706,7 @@ std::pair NcbiTaxonomy::serialize(const NcbiTaxonomy& t) { size_t matrixDim = (t.maxNodes * 2); size_t matrixK = (int)(MathUtil::flog2(matrixDim)) + 1; size_t matrixSize = matrixDim * matrixK * sizeof(int); - size_t blockSize = StringBlock::memorySize(*t.block); + size_t blockSize = StringBlock::memorySize(*t.block); size_t memSize = sizeof(int) // SERIALIZATION_VERSION + sizeof(size_t) // maxNodes + sizeof(int) // maxTaxID @@ -739,7 +737,7 @@ std::pair NcbiTaxonomy::serialize(const NcbiTaxonomy& t) { p += t.maxNodes * sizeof(int); memcpy(p, t.M[0], matrixSize); p += matrixSize; - char* blockData = StringBlock::serialize(*t.block); + char* blockData = StringBlock::serialize(*t.block); memcpy(p, blockData, blockSize); p += blockSize; free(blockData); @@ -776,6 +774,6 @@ NcbiTaxonomy* NcbiTaxonomy::unserialize(char* mem) { M[i] = M[i-1] + matrixK; } p += matrixSize; - StringBlock* block = StringBlock::unserialize(p); + StringBlock* block = StringBlock::unserialize(p); return new NcbiTaxonomy(taxonNodes, maxNodes, maxTaxID, D, E, L, H, M, block); } diff --git a/lib/mmseqs/src/taxonomy/NcbiTaxonomy.h b/lib/mmseqs/src/taxonomy/NcbiTaxonomy.h index 9ff2947..46a3fd0 100644 --- a/lib/mmseqs/src/taxonomy/NcbiTaxonomy.h +++ b/lib/mmseqs/src/taxonomy/NcbiTaxonomy.h @@ -110,7 +110,8 @@ class NcbiTaxonomy { bool IsAncestor(TaxID ancestor, TaxID child); TaxonNode const* taxonNode(TaxID taxonId, bool fail = true) const; - //std::unordered_map getCladeCounts(std::unordered_map& taxonCounts, TaxID taxon = 1) const; + bool nodeExists(TaxID taxId) const; + std::unordered_map getCladeCounts(std::unordered_map& taxonCounts) const; WeightedTaxResult weightedMajorityLCA(const std::vector &setTaxa, const float majorityCutoff); @@ -129,12 +130,11 @@ class NcbiTaxonomy { void elh(std::vector> const & children, int node, int level, std::vector &tmpE, std::vector &tmpL); void InitRangeMinimumQuery(); int nodeId(TaxID taxId) const; - bool nodeExists(TaxID taxId) const; int RangeMinimumQuery(int i, int j) const; int lcaHelper(int i, int j) 
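Two small NcbiTaxonomy changes above are easy to miss: SERIALIZATION_VERSION moves from 1 to 2, in line with the changed on-disk layout of the templated StringBlock, and nodeExists() now rejects taxon IDs above maxTaxID instead of indexing past the end of the D lookup table. The guard in miniature; the non-negativity check is an extra precaution added here, not part of the original:

    #include <vector>

    // D maps a taxon ID to its node index, or -1 if the ID is unknown;
    // assumes D.size() == maxTaxID + 1.
    static bool nodeExistsSketch(int taxonId, int maxTaxID, const std::vector<int>& D) {
        return taxonId >= 0 && taxonId <= maxTaxID && D[taxonId] != -1;
    }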
const; - NcbiTaxonomy(TaxonNode* taxonNodes, size_t maxNodes, int maxTaxID, int *D, int *E, int *L, int *H, int **M, StringBlock *block) + NcbiTaxonomy(TaxonNode* taxonNodes, size_t maxNodes, int maxTaxID, int *D, int *E, int *L, int *H, int **M, StringBlock *block) : taxonNodes(taxonNodes), maxNodes(maxNodes), maxTaxID(maxTaxID), D(D), E(E), L(L), H(H), M(M), block(block), externalData(true), mmapData(NULL), mmapSize(0) {}; int maxTaxID; int *D; // maps from taxID to node ID in taxonNodes @@ -142,7 +142,7 @@ class NcbiTaxonomy { int *L; // Level of nodes in tour sequence (size 2N-1) int *H; int **M; - StringBlock* block; + StringBlock* block; bool externalData; char* mmapData; diff --git a/lib/mmseqs/src/taxonomy/lca.cpp b/lib/mmseqs/src/taxonomy/lca.cpp index 0e4d6ce..7ee22ae 100644 --- a/lib/mmseqs/src/taxonomy/lca.cpp +++ b/lib/mmseqs/src/taxonomy/lca.cpp @@ -33,6 +33,16 @@ int dolca(int argc, const char **argv, const Command& command, bool majority) { DBReader reader(par.db2.c_str(), par.db2Index.c_str(), par.threads, DBReader::USE_DATA|DBReader::USE_INDEX); reader.open(DBReader::LINEAR_ACCCESS); + if (majority) { + if (par.voteMode != Parameters::AGG_TAX_UNIFORM && Parameters::isEqualDbtype(reader.getDbtype(), Parameters::DBTYPE_CLUSTER_RES)) { + Debug(Debug::WARNING) << "Cluster input can only be used with --vote-mode 0\nContinuing with --vote-mode 0\n"; + par.voteMode = Parameters::AGG_TAX_UNIFORM; + } else if (par.voteMode == Parameters::AGG_TAX_MINUS_LOG_EVAL && (Parameters::isEqualDbtype(reader.getDbtype(), Parameters::DBTYPE_PREFILTER_RES) || Parameters::isEqualDbtype(reader.getDbtype(), Parameters::DBTYPE_PREFILTER_REV_RES))) { + Debug(Debug::WARNING) << "Prefilter input can only be used with --vote-mode 0 or 2\nContinuing with --vote-mode 0\n"; + par.voteMode = Parameters::AGG_TAX_UNIFORM; + } + } + DBWriter writer(par.db3.c_str(), par.db3Index.c_str(), par.threads, par.compressed, Parameters::DBTYPE_TAXONOMICAL_RESULT); writer.open(); @@ -40,15 +50,35 @@ int dolca(int argc, const char **argv, const Command& command, bool majority) { // a few NCBI taxa are blacklisted by default, they contain unclassified sequences (e.g. metagenomes) or other sequences (e.g. 
plasmids) // if we do not remove those, a lot of sequences would be classified as Root, even though they have a sensible LCA - std::vector blacklist = Util::split(par.blacklist, ","); - const size_t taxaBlacklistSize = blacklist.size(); - int* taxaBlacklist = new int[taxaBlacklistSize]; - for (size_t i = 0; i < taxaBlacklistSize; ++i) { - taxaBlacklist[i] = Util::fast_atoi(blacklist[i].c_str()); + std::vector blacklist; + std::vector splits = Util::split(par.blacklist, ","); + for (size_t i = 0; i < splits.size(); ++i) { + TaxID taxon = Util::fast_atoi(splits[i].c_str()); + if (taxon == 0) { + Debug(Debug::WARNING) << "Cannot block root taxon 0\n"; + continue; + } + if (t->nodeExists(taxon) == false) { + Debug(Debug::WARNING) << "Ignoring missing blocked taxon " << taxon << "\n"; + continue; + } + + const char *split; + if ((split = strchr(splits[i].c_str(), ':')) != NULL) { + const char* name = split + 1; + const TaxonNode* node = t->taxonNode(taxon, false); + if (node == NULL) { + Debug(Debug::WARNING) << "Ignoring missing blocked taxon " << taxon << "\n"; + continue; + } + const char* nodeName = t->getString(node->nameIdx); + if (strcmp(nodeName, name) != 0) { + Debug(Debug::WARNING) << "Node name '" << name << "' does not match to be blocked name '" << nodeName << "'\n"; + continue; + } + } + blacklist.emplace_back(taxon); } - Debug::Progress progress(reader.getSize()); - size_t taxonNotFound = 0; - size_t found = 0; // will be used when no hits std::string noTaxResult = "0\tno rank\tunclassified"; @@ -61,7 +91,9 @@ int dolca(int argc, const char **argv, const Command& command, bool majority) { noTaxResult += '\n'; - Debug(Debug::INFO) << "Computing LCA\n"; + size_t taxonNotFound = 0; + size_t found = 0; + Debug::Progress progress(reader.getSize()); #pragma omp parallel { const char *entry[255]; @@ -109,10 +141,11 @@ int dolca(int argc, const char **argv, const Command& command, bool majority) { // remove blacklisted taxa bool isBlacklisted = false; - for (size_t j = 0; j < taxaBlacklistSize; ++j) { - if(taxaBlacklist[j] == 0) + for (size_t j = 0; j < blacklist.size(); ++j) { + if (blacklist[j] == 0) { continue; - if (t->IsAncestor(taxaBlacklist[j], taxon)) { + } + if (t->IsAncestor(blacklist[j], taxon)) { isBlacklisted = true; break; } @@ -180,12 +213,10 @@ int dolca(int argc, const char **argv, const Command& command, bool majority) { result.clear(); } } - Debug(Debug::INFO) << "\n"; Debug(Debug::INFO) << "Taxonomy for " << taxonNotFound << " out of " << taxonNotFound+found << " entries not found\n"; writer.close(); reader.close(); delete t; - delete[] taxaBlacklist; return EXIT_SUCCESS; } diff --git a/lib/mmseqs/src/taxonomy/taxonomyreport.cpp b/lib/mmseqs/src/taxonomy/taxonomyreport.cpp index 7d64ab4..1a7d9df 100644 --- a/lib/mmseqs/src/taxonomy/taxonomyreport.cpp +++ b/lib/mmseqs/src/taxonomy/taxonomyreport.cpp @@ -5,6 +5,7 @@ #include "Debug.h" #include "Util.h" #include "krona_prelude.html.h" +#include "FastSort.h" #include #include @@ -13,12 +14,12 @@ #include #endif -static bool compareToFirstInt(const std::pair& lhs, const std::pair& rhs){ +static bool compareToFirstInt(const std::pair &lhs, const std::pair &rhs) { return (lhs.first <= rhs.first); } template -V at(const std::unordered_map& map, K key, V default_value = V()) { +V at(const std::unordered_map &map, K key, V default_value = V()) { typename std::unordered_map::const_iterator it = map.find(key); if (it == map.end()) { return default_value; @@ -27,7 +28,7 @@ V at(const std::unordered_map& map, K key, V default_value 
= V()) { } } -unsigned int cladeCountVal(const std::unordered_map& map, TaxID key) { +unsigned int cladeCountVal(const std::unordered_map &map, TaxID key) { typename std::unordered_map::const_iterator it = map.find(key); if (it == map.end()) { return 0; @@ -36,11 +37,10 @@ unsigned int cladeCountVal(const std::unordered_map& map, Ta } } - -void taxReport(FILE* FP, const NcbiTaxonomy& taxDB, const std::unordered_map & cladeCounts,unsigned long totalReads,TaxID taxID = 0, int depth = 0) { +void taxReport(FILE *FP, const NcbiTaxonomy &taxDB, const std::unordered_map &cladeCounts, unsigned long totalReads, TaxID taxID = 0, int depth = 0) { std::unordered_map::const_iterator it = cladeCounts.find(taxID); - unsigned int cladeCount = it == cladeCounts.end()? 0 : it->second.cladeCount; - unsigned int taxCount = it == cladeCounts.end()? 0 : it->second.taxCount; + unsigned int cladeCount = it == cladeCounts.end() ? 0 : it->second.cladeCount; + unsigned int taxCount = it == cladeCounts.end() ? 0 : it->second.taxCount; if (taxID == 0) { if (cladeCount > 0) { fprintf(FP, "%.4f\t%i\t%i\tno rank\t0\tunclassified\n", @@ -52,14 +52,14 @@ void taxReport(FILE* FP, const NcbiTaxonomy& taxDB, const std::unordered_maprankIdx), taxID, std::string(2*depth, ' ').c_str(), taxDB.getString(taxon->nameIdx)); - + 100 * cladeCount / double(totalReads), cladeCount, taxCount, + taxDB.getString(taxon->rankIdx), taxID, std::string(2 * depth, ' ').c_str(), taxDB.getString(taxon->nameIdx)); std::vector children = it->second.children; - std::sort(children.begin(), children.end(), [&](int a, int b) { return cladeCountVal(cladeCounts, a) > cladeCountVal(cladeCounts,b); }); - for (TaxID childTaxId : children) { + SORT_SERIAL(children.begin(), children.end(), [&](int a, int b) { return cladeCountVal(cladeCounts, a) > cladeCountVal(cladeCounts, b); }); + for (size_t i = 0; i < children.size(); ++i) { + TaxID childTaxId = children[i]; if (cladeCounts.count(childTaxId)) { taxReport(FP, taxDB, cladeCounts, totalReads, childTaxId, depth + 1); } else { @@ -69,7 +69,7 @@ void taxReport(FILE* FP, const NcbiTaxonomy& taxDB, const std::unordered_map & cladeCounts,unsigned long totalReads,TaxID taxID = 0, int depth = 0) { +void kronaReport(FILE *FP, const NcbiTaxonomy &taxDB, const std::unordered_map &cladeCounts, unsigned long totalReads, TaxID taxID = 0, int depth = 0) { std::unordered_map::const_iterator it = cladeCounts.find(taxID); - unsigned int cladeCount = it == cladeCounts.end()? 0 : it->second.cladeCount; -// unsigned int taxCount = it == cladeCounts.end()? 0 : it->second.taxCount; - if (cladeCount == 0) { - return; - } + unsigned int cladeCount = it == cladeCounts.end() ? 
0 : it->second.cladeCount; if (taxID == 0) { if (cladeCount > 0) { fprintf(FP, "%d", cladeCount); @@ -113,12 +109,13 @@ void kronaReport(FILE* FP, const NcbiTaxonomy& taxDB, const std::unordered_mapnameIdx)); fprintf(FP, "%d", escapedName.c_str(), cladeCount); std::vector children = it->second.children; - std::sort(children.begin(), children.end(), [&](int a, int b) { return cladeCountVal(cladeCounts, a) > cladeCountVal(cladeCounts,b); }); - for (TaxID childTaxId : children) { + SORT_SERIAL(children.begin(), children.end(), [&](int a, int b) { return cladeCountVal(cladeCounts, a) > cladeCountVal(cladeCounts, b); }); + for (size_t i = 0; i < children.size(); ++i) { + TaxID childTaxId = children[i]; if (cladeCounts.count(childTaxId)) { kronaReport(FP, taxDB, cladeCounts, totalReads, childTaxId, depth + 1); } else { @@ -129,79 +126,113 @@ void kronaReport(FILE* FP, const NcbiTaxonomy& taxDB, const std::unordered_map> mapping; - if(FileUtil::fileExists(std::string(par.db1 + "_mapping").c_str()) == false){ - Debug(Debug::ERROR) << par.db1 + "_mapping" << " does not exist. Please create the taxonomy mapping!\n"; - EXIT(EXIT_FAILURE); - } - bool isSorted = Util::readMapping( par.db1 + "_mapping", mapping); - if(isSorted == false){ - std::stable_sort(mapping.begin(), mapping.end(), compareToFirstInt); + NcbiTaxonomy *taxDB = NcbiTaxonomy::openTaxonomy(par.db1); + // allow reading any kind of sequence database + const int readerDbType = FileUtil::parseDbType(par.db2.c_str()); + const bool isSequenceDB = Parameters::isEqualDbtype(readerDbType, Parameters::DBTYPE_HMM_PROFILE) + || Parameters::isEqualDbtype(readerDbType, Parameters::DBTYPE_AMINO_ACIDS) + || Parameters::isEqualDbtype(readerDbType, Parameters::DBTYPE_NUCLEOTIDES); + int dataMode = DBReader::USE_INDEX; + if (isSequenceDB == false) { + dataMode |= DBReader::USE_DATA; } - - DBReader reader(par.db2.c_str(), par.db2Index.c_str(), 1, DBReader::USE_DATA|DBReader::USE_INDEX); + DBReader reader(par.db2.c_str(), par.db2Index.c_str(), par.threads, dataMode); reader.open(DBReader::LINEAR_ACCCESS); - // TODO: Better way to get file specified by param3? - FILE *resultFP = fopen(par.db3.c_str(), "w"); - - - // 2. Read LCA file - Debug::Progress progress(reader.getSize()); - Debug(Debug::INFO) << "Reading LCA results\n"; + // support reading both LCA databases and result databases (e.g. alignment) + const bool isTaxonomyInput = Parameters::isEqualDbtype(reader.getDbtype(), Parameters::DBTYPE_TAXONOMICAL_RESULT); + std::vector> mapping; + if (isTaxonomyInput == false) { + if (FileUtil::fileExists(std::string(par.db1 + "_mapping").c_str()) == false) { + Debug(Debug::ERROR) << par.db1 + "_mapping" << " does not exist. 
Please create the taxonomy mapping!\n"; + EXIT(EXIT_FAILURE); + } + bool isSorted = Util::readMapping(par.db1 + "_mapping", mapping); + if (isSorted == false) { + std::stable_sort(mapping.begin(), mapping.end(), compareToFirstInt); + } + } + FILE *resultFP = FileUtil::openAndDelete(par.db3.c_str(), "w"); std::unordered_map taxCounts; - -// Currentlly not parallel -// #pragma omp parallel + Debug::Progress progress(reader.getSize()); +#pragma omp parallel { - const char *entry[255]; - //char buffer[1024]; unsigned int thread_idx = 0; #ifdef OPENMP thread_idx = (unsigned int) omp_get_thread_num(); #endif -// #pragma omp for schedule(dynamic, 10) reduction (+:taxonNotFound, found) + std::unordered_map localTaxCounts; +#pragma omp for schedule(dynamic, 10) for (size_t i = 0; i < reader.getSize(); ++i) { progress.updateProgress(); + if (isSequenceDB == true) { + std::pair val; + val.first = reader.getDbKey(i); + std::vector>::iterator mappingIt; + mappingIt = std::upper_bound(mapping.begin(), mapping.end(), val, compareToFirstInt); + if (mappingIt != mapping.end() && mappingIt->first == val.first) { + ++localTaxCounts[mappingIt->second]; + } + continue; + } + char *data = reader.getData(i, thread_idx); + while (*data != '\0') { + if (isTaxonomyInput) { + TaxID taxon = Util::fast_atoi(data); + ++localTaxCounts[taxon]; + } else { + // match dbKey to its taxon based on mapping + std::pair val; + val.first = Util::fast_atoi(data); + std::vector>::iterator mappingIt; + mappingIt = std::upper_bound(mapping.begin(), mapping.end(), val, compareToFirstInt); + if (mappingIt != mapping.end() && mappingIt->first == val.first) { + ++localTaxCounts[mappingIt->second]; + } + } + data = Util::skipLine(data); + } + } - const size_t columns = Util::getWordsOfLine(data, entry, 255); - if (columns == 0) { - Debug(Debug::WARNING) << "Empty entry: " << i << "!"; + // merge maps again +#pragma omp critical + for (std::unordered_map::const_iterator it = localTaxCounts.cbegin(); it != localTaxCounts.cend(); ++it) { + if (taxCounts[it->first]) { + taxCounts[it->first] += it->second; } else { - int taxon = Util::fast_atoi(entry[0]); - ++taxCounts[taxon]; - //__sync_fetch_and_add(&(offsets[kmerIdx]), 1); + taxCounts[it->first] = it->second; } } - }; - Debug(Debug::INFO) << "\n"; - Debug(Debug::INFO) << "Found " << taxCounts.size() << " different taxa for " << reader.getSize() << " different reads.\n"; + } + Debug(Debug::INFO) << "Found " << taxCounts.size() << " different taxa for " << reader.getSize() << " different reads\n"; unsigned int unknownCnt = (taxCounts.find(0) != taxCounts.end()) ? 
taxCounts.at(0) : 0; - Debug(Debug::INFO) << unknownCnt << " reads are unclassified.\n"; + Debug(Debug::INFO) << unknownCnt << " reads are unclassified\n"; + const size_t entryCount = reader.getSize(); + reader.close(); std::unordered_map cladeCounts = taxDB->getCladeCounts(taxCounts); if (par.reportMode == 0) { - taxReport(resultFP, *taxDB, cladeCounts, reader.getSize()); + taxReport(resultFP, *taxDB, cladeCounts, entryCount); } else { fwrite(krona_prelude_html, krona_prelude_html_len, sizeof(char), resultFP); - fprintf(resultFP, "%zu", reader.getSize()); - kronaReport(resultFP, *taxDB, cladeCounts, reader.getSize()); + fprintf(resultFP, "%zu", entryCount); + kronaReport(resultFP, *taxDB, cladeCounts, entryCount); fprintf(resultFP, ""); } delete taxDB; - reader.close(); + if (fclose(resultFP) != 0) { + Debug(Debug::ERROR) << "Cannot close file " << par.db3 << "\n"; + return EXIT_FAILURE; + } return EXIT_SUCCESS; } diff --git a/lib/mmseqs/src/util/CMakeLists.txt b/lib/mmseqs/src/util/CMakeLists.txt index 3b0fcb3..c7351e6 100644 --- a/lib/mmseqs/src/util/CMakeLists.txt +++ b/lib/mmseqs/src/util/CMakeLists.txt @@ -66,6 +66,7 @@ set(util_source_files util/translateaa.cpp util/tsv2db.cpp util/tar2db.cpp + util/unpackdb.cpp util/proteinaln2nucl.cpp util/versionstring.cpp util/diskspaceavail.cpp diff --git a/lib/mmseqs/src/util/alignall.cpp b/lib/mmseqs/src/util/alignall.cpp index 6a9f424..816fa36 100644 --- a/lib/mmseqs/src/util/alignall.cpp +++ b/lib/mmseqs/src/util/alignall.cpp @@ -73,7 +73,7 @@ int alignall(int argc, const char **argv, const Command &command) { Sequence query(par.maxSeqLen, targetSeqType, subMat, 0, false, par.compBiasCorrection); Sequence target(par.maxSeqLen, targetSeqType, subMat, 0, false, par.compBiasCorrection); - char buffer[1024 + 32768]; + char buffer[1024 + 32768*4]; std::vector results; results.reserve(300); diff --git a/lib/mmseqs/src/util/alignbykmer.cpp b/lib/mmseqs/src/util/alignbykmer.cpp index 4081ad8..b82115b 100644 --- a/lib/mmseqs/src/util/alignbykmer.cpp +++ b/lib/mmseqs/src/util/alignbykmer.cpp @@ -185,7 +185,7 @@ int alignbykmer(int argc, const char **argv, const Command &command) { #ifdef OPENMP thread_idx = (unsigned int) omp_get_thread_num(); #endif - char buffer[1024 + 32768]; + char buffer[1024 + 32768*4]; char dbKeyBuffer[255 + 1]; #pragma omp for schedule(dynamic, 1) diff --git a/lib/mmseqs/src/util/convertca3m.cpp b/lib/mmseqs/src/util/convertca3m.cpp index 1902c71..3791c70 100644 --- a/lib/mmseqs/src/util/convertca3m.cpp +++ b/lib/mmseqs/src/util/convertca3m.cpp @@ -36,7 +36,7 @@ int convertca3m(int argc, const char **argv, const Command &command) { std::vector results; results.reserve(1000); - char buffer[1024]; + char buffer[1024 + 32768*4]; #pragma omp for schedule(dynamic, 10) for (size_t i = 0; i < reader.getSize(); ++i) { diff --git a/lib/mmseqs/src/util/expandaln.cpp b/lib/mmseqs/src/util/expandaln.cpp index 620a9a3..60759e4 100644 --- a/lib/mmseqs/src/util/expandaln.cpp +++ b/lib/mmseqs/src/util/expandaln.cpp @@ -82,8 +82,10 @@ static bool compareHitsByKeyScore(const Matcher::result_t &first, const Matcher: int expandaln(int argc, const char **argv, const Command& command, bool returnAlnRes) { Parameters &par = Parameters::getInstance(); + // default for expand2profile to filter MSA + par.filterMsa = 1; + par.pca = 0.0; par.parseParameters(argc, argv, command, true, 0, 0); - DBReader aReader(par.db1.c_str(), par.db1Index.c_str(), par.threads, DBReader::USE_INDEX | DBReader::USE_DATA); aReader.open(DBReader::NOSORT); const int 
aSeqDbType = aReader.getDbtype(); @@ -174,7 +176,7 @@ int expandaln(int argc, const char **argv, const Command& command, bool returnAl float *compositionBias = (float*)malloc(compBufferSize); memset(compositionBias, 0, compBufferSize); - char buffer[1024 + 32000]; + char buffer[1024 + 32768*4]; std::vector resultsBc; resultsBc.reserve(300); diff --git a/lib/mmseqs/src/util/indexdb.cpp b/lib/mmseqs/src/util/indexdb.cpp index 772a9a1..5e648e7 100644 --- a/lib/mmseqs/src/util/indexdb.cpp +++ b/lib/mmseqs/src/util/indexdb.cpp @@ -31,7 +31,8 @@ std::string findIncompatibleParameter(DBReader& index, const Param return "kmerScore"; if (meta.spacedKmer != par.spacedKmer) return "spacedKmer"; - if (par.seedScoringMatrixFile != PrefilteringIndexReader::getSubstitutionMatrixName(&index)) + if (BaseMatrix::unserializeName(par.seedScoringMatrixFile.aminoacids) != PrefilteringIndexReader::getSubstitutionMatrixName(&index)&& + BaseMatrix::unserializeName(par.seedScoringMatrixFile.nucleotides) != PrefilteringIndexReader::getSubstitutionMatrixName(&index)) return "seedScoringMatrixFile"; if (par.spacedKmerPattern != PrefilteringIndexReader::getSpacedPattern(&index)) return "spacedKmerPattern"; diff --git a/lib/mmseqs/src/util/msa2profile.cpp b/lib/mmseqs/src/util/msa2profile.cpp index bb12c63..abfc66b 100644 --- a/lib/mmseqs/src/util/msa2profile.cpp +++ b/lib/mmseqs/src/util/msa2profile.cpp @@ -195,6 +195,11 @@ int msa2profile(int argc, const char **argv, const Command &command) { } } + // allow skipping first sequence in case of consensus, etc + if (par.skipQuery == true) { + kseq_read(seq); + } + while (kseq_read(seq) >= 0) { if (seq->name.l == 0 || seq->seq.l == 0) { Debug(Debug::WARNING) << "Invalid fasta sequence " << setSize << " in entry " << queryKey << "\n"; @@ -352,7 +357,7 @@ int msa2profile(int argc, const char **argv, const Command &command) { resultWriter.close(true); qDbr.close(); - DBReader::softlinkDb(par.db1, par.db2, (DBFiles::Files)(DBFiles::LOOKUP | DBFiles::SOURCE)); + DBReader::copyDb(par.db1, par.db2, (DBFiles::Files)(DBFiles::LOOKUP | DBFiles::SOURCE)); if (sequenceReader != NULL) { sequenceReader->close(); diff --git a/lib/mmseqs/src/util/msa2result.cpp b/lib/mmseqs/src/util/msa2result.cpp index c113cd0..46c73c3 100644 --- a/lib/mmseqs/src/util/msa2result.cpp +++ b/lib/mmseqs/src/util/msa2result.cpp @@ -170,9 +170,7 @@ int msa2result(int argc, const char **argv, const Command &command) { const float matchRatio = par.matchRatio; MsaFilter filter(maxSeqLength + 1, maxSetSize, &subMat, par.gapOpen.aminoacids, par.gapExtend.aminoacids); - char buffer[2048]; - std::vector results; - results.reserve(300); + char buffer[1024 + 32768*4]; #pragma omp for schedule(dynamic, 1) for (size_t id = 0; id < msaReader.getSize(); ++id) { @@ -221,6 +219,11 @@ int msa2result(int argc, const char **argv, const Command &command) { } } + // allow skipping first sequence in case of consensus, etc + if (par.skipQuery == true) { + kseq_read(seq); + } + unsigned int startKey = setSizes[id]; while (kseq_read(seq) >= 0) { if (seq->name.l == 0 || seq->seq.l == 0) { @@ -369,6 +372,7 @@ int msa2result(int argc, const char **argv, const Command &command) { } PSSMCalculator::Profile pssmRes = calculator.computePSSMFromMSA(filteredSetSize, centerLength, (const char **) msaSequences, par.wg); + resultWriter.writeStart(thread_idx); for (size_t i = 0; i < setSize; ++i) { const char* currSeq = msaSequences[i]; unsigned int currentCol = 0; @@ -439,17 +443,11 @@ int msa2result(int argc, const char **argv, const Command 
&command) { // and update them and compute the score Matcher::updateResultByRescoringBacktrace(consSeqNoGaps.c_str(), currSeqNoGaps.c_str(), fastMatrix.matrix, evaluer, par.gapOpen.aminoacids, par.gapExtend.aminoacids, res); - - results.emplace_back(res); - } - resultWriter.writeStart(thread_idx); - for (size_t i = 0; i < setSize; ++i) { - unsigned int len = Matcher::resultToBuffer(buffer, results[i], true, true); + unsigned int len = Matcher::resultToBuffer(buffer, res, true, true); resultWriter.writeAdd(buffer, len, thread_idx); } resultWriter.writeEnd(queryKey, thread_idx); - results.clear(); } kseq_destroy(seq); diff --git a/lib/mmseqs/src/util/offsetalignment.cpp b/lib/mmseqs/src/util/offsetalignment.cpp index def60fa..bcc9cb2 100644 --- a/lib/mmseqs/src/util/offsetalignment.cpp +++ b/lib/mmseqs/src/util/offsetalignment.cpp @@ -356,7 +356,7 @@ int offsetalignment(int argc, const char **argv, const Command &command) { #ifdef OPENMP thread_idx = static_cast(omp_get_thread_num()); #endif - char * buffer = new char[65536]; + char buffer[1024 + 32768*4]; std::string ss; ss.reserve(1024); @@ -469,7 +469,6 @@ int offsetalignment(int argc, const char **argv, const Command &command) { tmp.clear(); } } - delete[] buffer; } Debug(Debug::INFO) << "\n"; resultWriter.close(); diff --git a/lib/mmseqs/src/util/orftocontig.cpp b/lib/mmseqs/src/util/orftocontig.cpp index 7b2e5e0..ae0314e 100644 --- a/lib/mmseqs/src/util/orftocontig.cpp +++ b/lib/mmseqs/src/util/orftocontig.cpp @@ -33,7 +33,7 @@ int orftocontig(int argn, const char **argv, const Command& command) { #ifdef OPENMP thread_idx = static_cast(omp_get_thread_num()); #endif - char orfToContigBuffer[1024]; + char orfToContigBuffer[1024 + 32768*4]; #pragma omp for schedule(dynamic, 100) for (size_t id = 0; id < orfHeadersReader.getSize(); ++id) { diff --git a/lib/mmseqs/src/util/proteinaln2nucl.cpp b/lib/mmseqs/src/util/proteinaln2nucl.cpp index ee099be..7966d86 100644 --- a/lib/mmseqs/src/util/proteinaln2nucl.cpp +++ b/lib/mmseqs/src/util/proteinaln2nucl.cpp @@ -71,7 +71,7 @@ int proteinaln2nucl(int argc, const char **argv, const Command &command) { thread_idx = static_cast(omp_get_thread_num()); #endif - char buffer[1024]; + char buffer[1024 + 32768*4]; std::string result; result.reserve(1024); diff --git a/lib/mmseqs/src/util/result2profile.cpp b/lib/mmseqs/src/util/result2profile.cpp index 12f29bd..31119c9 100644 --- a/lib/mmseqs/src/util/result2profile.cpp +++ b/lib/mmseqs/src/util/result2profile.cpp @@ -136,7 +136,7 @@ int result2profile(int argc, const char **argv, const Command &command, bool ret char dbKey[255]; const char *entry[255]; - char buffer[2048]; + char buffer[1024 + 32768*4]; std::vector alnResults; alnResults.reserve(300); diff --git a/lib/mmseqs/src/util/summarizeresult.cpp b/lib/mmseqs/src/util/summarizeresult.cpp index c500b89..8d55971 100644 --- a/lib/mmseqs/src/util/summarizeresult.cpp +++ b/lib/mmseqs/src/util/summarizeresult.cpp @@ -45,7 +45,7 @@ int summarizeresult(int argc, const char **argv, const Command &command) { thread_idx = static_cast(omp_get_thread_num()); #endif - char buffer[32768]; + char buffer[1024 + 32768*4]; std::vector covered(par.maxSeqLen + 1, false); #pragma omp for schedule(dynamic, 10) diff --git a/lib/mmseqs/src/util/swapresults.cpp b/lib/mmseqs/src/util/swapresults.cpp index 116da15..b953072 100644 --- a/lib/mmseqs/src/util/swapresults.cpp +++ b/lib/mmseqs/src/util/swapresults.cpp @@ -167,8 +167,8 @@ int doswap(Parameters& par, bool isGeneralMode) { for (size_t split = 0; split < 
splits.size(); split++) { unsigned int dbKeyToWrite = splits[split].first; size_t bytesToWrite = splits[split].second; - char *tmpData = new char[bytesToWrite]; - Util::checkAllocation(tmpData, "Can not allocate tmpData memory in doswap"); + char *tmpData = new(std::nothrow) char[bytesToWrite]; + Util::checkAllocation(tmpData, "Cannot allocate tmpData memory"); Debug(Debug::INFO) << "\nReading results.\n"; Debug::Progress progress(resultSize); #pragma omp parallel @@ -248,7 +248,7 @@ int doswap(Parameters& par, bool isGeneralMode) { std::vector curRes; curRes.reserve(300); - char buffer[1024+32768]; + char buffer[1024 + 32768*4]; std::string ss; ss.reserve(100000); diff --git a/lib/mmseqs/src/util/tar2db.cpp b/lib/mmseqs/src/util/tar2db.cpp index 151725e..a025088 100644 --- a/lib/mmseqs/src/util/tar2db.cpp +++ b/lib/mmseqs/src/util/tar2db.cpp @@ -40,7 +40,12 @@ int mtar_gzopen(mtar_t *tar, const char *filename) { return MTAR_EOPENFAIL; } - // Return ok +#if defined(ZLIB_VERNUM) && ZLIB_VERNUM >= 0x1240 + if (gzbuffer((gzFile)tar->stream, 1 * 1024 * 1024) != 0) { + Debug(Debug::WARNING) << "Could not set gzbuffer size, performance might be bad\n"; + } +#endif + return MTAR_ESUCCESS; } #endif @@ -71,17 +76,18 @@ int tar2db(int argc, const char **argv, const Command& command) { std::string sourceFile = dataFile + ".source"; FILE *source = FileUtil::openAndDelete(sourceFile.c_str(), "w"); - std::string lookupFile = dataFile + ".lookup"; - FILE *lookup = FileUtil::openAndDelete(lookupFile.c_str(), "w"); - DBWriter writer(dataFile.c_str(), indexFile.c_str(), par.threads, par.compressed, par.outputDbType); writer.open(); - Debug::Progress progress; - char buffer[4096]; + std::string lookupFile = dataFile + ".lookup"; + DBWriter lookupWriter(lookupFile.c_str(), (lookupFile + ".index").c_str(), par.threads, 0, Parameters::DBTYPE_OMIT_FILE); + lookupWriter.open(); + + Debug::Progress progress; size_t globalKey = 0; for (size_t i = 0; i < filenames.size(); i++) { + char buffer[4096]; size_t len = snprintf(buffer, sizeof(buffer), "%zu\t%s\n", i, FileUtil::baseName(filenames[i]).c_str()); int written = fwrite(buffer, sizeof(char), len, source); if (written != (int) len) { @@ -89,6 +95,7 @@ int tar2db(int argc, const char **argv, const Command& command) { EXIT(EXIT_FAILURE); } + int localThreads = par.threads; mtar_t tar; if (Util::endsWith(".tar.gz", filenames[i]) || Util::endsWith(".tgz", filenames[i])) { #ifdef HAVE_ZLIB @@ -96,6 +103,7 @@ int tar2db(int argc, const char **argv, const Command& command) { Debug(Debug::ERROR) << "Cannot open file " << filenames[i] << "\n"; EXIT(EXIT_FAILURE); } + localThreads = 1; #else Debug(Debug::ERROR) << "MMseqs2 was not compiled with zlib support. 
Cannot read compressed input.\n"; EXIT(EXIT_FAILURE); @@ -107,11 +115,12 @@ int tar2db(int argc, const char **argv, const Command& command) { } } -#pragma omp parallel shared(tar, buffer) +#pragma omp parallel shared(tar) num_threads(localThreads) { - size_t bufferSize = 10 * 1024; + char buffer[4096]; + size_t bufferSize = 1024 * 1024; char *dataBuffer = (char *) malloc(bufferSize); - size_t inflateSize = 10 * 1024; + size_t inflateSize = 1024 * 1024; char *inflateBuffer = (char *) malloc(inflateSize); mtar_header_t header; size_t currentKey = 0; @@ -143,9 +152,9 @@ int tar2db(int argc, const char **argv, const Command& command) { { if (tar.isFinished == 0 && (mtar_read_header(&tar, &header)) != MTAR_ENULLRECORD) { if (header.type == MTAR_TREG) { - progress.updateProgress(); if (include.isMatch(header.name) == false || exclude.isMatch(header.name) == true) { __sync_fetch_and_add(&(globalKey), 1); + proceed = true; writeEntry = false; } else { if (header.size > bufferSize) { @@ -157,18 +166,11 @@ int tar2db(int argc, const char **argv, const Command& command) { EXIT(EXIT_FAILURE); } proceed = true; + writeEntry = true; currentKey = __sync_fetch_and_add(&(globalKey), 1); - - size_t len = snprintf(buffer, sizeof(buffer), "%zu\t%s\t%zu\n", currentKey, - FileUtil::baseName(header.name).c_str(), i); - int written = fwrite(buffer, sizeof(char), len, lookup); - if (written != (int) len) { - Debug(Debug::ERROR) << "Cannot write to lookup file " << lookupFile << "\n"; - EXIT(EXIT_FAILURE); - } } } else { - proceed = false; + proceed = true; writeEntry = false; } } else { @@ -178,6 +180,7 @@ int tar2db(int argc, const char **argv, const Command& command) { } } if (proceed && writeEntry) { + progress.updateProgress(); if (Util::endsWith(".gz", header.name)) { #ifdef HAVE_ZLIB inflateReset(&strm); @@ -228,6 +231,8 @@ int tar2db(int argc, const char **argv, const Command& command) { } else { writer.writeData(dataBuffer, header.size, currentKey, thread_idx); } + size_t len = snprintf(buffer, sizeof(buffer), "%zu\t%s\t%zu\n", currentKey, FileUtil::baseName(header.name).c_str(), i); + lookupWriter.writeData(buffer, len, thread_idx, false, false); } } @@ -241,10 +246,8 @@ int tar2db(int argc, const char **argv, const Command& command) { mtar_close(&tar); } // filename for writer.close(); - if (fclose(lookup) != 0) { - Debug(Debug::ERROR) << "Cannot close file " << lookupFile << "\n"; - EXIT(EXIT_FAILURE); - } + lookupWriter.close(true); + FileUtil::remove(lookupWriter.getIndexFileName()); if (fclose(source) != 0) { Debug(Debug::ERROR) << "Cannot close file " << sourceFile << "\n"; EXIT(EXIT_FAILURE); diff --git a/lib/mmseqs/src/util/transitivealign.cpp b/lib/mmseqs/src/util/transitivealign.cpp index acbb45b..ea3dfd5 100644 --- a/lib/mmseqs/src/util/transitivealign.cpp +++ b/lib/mmseqs/src/util/transitivealign.cpp @@ -66,7 +66,7 @@ int transitivealign(int argc, const char **argv, const Command &command) { // Sequence query(par.maxSeqLen, targetSeqType, subMat, par.kmerSize, par.spacedKmer, par.compBiasCorrection); // Sequence target(par.maxSeqLen, targetSeqType, subMat, par.kmerSize, par.spacedKmer, par.compBiasCorrection); - char * buffer = new char[1024 + 32768*4]; + char buffer[1024 + 32768*4]; BacktraceTranslator btTranslate; std::vector results; results.reserve(300); @@ -150,7 +150,6 @@ int transitivealign(int argc, const char **argv, const Command &command) { } resultWriter.writeEnd(alnKey, thread_idx); } - delete [] buffer; } alnReader.remapData(); } @@ -231,8 +230,8 @@ int transitivealign(int argc, 
const char **argv, const Command &command) { for (size_t split = 0; split < splits.size(); split++) { unsigned int dbKeyToWrite = splits[split].first; size_t bytesToWrite = splits[split].second; - char *tmpData = new char[bytesToWrite]; - Util::checkAllocation(tmpData, "Could not allocate tmpData memory in doswap"); + char *tmpData = new(std::nothrow) char[bytesToWrite]; + Util::checkAllocation(tmpData, "Cannot allocate tmpData memory"); Debug(Debug::INFO) << "\nReading results.\n"; #pragma omp parallel { diff --git a/lib/mmseqs/src/util/unpackdb.cpp b/lib/mmseqs/src/util/unpackdb.cpp new file mode 100644 index 0000000..e7f54d2 --- /dev/null +++ b/lib/mmseqs/src/util/unpackdb.cpp @@ -0,0 +1,59 @@ +#include "Parameters.h" +#include "DBReader.h" +#include "DBWriter.h" +#include "Util.h" +#include "FileUtil.h" +#include "Debug.h" + +#ifdef OPENMP +#include +#endif + +int unpackdb(int argc, const char **argv, const Command& command) { + Parameters& par = Parameters::getInstance(); + par.parseParameters(argc, argv, command, true, 0, 0); + + int mode = DBReader::USE_INDEX|DBReader::USE_DATA; + if (par.unpackNameMode == Parameters::UNPACK_NAME_ACCESSION) { + mode |= DBReader::USE_LOOKUP; + } + DBReader reader(par.db1.c_str(), par.db1Index.c_str(), par.threads, mode); + reader.open(DBReader::LINEAR_ACCCESS); + + if (FileUtil::directoryExists(par.db2.c_str()) == false && FileUtil::makeDir(par.db2.c_str()) == false) { + Debug(Debug::ERROR) << "Cannot create output folder " << par.db2 << "\n"; + EXIT(EXIT_FAILURE); + } + + size_t entries = reader.getSize(); + Debug::Progress progress(entries); +#pragma omp parallel + { + unsigned int thread_idx = 0; +#ifdef OPENMP + thread_idx = static_cast(omp_get_thread_num()); +#endif + +#pragma omp for schedule(dynamic, 100) + for (size_t i = 0; i < entries; ++i) { + progress.updateProgress(); + unsigned int key = reader.getDbKey(i); + std::string name = par.db2; + if (name.back() != '/') { + name.append(1, '/'); + } + if (par.unpackNameMode == Parameters::UNPACK_NAME_ACCESSION) { + size_t lookupId = reader.getLookupIdByKey(key); + name.append(reader.getLookupEntryName(lookupId)); + } else { + name.append(SSTR(key)); + } + name.append(par.unpackSuffix); + FILE* handle = FileUtil::openAndDelete(name.c_str(), "w"); + fwrite(reader.getData(i, thread_idx), sizeof(char), reader.getEntryLen(i) - 1, handle); + fclose(handle); + } + } + reader.close(); + return EXIT_SUCCESS; +} diff --git a/lib/mmseqs/src/workflow/Databases.cpp b/lib/mmseqs/src/workflow/Databases.cpp index 3ff14a9..d8fd16e 100644 --- a/lib/mmseqs/src/workflow/Databases.cpp +++ b/lib/mmseqs/src/workflow/Databases.cpp @@ -81,6 +81,13 @@ std::vector downloads = {{ "https://ftp.ncbi.nlm.nih.gov/blast/db/FASTA", false, Parameters::DBTYPE_NUCLEOTIDES, databases_sh, databases_sh_len, { } +}, { + "GTDB", + "Genome Taxonomy Database is a phylogenetically consistent, genome-based taxonomy that provides rank-normalized classifications for ~150,000 bacterial and archaeal genomes from domain to genus.", + "Parks et al: A complete domain-to-species taxonomy for Bacteria and Archaea. 
Nat Biotechnol 38(9), 1079–1086 (2020)", + "https://gtdb.ecogenomic.org", + true, Parameters::DBTYPE_AMINO_ACIDS, databases_sh, databases_sh_len, + { } }, { "PDB", "The Protein Data Bank is the single worldwide archive of structural data of biological macromolecules.", @@ -116,6 +123,13 @@ std::vector downloads = {{ "https://xfam.wordpress.com/2020/06/30/a-new-pfam-b-is-released", false, Parameters::DBTYPE_HMM_PROFILE, databases_sh, databases_sh_len, { } +}, { + "CDD", + "Conserved Domain Database is a protein annotation resource consisting of well-annotated MSAs for ancient domains and full-length proteins.", + "Lu et al: CDD/SPARCLE: the conserved domain database in 2020. Nucleic Acids Res 48(D1), D265–D268 (2020)", + "https://www.ncbi.nlm.nih.gov/Structure/cdd/cdd.shtml", + false, Parameters::DBTYPE_HMM_PROFILE, databases_sh, databases_sh_len, + { } }, { "eggNOG", "eggNOG is a hierarchical, functionally and phylogenetically annotated orthology resource", @@ -204,7 +218,9 @@ std::string listDatabases(const Command &command, bool detailed) { description.append(1, '\t'); appendPadded(description, (downloads[i].hasTaxonomy ? "yes" : "-"), 8, PAD_RIGHT); description.append(1, '\t'); - appendPadded(description, downloads[i].url, urlWidth); + // last field in line should not be padded + //appendPadded(description, downloads[i].url, urlWidth); + description.append(downloads[i].url); description.append(1, '\n'); if (detailed) { if (strlen(downloads[i].description) > 0) { @@ -217,6 +233,7 @@ std::string listDatabases(const Command &command, bool detailed) { description.append(downloads[i].citation); description.append(1, '\n'); } + description.append(1, '\n'); } } diff --git a/lib/mmseqs/src/workflow/EasyRbh.cpp b/lib/mmseqs/src/workflow/EasyRbh.cpp index 30d55b9..c78da25 100644 --- a/lib/mmseqs/src/workflow/EasyRbh.cpp +++ b/lib/mmseqs/src/workflow/EasyRbh.cpp @@ -79,8 +79,13 @@ int easyrbh(int argc, const char **argv, const Command &command) { std::string target = par.filenames.back().c_str(); cmd.addVariable("TARGET", target.c_str()); par.filenames.pop_back(); - if(needTaxonomy || needTaxonomyMapping){ - Parameters::checkIfTaxDbIsComplete(target); + + if (needTaxonomy || needTaxonomyMapping) { + std::vector missingFiles = Parameters::findMissingTaxDbFiles(target); + if (missingFiles.empty() == false) { + Parameters::printTaxDbError(target, missingFiles); + EXIT(EXIT_FAILURE); + } } cmd.addVariable("QUERY", par.filenames.back().c_str()); diff --git a/lib/mmseqs/src/workflow/EasySearch.cpp b/lib/mmseqs/src/workflow/EasySearch.cpp index c78f980..4a90bbd 100644 --- a/lib/mmseqs/src/workflow/EasySearch.cpp +++ b/lib/mmseqs/src/workflow/EasySearch.cpp @@ -16,7 +16,6 @@ void setEasySearchDefaults(Parameters *p, bool linsearch) { p->removeTmpFiles = true; p->writeLookup = false; p->alignmentMode = Parameters::ALIGNMENT_MODE_SCORE_COV_SEQID; - p->orfFilter = 0; } void setEasySearchMustPassAlong(Parameters *p, bool linsearch) { @@ -26,7 +25,6 @@ void setEasySearchMustPassAlong(Parameters *p, bool linsearch) { p->PARAM_S.wasSet = true; p->PARAM_REMOVE_TMP_FILES.wasSet = true; p->PARAM_ALIGNMENT_MODE.wasSet = true; - p->PARAM_ORF_FILTER.wasSet = true; } int doeasysearch(int argc, const char **argv, const Command &command, bool linsearch) { @@ -102,8 +100,12 @@ int doeasysearch(int argc, const char **argv, const Command &command, bool linse cmd.addVariable("TARGET", target.c_str()); par.filenames.pop_back(); - if(needTaxonomy || needTaxonomyMapping){ - Parameters::checkIfTaxDbIsComplete(target); + if 
(needTaxonomy || needTaxonomyMapping) { + std::vector missingFiles = Parameters::findMissingTaxDbFiles(target); + if (missingFiles.empty() == false) { + Parameters::printTaxDbError(target, missingFiles); + EXIT(EXIT_FAILURE); + } } if (linsearch) { diff --git a/lib/mmseqs/src/workflow/EasyTaxonomy.cpp b/lib/mmseqs/src/workflow/EasyTaxonomy.cpp index 18bfa95..b5563d5 100644 --- a/lib/mmseqs/src/workflow/EasyTaxonomy.cpp +++ b/lib/mmseqs/src/workflow/EasyTaxonomy.cpp @@ -6,55 +6,24 @@ #include "easytaxonomy.sh.h" void setEasyTaxonomyDefaults(Parameters *p) { - p->spacedKmer = true; p->removeTmpFiles = true; - p->alignmentMode = Parameters::ALIGNMENT_MODE_SCORE_COV; p->createdbMode = Parameters::SEQUENCE_SPLIT_MODE_SOFT; p->writeLookup = false; - p->sensitivity = 5.7; - p->evalThr = 1; - p->orfStartMode = 1; - p->orfMinLength = 30; - p->orfMaxLength = 32734; - p->orfFilter = 0; } void setEasyTaxonomyMustPassAlong(Parameters *p) { - p->PARAM_SPACED_KMER_MODE.wasSet = true; p->PARAM_REMOVE_TMP_FILES.wasSet = true; - p->PARAM_ALIGNMENT_MODE.wasSet = true; - p->PARAM_S.wasSet = true; - p->PARAM_E.wasSet = true; - p->PARAM_ORF_START_MODE.wasSet = true; - p->PARAM_ORF_MIN_LENGTH.wasSet = true; - p->PARAM_ORF_MAX_LENGTH.wasSet = true; - p->PARAM_ORF_FILTER.wasSet = true; + p->PARAM_CREATEDB_MODE.wasSet = true; + p->PARAM_WRITE_LOOKUP.wasSet = true; } int easytaxonomy(int argc, const char **argv, const Command& command) { Parameters& par = Parameters::getInstance(); - par.PARAM_ADD_BACKTRACE.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_MAX_REJECTED.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_DB_OUTPUT.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_OVERLAP.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_DB_OUTPUT.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_RESCORE_MODE.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_NUM_ITERATIONS.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_PICK_ID_FROM.addCategory(MMseqsParameter::COMMAND_EXPERT); for (size_t i = 0; i < par.createdb.size(); i++){ par.createdb[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); } - for (size_t i = 0; i < par.extractorfs.size(); i++){ - par.extractorfs[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.translatenucs.size(); i++){ - par.translatenucs[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.splitsequence.size(); i++) { - par.splitsequence[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.result2profile.size(); i++){ - par.result2profile[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); + for (size_t i = 0; i < par.searchworkflow.size(); i++){ + par.searchworkflow[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); } for (size_t i = 0; i < par.convertalignments.size(); i++){ par.convertalignments[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); @@ -62,6 +31,8 @@ int easytaxonomy(int argc, const char **argv, const Command& command) { for (size_t i = 0; i < par.createtsv.size(); i++){ par.createtsv[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); } + par.PARAM_S.removeCategory(MMseqsParameter::COMMAND_EXPERT); + par.PARAM_E.removeCategory(MMseqsParameter::COMMAND_EXPERT); par.PARAM_COMPRESSED.removeCategory(MMseqsParameter::COMMAND_EXPERT); par.PARAM_THREADS.removeCategory(MMseqsParameter::COMMAND_EXPERT); par.PARAM_V.removeCategory(MMseqsParameter::COMMAND_EXPERT); @@ -88,17 +59,19 @@ int easytaxonomy(int argc, const char **argv, const Command& 
command) { cmd.addVariable("RUNNER", par.runner.c_str()); cmd.addVariable("VERBOSITY", par.createParameterString(par.onlyverbosity).c_str()); - par.taxonomyOutpuMode = Parameters::TAXONOMY_OUTPUT_ALIGNMENT; + par.taxonomyOutputMode = Parameters::TAXONOMY_OUTPUT_BOTH; par.PARAM_TAX_OUTPUT_MODE.wasSet = true; cmd.addVariable("TAXONOMY_PAR", par.createParameterString(par.taxonomy, true).c_str()); cmd.addVariable("CREATEDB_QUERY_PAR", par.createParameterString(par.createdb).c_str()); cmd.addVariable("LCA_PAR", par.createParameterString(par.lca).c_str()); cmd.addVariable("CONVERT_PAR", par.createParameterString(par.convertalignments).c_str()); - cmd.addVariable("THREADS_COMP_PAR", par.createParameterString(par.threadsandcompression).c_str()); - cmd.addVariable("THREADS_PAR", par.createParameterString(par.onlythreads).c_str()); + cmd.addVariable("TAXONOMYREPORT_PAR", par.createParameterString(par.taxonomyreport).c_str()); cmd.addVariable("CREATETSV_PAR", par.createParameterString(par.createtsv).c_str()); - par.evalThr = 100000000; + par.evalThr = FLT_MAX; cmd.addVariable("SWAPRESULT_PAR", par.createParameterString(par.swapresult).c_str()); + par.pickIdFrom = 1; + cmd.addVariable("ADDTAXONOMY_PAR", par.createParameterString(par.addtaxonomy).c_str()); + cmd.addVariable("THREADS_COMP_PAR", par.createParameterString(par.threadsandcompression).c_str()); FileUtil::writeFile(tmpDir + "/easy-taxonomy.sh", easytaxonomy_sh, easytaxonomy_sh_len); std::string program(tmpDir + "/easy-taxonomy.sh"); cmd.execProgram(program.c_str(), par.filenames); diff --git a/lib/mmseqs/src/workflow/Enrich.cpp b/lib/mmseqs/src/workflow/Enrich.cpp index 4efbdee..c308b74 100644 --- a/lib/mmseqs/src/workflow/Enrich.cpp +++ b/lib/mmseqs/src/workflow/Enrich.cpp @@ -36,9 +36,9 @@ int enrich(int argc, const char **argv, const Command &command) { int originalNumIterations = par.numIterations; par.numIterations = 1; - par.sliceSearch = true; + par.exhaustiveSearch = true; cmd.addVariable("PROF_SEARCH_PAR", par.createParameterString(par.searchworkflow).c_str()); - par.sliceSearch = false; + par.exhaustiveSearch = false; par.numIterations = originalNumIterations; diff --git a/lib/mmseqs/src/workflow/Search.cpp b/lib/mmseqs/src/workflow/Search.cpp index 14a2e5f..04ceacd 100644 --- a/lib/mmseqs/src/workflow/Search.cpp +++ b/lib/mmseqs/src/workflow/Search.cpp @@ -22,7 +22,6 @@ void setSearchDefaults(Parameters *p) { p->alignmentMode = Parameters::ALIGNMENT_MODE_SCORE_COV; p->sensitivity = 5.7; p->evalThr = 0.001; - //p->orfLongest = true; p->orfStartMode = 1; p->orfMinLength = 30; p->orfMaxLength = 32734; @@ -248,7 +247,7 @@ int search(int argc, const char **argv, const Command& command) { } // FIXME: use larger default k-mer size in target-profile case if memory is available // overwrite default kmerSize for target-profile searches and parse parameters again - if (par.sliceSearch == false && (searchMode & Parameters::SEARCH_MODE_FLAG_TARGET_PROFILE) && par.PARAM_K.wasSet == false) { + if (par.exhaustiveSearch == false && (searchMode & Parameters::SEARCH_MODE_FLAG_TARGET_PROFILE) && par.PARAM_K.wasSet == false) { par.kmerSize = 5; } @@ -315,7 +314,7 @@ int search(int argc, const char **argv, const Command& command) { cmd.addVariable("RUNNER", par.runner.c_str()); // cmd.addVariable("ALIGNMENT_DB_EXT", Parameters::isEqualDbtype(targetDbType, Parameters::DBTYPE_PROFILE_STATE_SEQ) ? 
".255" : ""); par.filenames[1] = targetDB; - if (par.sliceSearch == true) { + if (par.exhaustiveSearch == true) { // By default (0), diskSpaceLimit (in bytes) will be set in the workflow to use as much as possible cmd.addVariable("AVAIL_DISK", SSTR(static_cast(par.diskSpaceLimit)).c_str()); @@ -327,20 +326,27 @@ int search(int argc, const char **argv, const Command& command) { int originalCovMode = par.covMode; par.covMode = Util::swapCoverageMode(par.covMode); size_t maxResListLen = par.maxResListLen; - par.maxResListLen = INT_MAX; + par.maxResListLen = std::max((size_t)300, queryDbSize); cmd.addVariable("PREFILTER_PAR", par.createParameterString(par.prefilter).c_str()); par.maxResListLen = maxResListLen; double originalEvalThr = par.evalThr; par.evalThr = std::numeric_limits::max(); - cmd.addVariable("SWAP_PAR", par.createParameterString(par.swapresult).c_str()); + cmd.addVariable("SWAPRES_PAR", par.createParameterString(par.swapresult).c_str()); par.evalThr = originalEvalThr; + cmd.addVariable("FILTER_PAR", par.createParameterString(par.filterresult).c_str()); + if(par.exhaustiveFilterMsa == 1){ + cmd.addVariable("FILTER_RESULT", "1"); + } if (isUngappedMode) { par.rescoreMode = Parameters::RESCORE_MODE_ALIGNMENT; cmd.addVariable("ALIGNMENT_PAR", par.createParameterString(par.rescorediagonal).c_str()); par.rescoreMode = originalRescoreMode; } else { cmd.addVariable("ALIGNMENT_PAR", par.createParameterString(par.align).c_str()); + par.alignmentMode = Parameters::ALIGNMENT_MODE_CLUSTER; + cmd.addVariable("ALIGNMENT_IT_PAR", par.createParameterString(par.align).c_str()); } + cmd.addVariable("SORTRESULT_PAR", par.createParameterString(par.sortresult).c_str()); par.covMode = originalCovMode; @@ -453,7 +459,6 @@ int search(int argc, const char **argv, const Command& command) { FileUtil::writeFile(tmpDir + "/translated_search.sh", translated_search_sh, translated_search_sh_len); cmd.addVariable("QUERY_NUCL", (searchMode & Parameters::SEARCH_MODE_FLAG_QUERY_TRANSLATED) ? "TRUE" : NULL); cmd.addVariable("TARGET_NUCL", (searchMode & Parameters::SEARCH_MODE_FLAG_TARGET_TRANSLATED) ? "TRUE" : NULL); - cmd.addVariable("ORF_FILTER", par.orfFilter ? 
"TRUE" : NULL); cmd.addVariable("THREAD_COMP_PAR", par.createParameterString(par.threadsandcompression).c_str()); par.subDbMode = 1; cmd.addVariable("CREATESUBDB_PAR", par.createParameterString(par.createsubdb).c_str()); diff --git a/lib/mmseqs/src/workflow/Taxonomy.cpp b/lib/mmseqs/src/workflow/Taxonomy.cpp index 2ad8067..4dbd34c 100644 --- a/lib/mmseqs/src/workflow/Taxonomy.cpp +++ b/lib/mmseqs/src/workflow/Taxonomy.cpp @@ -20,7 +20,6 @@ void setTaxonomyDefaults(Parameters *p) { p->orfStartMode = 1; p->orfMinLength = 30; p->orfMaxLength = 32734; - p->showTaxLineage = 0; p->orfFilter = true; } void setTaxonomyMustPassAlong(Parameters *p) { @@ -33,36 +32,16 @@ void setTaxonomyMustPassAlong(Parameters *p) { p->PARAM_ORF_START_MODE.wasSet = true; p->PARAM_ORF_MIN_LENGTH.wasSet = true; p->PARAM_ORF_MAX_LENGTH.wasSet = true; - p->PARAM_TAXON_ADD_LINEAGE.wasSet = true; - } int taxonomy(int argc, const char **argv, const Command& command) { Parameters& par = Parameters::getInstance(); - par.PARAM_ADD_BACKTRACE.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_MAX_REJECTED.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_DB_OUTPUT.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_OVERLAP.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_DB_OUTPUT.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_RESCORE_MODE.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_NUM_ITERATIONS.addCategory(MMseqsParameter::COMMAND_EXPERT); - par.PARAM_PICK_ID_FROM.addCategory(MMseqsParameter::COMMAND_EXPERT); - for (size_t i = 0; i < par.createdb.size(); i++) { - par.createdb[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.extractorfs.size(); i++) { - par.extractorfs[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.translatenucs.size(); i++) { - par.translatenucs[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.splitsequence.size(); i++) { - par.splitsequence[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); - } - for (size_t i = 0; i < par.result2profile.size(); i++) { - par.result2profile[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); + for (size_t i = 0; i < par.searchworkflow.size(); i++) { + par.searchworkflow[i]->addCategory(MMseqsParameter::COMMAND_EXPERT); } + par.PARAM_S.removeCategory(MMseqsParameter::COMMAND_EXPERT); + par.PARAM_E.removeCategory(MMseqsParameter::COMMAND_EXPERT); par.PARAM_COMPRESSED.removeCategory(MMseqsParameter::COMMAND_EXPERT); par.PARAM_THREADS.removeCategory(MMseqsParameter::COMMAND_EXPERT); par.PARAM_V.removeCategory(MMseqsParameter::COMMAND_EXPERT); @@ -72,8 +51,8 @@ int taxonomy(int argc, const char **argv, const Command& command) { setTaxonomyMustPassAlong(&par); if (par.taxonomySearchMode == Parameters::TAXONOMY_2BLCA) { - Debug(Debug::WARNING) << "2bLCA was replaced by Accelerated 2bLCA\n"; - par.taxonomySearchMode = Parameters::TAXONOMY_ACCEL_2BLCA; + Debug(Debug::WARNING) << "2bLCA was replaced by approximate 2bLCA\n"; + par.taxonomySearchMode = Parameters::TAXONOMY_APPROX_2BLCA; } std::string indexStr = PrefilteringIndexReader::searchForIndex(par.db2); @@ -96,8 +75,8 @@ int taxonomy(int argc, const char **argv, const Command& command) { int searchMode = computeSearchMode(queryDbType, targetDbType, targetSrcDbType, par.searchType); if ((searchMode & Parameters::SEARCH_MODE_FLAG_QUERY_NUCLEOTIDE) && (searchMode & Parameters::SEARCH_MODE_FLAG_TARGET_NUCLEOTIDE)) { - if (par.taxonomySearchMode == Parameters::TAXONOMY_ACCEL_2BLCA) 
{ - Debug(Debug::WARNING) << "Accel. 2bLCA cannot be used with nucl-nucl taxonomy, using top-hit instead"; + if (par.taxonomySearchMode == Parameters::TAXONOMY_APPROX_2BLCA) { + Debug(Debug::WARNING) << "Accel. 2bLCA cannot be used with nucl-nucl taxonomy, using top-hit instead\n"; par.taxonomySearchMode = Parameters::TAXONOMY_TOP_HIT; } } @@ -126,10 +105,12 @@ int taxonomy(int argc, const char **argv, const Command& command) { // never show lineage for the orfs par.showTaxLineage = 0; par.PARAM_TAXON_ADD_LINEAGE.wasSet = true; - par.taxonomyOutpuMode = 2; + int taxonomyOutputMode = par.taxonomyOutputMode; + par.taxonomyOutputMode = Parameters::TAXONOMY_OUTPUT_BOTH; par.PARAM_TAX_OUTPUT_MODE.wasSet = true; cmd.addVariable("TAXONOMY_PAR", par.createParameterString(par.taxonomy, true).c_str()); par.showTaxLineage = showTaxLineageOrig; + par.taxonomyOutputMode = taxonomyOutputMode; cmd.addVariable("AGGREGATETAX_PAR", par.createParameterString(par.aggregatetax).c_str()); cmd.addVariable("SWAPDB_PAR", par.createParameterString(par.swapdb).c_str()); @@ -152,25 +133,25 @@ int taxonomy(int argc, const char **argv, const Command& command) { } else { if (par.taxonomySearchMode == Parameters::TAXONOMY_TOP_HIT) { cmd.addVariable("TOPHIT_MODE", "1"); - } else if (par.taxonomySearchMode == Parameters::TAXONOMY_ACCEL_2BLCA) { + } else if (par.taxonomySearchMode == Parameters::TAXONOMY_APPROX_2BLCA) { par.lcaSearch = true; par.PARAM_LCA_SEARCH.wasSet = true; cmd.addVariable("TOPHIT_MODE", NULL); } cmd.addVariable("SEARCH_PAR", par.createParameterString(par.searchworkflow, true).c_str()); - if (par.taxonomyOutpuMode == Parameters::TAXONOMY_OUTPUT_LCA) { - cmd.addVariable("TAX_OUTPUT", "0"); - cmd.addVariable("LCA_PAR", par.createParameterString(par.lca).c_str()); - } else if (par.taxonomyOutpuMode == Parameters::TAXONOMY_OUTPUT_BOTH) { - cmd.addVariable("TAX_OUTPUT", "2"); - cmd.addVariable("LCA_PAR", par.createParameterString(par.lca).c_str()); - } else { - cmd.addVariable("TAX_OUTPUT", "1"); - } program = tmpDir + "/taxonomy.sh"; FileUtil::writeFile(program.c_str(), taxonomy_sh, taxonomy_sh_len); } + if (par.taxonomyOutputMode == Parameters::TAXONOMY_OUTPUT_LCA) { + cmd.addVariable("TAX_OUTPUT", "0"); + cmd.addVariable("LCA_PAR", par.createParameterString(par.lca).c_str()); + } else if (par.taxonomyOutputMode == Parameters::TAXONOMY_OUTPUT_BOTH) { + cmd.addVariable("TAX_OUTPUT", "2"); + cmd.addVariable("LCA_PAR", par.createParameterString(par.lca).c_str()); + } else { + cmd.addVariable("TAX_OUTPUT", "1"); + } cmd.execProgram(program.c_str(), par.filenames); return EXIT_SUCCESS; diff --git a/lib/mmseqs/util/regression b/lib/mmseqs/util/regression index 53779d6..2b20d2b 160000 --- a/lib/mmseqs/util/regression +++ b/lib/mmseqs/util/regression @@ -1 +1 @@ -Subproject commit 53779d6c042e05cf18c728e7c8b3299b53aea50e +Subproject commit 2b20d2ba3533b6fd5343b78398b4df4d1c2e8f87
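The tightened NcbiTaxonomy::nodeExists above now rejects taxon IDs larger than maxTaxID before indexing into D, so lookups for unknown, out-of-range taxa no longer read past the array. A minimal standalone sketch of that guard, with hypothetical member names (taxToNode standing in for the real D array):

    #include <vector>

    // Hypothetical stand-ins for NcbiTaxonomy's D array and maxTaxID member.
    struct TaxLookup {
        std::vector<int> taxToNode; // taxToNode[taxId] == -1 means "no such node"
        int maxTaxID = 0;

        bool nodeExists(int taxId) const {
            // Bounds check first, then the -1 sentinel check, mirroring the patched logic.
            return taxId >= 0 && taxId <= maxTaxID && taxToNode[taxId] != -1;
        }
    };

    int main() {
        TaxLookup t;
        t.maxTaxID = 3;
        t.taxToNode = { -1, 0, -1, 1 }; // taxa 1 and 3 exist
        bool a = t.nodeExists(3);  // true
        bool b = t.nodeExists(42); // false, and no out-of-range read
        return (a && !b) ? 0 : 1;
    }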
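The reworked blacklist handling in lca.cpp accepts entries of the form "taxid" or "taxid:name", skips the root taxon 0 and taxa missing from the taxonomy, and only keeps a named entry when the name matches the node's name. A rough sketch of that parsing step, assuming a hypothetical nameOfTaxon() lookup in place of NcbiTaxonomy and std::getline splitting in place of Util::split:

    #include <cstdio>
    #include <cstdlib>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical name lookup standing in for NcbiTaxonomy::taxonNode()/getString().
    static const char* nameOfTaxon(int taxon) {
        switch (taxon) {
            case 2:    return "Bacteria";
            case 9606: return "Homo sapiens";
            default:   return NULL; // unknown taxon
        }
    }

    // Parse a comma-separated blacklist such as "0,2,9606:Homo sapiens,12345".
    static std::vector<int> parseBlacklist(const std::string& arg) {
        std::vector<int> blacklist;
        std::stringstream stream(arg);
        std::string item;
        while (std::getline(stream, item, ',')) {
            if (item.empty()) {
                continue;
            }
            int taxon = atoi(item.c_str());
            if (taxon == 0) {
                fprintf(stderr, "Cannot block root taxon 0\n");
                continue;
            }
            const char* nodeName = nameOfTaxon(taxon);
            if (nodeName == NULL) {
                fprintf(stderr, "Ignoring missing blocked taxon %d\n", taxon);
                continue;
            }
            // An optional ":name" suffix must match the node's name to be accepted.
            size_t colon = item.find(':');
            if (colon != std::string::npos && item.substr(colon + 1) != nodeName) {
                fprintf(stderr, "Name '%s' does not match name '%s' of taxon %d\n",
                        item.substr(colon + 1).c_str(), nodeName, taxon);
                continue;
            }
            blacklist.push_back(taxon);
        }
        return blacklist;
    }

    int main() {
        std::vector<int> blocked = parseBlacklist("0,2,9606:Homo sapiens,12345");
        for (size_t i = 0; i < blocked.size(); ++i) {
            printf("blocking taxon %d\n", blocked[i]); // keeps 2 and 9606, warns about 0 and 12345
        }
        return 0;
    }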
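taxReport and kronaReport now order each node's children by descending clade count before recursing. A compact sketch of that traversal over a toy clade-count map, using std::sort and a lambda where the patch uses the SORT_SERIAL macro, and a simplified stand-in for NcbiTaxonomy's clade-count struct:

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Minimal stand-in for the per-taxon clade counts used by taxReport.
    struct CladeEntry {
        unsigned int cladeCount;   // reads assigned to this taxon or any descendant
        unsigned int taxCount;     // reads assigned to exactly this taxon
        std::vector<int> children; // child taxon IDs
        std::string name;
    };

    static unsigned int cladeCountOf(const std::unordered_map<int, CladeEntry>& m, int taxId) {
        std::unordered_map<int, CladeEntry>::const_iterator it = m.find(taxId);
        return it == m.end() ? 0 : it->second.cladeCount;
    }

    static void report(const std::unordered_map<int, CladeEntry>& m, unsigned long totalReads,
                       int taxId, int depth) {
        std::unordered_map<int, CladeEntry>::const_iterator it = m.find(taxId);
        if (it == m.end()) {
            return;
        }
        printf("%.4f\t%u\t%u\t%s%s\n",
               100.0 * it->second.cladeCount / (double)totalReads,
               it->second.cladeCount, it->second.taxCount,
               std::string(2 * depth, ' ').c_str(), it->second.name.c_str());
        // Children with the largest clades come first, as in the patched report.
        std::vector<int> children = it->second.children;
        std::sort(children.begin(), children.end(), [&](int a, int b) {
            return cladeCountOf(m, a) > cladeCountOf(m, b);
        });
        for (size_t i = 0; i < children.size(); ++i) {
            report(m, totalReads, children[i], depth + 1);
        }
    }

    int main() {
        std::unordered_map<int, CladeEntry> clades;
        clades[1]    = { 10, 1, { 2, 9606 }, "root" };
        clades[2]    = { 3, 3, {}, "Bacteria" };
        clades[9606] = { 6, 6, {}, "Homo sapiens" };
        report(clades, 10, 1, 0);
        return 0;
    }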
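taxonomyreport.cpp now counts taxa in parallel: every thread accumulates into its own unordered_map, and the thread-local maps are merged under a single critical section. A reduced sketch of that pattern over a plain vector of taxon IDs instead of a DBReader; the pragmas are simply ignored when built without OpenMP:

    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    int main() {
        // Toy input standing in for the taxon assignments read from the result DB.
        std::vector<int> taxa = { 2, 2, 9606, 0, 2, 9606, 0, 0 };
        std::unordered_map<int, unsigned int> taxCounts;

    #pragma omp parallel
        {
            // One private map per thread: no locking in the counting loop.
            std::unordered_map<int, unsigned int> localCounts;
    #pragma omp for schedule(dynamic, 2)
            for (size_t i = 0; i < taxa.size(); ++i) {
                ++localCounts[taxa[i]];
            }
            // Merge the thread-local maps one thread at a time.
    #pragma omp critical
            for (std::unordered_map<int, unsigned int>::const_iterator it = localCounts.cbegin();
                 it != localCounts.cend(); ++it) {
                taxCounts[it->first] += it->second;
            }
        }

        for (std::unordered_map<int, unsigned int>::const_iterator it = taxCounts.cbegin();
             it != taxCounts.cend(); ++it) {
            printf("taxon %d: %u\n", it->first, it->second);
        }
        return 0;
    }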
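For sequence or alignment input, taxonomyreport.cpp resolves each DB key to its taxon by binary search over the sorted _mapping pairs. The sketch below reproduces that lookup with std::lower_bound and a strict less-than comparator, rather than the <= comparator passed to std::upper_bound in the patch; both land on the first entry whose key is not smaller than the query, which is then compared for equality:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    typedef std::pair<unsigned int, unsigned int> KeyToTax; // (DB key, taxon ID)

    static bool lessByKey(const KeyToTax& lhs, const KeyToTax& rhs) {
        return lhs.first < rhs.first;
    }

    // Return the taxon mapped to `key`, or 0 if the key has no mapping entry.
    static unsigned int lookupTaxon(const std::vector<KeyToTax>& mapping, unsigned int key) {
        KeyToTax val(key, 0);
        std::vector<KeyToTax>::const_iterator it =
            std::lower_bound(mapping.begin(), mapping.end(), val, lessByKey);
        if (it != mapping.end() && it->first == key) {
            return it->second;
        }
        return 0;
    }

    int main() {
        // The mapping must be sorted by key, as enforced with std::stable_sort in the patch.
        std::vector<KeyToTax> mapping = { {1, 9606}, {2, 2}, {5, 562} };
        printf("%u\n", lookupTaxon(mapping, 2)); // 2
        printf("%u\n", lookupTaxon(mapping, 4)); // 0 (unmapped key)
        return 0;
    }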
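swapresults.cpp and transitivealign.cpp now allocate the temporary swap buffer with new(std::nothrow), so a failed allocation yields NULL for Util::checkAllocation instead of an uncaught std::bad_alloc. A tiny sketch of the same pattern, with a local stand-in for checkAllocation:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    // Stand-in for Util::checkAllocation: bail out with a message if p is NULL.
    static void checkAllocation(const void* p, const char* message) {
        if (p == NULL) {
            fprintf(stderr, "%s\n", message);
            exit(EXIT_FAILURE);
        }
    }

    int main() {
        size_t bytesToWrite = 64 * 1024 * 1024;                // in the real code, the split size
        char* tmpData = new(std::nothrow) char[bytesToWrite];  // returns NULL instead of throwing
        checkAllocation(tmpData, "Cannot allocate tmpData memory");
        // ... fill and write tmpData ...
        delete[] tmpData;
        return 0;
    }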
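mtar_gzopen enlarges zlib's internal stream buffer to 1 MiB with gzbuffer(), guarded on ZLIB_VERNUM >= 0x1240 because gzbuffer() only exists from zlib 1.2.4 onward. A stripped-down sketch of the same open-and-tune step, with a hypothetical input file and no tar parsing:

    #include <cstdio>
    #include <zlib.h> // build with -lz

    int main(int argc, char** argv) {
        const char* filename = argc > 1 ? argv[1] : "example.tar.gz"; // hypothetical input
        gzFile f = gzopen(filename, "rb");
        if (f == NULL) {
            fprintf(stderr, "Cannot open %s\n", filename);
            return 1;
        }
    #if defined(ZLIB_VERNUM) && ZLIB_VERNUM >= 0x1240
        // A larger internal buffer reduces the number of small read/inflate calls.
        if (gzbuffer(f, 1 * 1024 * 1024) != 0) {
            fprintf(stderr, "Could not set gzbuffer size, performance might be bad\n");
        }
    #endif
        char buf[4096];
        size_t total = 0;
        int n;
        while ((n = gzread(f, buf, sizeof(buf))) > 0) {
            total += (size_t)n; // consume decompressed bytes here
        }
        gzclose(f);
        printf("decompressed %zu bytes\n", total);
        return 0;
    }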
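EasyRbh.cpp and EasySearch.cpp now gather all missing taxonomy companion files with Parameters::findMissingTaxDbFiles and report them together before exiting, instead of failing inside checkIfTaxDbIsComplete. The sketch below shows the same collect-then-report shape with plain POSIX stat() checks; the suffix list is illustrative only, not the authoritative MMseqs2 file list:

    #include <sys/stat.h>
    #include <cstdio>
    #include <string>
    #include <vector>

    static bool fileExists(const std::string& path) {
        struct stat st;
        return stat(path.c_str(), &st) == 0;
    }

    // Collect every required companion file that is missing next to the target DB.
    static std::vector<std::string> findMissingTaxDbFiles(const std::string& target) {
        const char* suffixes[] = { "_mapping", "_taxonomy" }; // illustrative suffixes
        std::vector<std::string> missing;
        for (size_t i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); ++i) {
            std::string path = target + suffixes[i];
            if (fileExists(path) == false) {
                missing.push_back(path);
            }
        }
        return missing;
    }

    int main(int argc, char** argv) {
        std::string target = argc > 1 ? argv[1] : "targetDB"; // hypothetical DB prefix
        std::vector<std::string> missingFiles = findMissingTaxDbFiles(target);
        if (missingFiles.empty() == false) {
            fprintf(stderr, "%s is missing taxonomy files:\n", target.c_str());
            for (size_t i = 0; i < missingFiles.size(); ++i) {
                fprintf(stderr, "  %s\n", missingFiles[i].c_str());
            }
            return 1;
        }
        return 0;
    }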