diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8796624d8..b11855114 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,13 +24,15 @@ on: branches: - "*" + workflow_dispatch: + jobs: cleanup_stale_workflows: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Clone the anvill repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Python dependencies run: | @@ -51,13 +53,12 @@ jobs: fail-fast: false matrix: image: - - { name: "ubuntu", tag: "20.04" } - binja: - - { channel: "headless", version: "3.1.3479" } - llvm: ["14", "15"] - cxxcommon_version: ["v0.2.16"] + - { name: "ubuntu", tag: "22.04" } + llvm: ["17"] + cxxcommon_version: ["v0.6.0"] - runs-on: ubuntu-20.04 + runs-on: + labels: gha-ubuntu-32 container: image: docker.pkg.github.com/lifting-bits/cxx-common/vcpkg-builder-${{ matrix.image.name }}:${{ matrix.image.tag }} @@ -83,14 +84,14 @@ jobs: ${rel_ccache_path} \ ${rel_workspace_path} - echo ::set-output name=SOURCE::$(pwd)/${rel_source_path} - echo ::set-output name=REL_SOURCE::${rel_source_path} - echo ::set-output name=BUILD::$(pwd)/${rel_build_path} - echo ::set-output name=REL_BUILD::${rel_build_path} - echo ::set-output name=INSTALL::$(pwd)/${rel_install_path} - echo ::set-output name=DOWNLOADS::$(pwd)/${rel_downloads_path} - echo ::set-output name=CCACHE::$(pwd)/${rel_ccache_path} - echo ::set-output name=WORKSPACE::$(pwd)/${rel_workspace_path} + echo "SOURCE=$(pwd)/${rel_source_path}" >> ${GITHUB_OUTPUT} + echo "REL_SOURCE=${rel_source_path}" >> ${GITHUB_OUTPUT} + echo "BUILD=$(pwd)/${rel_build_path}" >> ${GITHUB_OUTPUT} + echo "REL_BUILD=${rel_build_path}" >> ${GITHUB_OUTPUT} + echo "INSTALL=$(pwd)/${rel_install_path}" >> ${GITHUB_OUTPUT} + echo "DOWNLOADS=$(pwd)/${rel_downloads_path}" >> ${GITHUB_OUTPUT} + echo "CCACHE=$(pwd)/${rel_ccache_path}" >> ${GITHUB_OUTPUT} + echo "WORKSPACE=$(pwd)/${rel_workspace_path}" >> ${GITHUB_OUTPUT} - name: Update the cache (downloads) uses: actions/cache@v3 @@ -141,26 +142,24 @@ jobs: git config --global user.name "github-actions[bot]" - name: Install stable rust - shell: bash - run: | - apt-get install -y cargo + uses: dtolnay/rust-toolchain@stable - name: "Install Just" shell: bash run: | cargo install just - - uses: actions/setup-java@v2 + - uses: actions/setup-java@v3 with: distribution: "temurin" - java-version: "11" + java-version: "17" - name: Clone Ghidra Spec Generation - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: path: ${{ steps.build_paths.outputs.REL_SOURCE }}/irene3 repository: "trailofbits/irene3" fetch-depth: 0 - submodules: false + submodules: true ssh-key: "${{ secrets.IRENE3_DEPLOY }}" - name: Add cargo bin to path @@ -169,10 +168,10 @@ jobs: echo ~/.cargo/bin >>$GITHUB_PATH - name: "Setup Ghidra" + working-directory: ${{ steps.build_paths.outputs.REL_SOURCE }}/irene3 run: | - wget https://github.com/NationalSecurityAgency/ghidra/releases/download/Ghidra_10.1.5_build/ghidra_10.1.5_PUBLIC_20220726.zip --output-document=ghidra.zip - unzip ghidra.zip - echo "GHIDRA_INSTALL_DIR=$(pwd)/ghidra_10.1.5_PUBLIC" >> $GITHUB_ENV + just install-ghidra + echo "GHIDRA_INSTALL_DIR=$(pwd)/deps/ghidra" >> $GITHUB_ENV - name: Install Ghidra Spec Generation shell: bash @@ -210,13 +209,13 @@ jobs: echo "${destination_path}/installed/x64-linux-rel/tools/llvm-${{matrix.llvm}}" >> $GITHUB_PATH - echo ::set-output name=PATH::${destination_path} + echo "PATH=${destination_path}" >> ${GITHUB_OUTPUT} - name: Select the build job count 
shell: bash id: build_job_count run: | - echo ::set-output name=VALUE::$(($(nproc) + 1)) + echo "VALUE=$(($(nproc) + 1))" >> ${GITHUB_OUTPUT} - name: Configure remill working-directory: ${{ steps.build_paths.outputs.BUILD }} run: | @@ -257,7 +256,7 @@ jobs: cmake --build remill_build \ --target install - echo ::set-output name=PATH::${DESTDIR} + echo "PATH=${DESTDIR}" >> ${GITHUB_OUTPUT} - name: Build, configure and install remill (Presets) working-directory: ${{ steps.build_paths.outputs.SOURCE }}/anvill/remill @@ -287,6 +286,7 @@ jobs: -Dsleigh_DIR:PATH=${{ steps.remill_installer.outputs.PATH }}/usr/local/lib/cmake/sleigh \ -DANVILL_ENABLE_TESTS=true \ -DANVILL_ENABLE_INSTALL=true \ + -DANVILL_INSTALL_PYTHON3_LIBS=false \ -G Ninja \ ${{ steps.build_paths.outputs.SOURCE }}/anvill @@ -313,7 +313,7 @@ jobs: cmake --build anvill_build \ --target install - echo ::set-output name=PATH::${DESTDIR} + echo "PATH=${DESTDIR}" >> ${GITHUB_OUTPUT} - name: Build, configure and install anvill (Presets) working-directory: ${{ steps.build_paths.outputs.SOURCE }}/anvill @@ -327,7 +327,6 @@ jobs: scripts/build-preset.sh debug - name: Run the tests env: - BINJA_DECODE_KEY: ${{ secrets.BINJA_DECODE_KEY }} CTEST_OUTPUT_ON_FAILURE: 1 shell: bash @@ -355,48 +354,52 @@ jobs: shell: bash working-directory: ${{ steps.build_paths.outputs.BUILD }} run: | - echo ::set-output name=DEB_PACKAGE_PATH::${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.deb) - echo ::set-output name=RPM_PACKAGE_PATH::${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.rpm) - echo ::set-output name=TGZ_PACKAGE_PATH::${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.tar.gz) + echo "DEB_PACKAGE_PATH=${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.deb)" >> ${GITHUB_OUTPUT} + echo "RPM_PACKAGE_PATH=${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.rpm)" >> ${GITHUB_OUTPUT} + echo "TGZ_PACKAGE_PATH=${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.tar.gz)" >> ${GITHUB_OUTPUT} - name: Install the DEB package run: | dpkg -i ${{ steps.package_names.outputs.DEB_PACKAGE_PATH }} - - name: Run Integration Tests (AMP Challenge Binaries) + - name: Run Integration Tests (AnghaBench 50) shell: bash working-directory: ${{ steps.build_paths.outputs.REL_SOURCE }}/anvill run: | python3 -m pip install -r libraries/lifting-tools-ci/requirements.txt - scripts/test-amp-challenge-bins.sh \ - --ghidra-install-dir $GHIDRA_INSTALL_DIR \ - --decompile-cmd "anvill-decompile-spec" - env: - TOB_AMP_PASSPHRASE: ${{secrets.TOB_AMP_PASSPHRASE}} + scripts/test-angha-50.sh \ + --ghidra-install-dir $GHIDRA_INSTALL_DIR \ + --decompile-cmd "anvill-decompile-spec" \ + --jobs 8 + + - name: Tar and Compress logs + if: failure() + run: | + shopt -s globstar + tar -cJf test-errs.tar.xz ${{ steps.build_paths.outputs.REL_SOURCE }}/anvill/angha-test-50/**/std* + shell: bash - # - name: Run Integration Tests (AnghaBench 1K) - # shell: bash - # working-directory: ${{ steps.build_paths.outputs.REL_SOURCE }}/anvill - # run: | - # python3 -m pip install -r libraries/lifting-tools-ci/requirements.txt - # scripts/test-angha-1k.sh \ - # --python-cmd "python3 -m anvill" \ - # --decompile-cmd "anvill-decompile-json" + - name: Upload stderr/stdout logs on error + if: failure() + uses: actions/upload-artifact@v3 + with: + name: AnghaBench 50 logs + path: test-errs.tar.xz - name: Store the DEB package - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ${{ matrix.image.name }}-${{ matrix.image.tag }}_llvm${{ matrix.llvm }}_deb_package path: ${{
steps.package_names.outputs.DEB_PACKAGE_PATH }} - name: Store the RPM package - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ${{ matrix.image.name }}-${{ matrix.image.tag }}_llvm${{ matrix.llvm }}_rpm_package path: ${{ steps.package_names.outputs.RPM_PACKAGE_PATH }} - name: Store the TGZ package - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ${{ matrix.image.name }}-${{ matrix.image.tag }}_llvm${{ matrix.llvm }}_tgz_package path: ${{ steps.package_names.outputs.TGZ_PACKAGE_PATH }} @@ -410,11 +413,11 @@ jobs: strategy: fail-fast: false matrix: - os: ["macos-11"] - llvm: ["14", "15"] - cxxcommon_version: ["v0.2.16"] + os: ["macos-13"] + llvm: ["17"] + cxxcommon_version: ["v0.6.0"] - runs-on: ${{ matrix.os }} + runs-on: macos-13 steps: - name: Setup the build paths @@ -434,17 +437,17 @@ jobs: ${rel_ccache_path} \ ${rel_workspace_path} - echo ::set-output name=SOURCE::$(pwd)/${rel_source_path} - echo ::set-output name=REL_SOURCE::${rel_source_path} - echo ::set-output name=BUILD::$(pwd)/${rel_build_path} - echo ::set-output name=REL_BUILD::${rel_build_path} - echo ::set-output name=INSTALL::$(pwd)/${rel_install_path} - echo ::set-output name=DOWNLOADS::$(pwd)/${rel_downloads_path} - echo ::set-output name=CCACHE::$(pwd)/${rel_ccache_path} - echo ::set-output name=WORKSPACE::$(pwd)/${rel_workspace_path} + echo "SOURCE=$(pwd)/${rel_source_path}" >> ${GITHUB_OUTPUT} + echo "REL_SOURCE=${rel_source_path}" >> ${GITHUB_OUTPUT} + echo "BUILD=$(pwd)/${rel_build_path}" >> ${GITHUB_OUTPUT} + echo "REL_BUILD=${rel_build_path}" >> ${GITHUB_OUTPUT} + echo "INSTALL=$(pwd)/${rel_install_path}" >> ${GITHUB_OUTPUT} + echo "DOWNLOADS=$(pwd)/${rel_downloads_path}" >> ${GITHUB_OUTPUT} + echo "CCACHE=$(pwd)/${rel_ccache_path}" >> ${GITHUB_OUTPUT} + echo "WORKSPACE=$(pwd)/${rel_workspace_path}" >> ${GITHUB_OUTPUT} - name: Update the cache (downloads) - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ${{ steps.build_paths.outputs.DOWNLOADS }} @@ -455,7 +458,7 @@ jobs: gitmodules_${{ matrix.os }}_${{ matrix.llvm }}_${{ matrix.cxxcommon_version }} - name: Update the cache (ccache) - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ${{ steps.build_paths.outputs.CCACHE }} @@ -472,7 +475,7 @@ jobs: ninja - name: Clone the anvill repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: path: ${{ steps.build_paths.outputs.REL_SOURCE }}/anvill fetch-depth: 0 @@ -489,7 +492,7 @@ jobs: id: cxxcommon_installer working-directory: ${{ steps.build_paths.outputs.DOWNLOADS }} run: | - folder_name="vcpkg_${{ matrix.os }}_llvm-${{ matrix.llvm }}_xcode-13.0_amd64" + folder_name="vcpkg_${{ matrix.os }}_llvm-${{ matrix.llvm }}_xcode-15.0_amd64" archive_name="${folder_name}.tar.xz" url="https://github.com/lifting-bits/cxx-common/releases/download/${{ matrix.cxxcommon_version}}/${archive_name}" @@ -514,13 +517,13 @@ jobs: echo "${destination_path}/installed/x64-osx-rel/bin" >> $GITHUB_PATH - echo ::set-output name=PATH::${destination_path} + echo "PATH=${destination_path}" >> ${GITHUB_OUTPUT} - name: Select the build job count shell: bash id: build_job_count run: | - echo ::set-output name=VALUE::$(($(sysctl -n hw.logicalcpu) + 1)) + echo "VALUE=$(($(sysctl -n hw.logicalcpu) + 1))" >> ${GITHUB_OUTPUT} - name: Configure remill working-directory: ${{ steps.build_paths.outputs.BUILD }} @@ -561,7 +564,7 @@ jobs: cmake --build remill_build \ --target install - echo ::set-output name=PATH::${DESTDIR} + echo "PATH=${DESTDIR}" >> 
${GITHUB_OUTPUT} - name: Build, configure and install remill (Presets) working-directory: ${{ steps.build_paths.outputs.SOURCE }}/anvill/remill @@ -617,7 +620,7 @@ jobs: cmake --build anvill_build \ --target install - echo ::set-output name=PATH::${DESTDIR} + echo "PATH=${DESTDIR}" >> ${GITHUB_OUTPUT} - name: Build, configure and install anvill (Presets) working-directory: ${{ steps.build_paths.outputs.SOURCE }}/anvill @@ -657,23 +660,35 @@ jobs: shell: bash working-directory: ${{ steps.build_paths.outputs.BUILD }} run: | - echo ::set-output name=TGZ_PACKAGE_PATH::${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.tar.gz) + echo "TGZ_PACKAGE_PATH=${{ steps.build_paths.outputs.REL_BUILD }}/$(ls *.tar.gz)" >> ${GITHUB_OUTPUT} - name: Store the TGZ package - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ${{ matrix.os }}_llvm${{ matrix.llvm }}_tgz_package path: ${{ steps.package_names.outputs.TGZ_PACKAGE_PATH }} + passes_ci: + needs: build_linux + runs-on: ubuntu-22.04 + if: always() + steps: + - name: Successful linux build + if: ${{ contains(needs.*.result, 'success') }} + run: exit 0 + - name: Failing linux build + if: ${{ !(contains(needs.*.result, 'success')) }} + run: exit 1 + release_packages: # Do not run the release procedure if any of the builds has failed needs: [build_linux, build_macos] - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') steps: - name: Clone the anvill repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: path: anvill fetch-depth: 0 @@ -685,7 +700,7 @@ jobs: ./scripts/generate_changelog.sh changelog.md - name: Download all artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 - name: Draft the new release id: create_release @@ -703,13 +718,13 @@ jobs: - name: Group the packages by platform run: | - zip -r9 anvill_ubuntu-20.04_packages.zip \ - ubuntu-20.04* + zip -r9 anvill_ubuntu-22.04_packages.zip \ + ubuntu-22.04* - zip -r9 anvill_macos-11_packages.zip \ - macos-11* + zip -r9 anvill_macos-13_packages.zip \ + macos-13* - - name: Upload the Ubuntu 20.04 packages + - name: Upload the Ubuntu 22.04 packages uses: actions/upload-release-asset@v1 env: @@ -717,8 +732,8 @@ jobs: with: upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: anvill_ubuntu-20.04_packages.zip - asset_name: anvill_ubuntu-20.04_packages.zip + asset_path: anvill_ubuntu-22.04_packages.zip + asset_name: anvill_ubuntu-22.04_packages.zip asset_content_type: application/gzip - name: Upload the macOS 11 packages uses: actions/upload-release-asset@v1 env: @@ -729,8 +744,8 @@ jobs: with: upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: anvill_macos-11_packages.zip - asset_name: anvill_macos-11_packages.zip + asset_path: anvill_macos-13_packages.zip + asset_name: anvill_macos-13_packages.zip asset_content_type: application/gzip Docker_Linux: @@ -740,12 +755,10 @@ jobs: strategy: matrix: - llvm: ["14", "15"] - ubuntu: ["20.04"] - binja: - - { channel: "headless", version: "3.1.3479" } + llvm: ["17"] + ubuntu: ["22.04"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: true - name: Build LLVM ${{ matrix.llvm }} on ${{ matrix.ubuntu }} diff --git a/CMakeLists.txt b/CMakeLists.txt index 8f4ad622f..3f165ed66 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,6 +28,7 @@ find_package(glog CONFIG REQUIRED) find_package(Z3 CONFIG REQUIRED) find_package(doctest CONFIG REQUIRED) find_package(LLVM CONFIG REQUIRED)
+include_directories(SYSTEM ${LLVM_INCLUDE_DIRS}) llvm_map_components_to_libnames(llvm_libs support core irreader bitreader bitwriter) find_package(sleigh CONFIG) @@ -56,7 +57,7 @@ if(ANVILL_ENABLE_INSTALL) endif(ANVILL_ENABLE_INSTALL) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_EXTENSIONS OFF) if(ANVILL_ENABLE_TESTS) diff --git a/Dockerfile b/Dockerfile index 7718a962b..3b6cd0041 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ -ARG LLVM_VERSION=15 +ARG LLVM_VERSION=17 ARG ARCH=amd64 -ARG UBUNTU_VERSION=20.04 -ARG CXX_COMMON_VERSION=0.2.16 +ARG UBUNTU_VERSION=22.04 +ARG CXX_COMMON_VERSION=0.6.0 ARG DISTRO_BASE=ubuntu${UBUNTU_VERSION} ARG BUILD_BASE=ubuntu:${UBUNTU_VERSION} ARG LIBRARIES=/opt/trailofbits @@ -15,7 +15,7 @@ ARG LLVM_VERSION ARG CXX_COMMON_VERSION ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ - apt-get install -qqy --no-install-recommends git libdbus-1-3 curl unzip python3 python3-pip python3.8 python3.8-venv python3-setuptools xz-utils cmake && \ + apt-get install -qqy --no-install-recommends git libdbus-1-3 curl unzip python3 python3-pip python3-setuptools xz-utils cmake && \ rm -rf /var/lib/apt/lists/* #### NOTE #### @@ -32,7 +32,7 @@ ARG CXX_COMMON_VERSION ARG LIBRARIES RUN apt-get update && \ - apt-get install -qqy xz-utils python3.8-venv make rpm && \ + apt-get install -qqy xz-utils python3 python3.10-venv make rpm && \ rm -rf /var/lib/apt/lists/* # Build dependencies @@ -59,7 +59,7 @@ ENV VIRTUAL_ENV=/opt/trailofbits/venv ENV PATH="${VIRTUAL_ENV}/bin:${PATH}" # create a virtualenv in /opt/trailofbits/venv -RUN python3.8 -m venv ${VIRTUAL_ENV} +RUN python3 -m venv ${VIRTUAL_ENV} # Needed for sourcing venv SHELL ["/bin/bash", "-c"] diff --git a/README.md b/README.md index e0091baaf..cdacbe7ca 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ plugin that is currently closed source. You can checkout the tag: [binja-final-v ## Getting Help -If you are experiencing undocumented problems with Anvill then ask for help in the `#binary-lifting` channel of the [Empire Hacking Slack](https://empireslacking.herokuapp.com/). +If you are experiencing undocumented problems with Anvill, then ask for help in the `#binary-lifting` channel of the [Empire Hacking Slack](https://slack.empirehacking.nyc/). ## Supported Platforms @@ -103,10 +103,10 @@ Or you can tell CMake where to find the remill installation prefix by passing `- ### Docker image -To build via Docker run, specify the architecture, base Ubuntu image and LLVM version. For example, to build Anvill linking against LLVM 14 on Ubuntu 20.04 on AMD64 do: +To build via Docker, specify the architecture, base Ubuntu image, and LLVM version. For example, to build Anvill linking against LLVM 17 on Ubuntu 22.04 on AMD64, run: ```shell -ARCH=amd64; UBUNTU_VERSION=20.04; LLVM=14; \ +ARCH=amd64; UBUNTU_VERSION=22.04; LLVM=17; \ docker build .
\ -t anvill-llvm${LLVM}-ubuntu${UBUNTU_VERSION}-${ARCH} \ -f Dockerfile \ diff --git a/bin/Decompile/Main.cpp b/bin/Decompile/Main.cpp index f089da416..b74245233 100644 --- a/bin/Decompile/Main.cpp +++ b/bin/Decompile/Main.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,11 @@ DEFINE_bool(add_breakpoints, false, "lifted bitcode."); DEFINE_bool(add_names, false, "Try to apply symbol names to lifted entities."); +DEFINE_bool(disable_opt, false, "Don't apply optimization passes"); +DEFINE_bool(llvm_debug, false, "Enable LLVM debug flag"); +DEFINE_bool(inline_basic_blocks, false, + "Enables inlining of basic blocks for high-level output"); + DEFINE_string( default_callable_spec, "", @@ -105,9 +111,6 @@ int main(int argc, char *argv[]) { remill::GetReference(maybe_buff); llvm::LLVMContext context; -#if LLVM_VERSION_NUMBER < LLVM_VERSION(15, 0) - context.enableOpaquePointers(); -#endif llvm::Module module("lifted_code", context); auto maybe_spec = @@ -158,6 +161,7 @@ int main(int argc, char *argv[]) { anvill::SpecificationControlFlowProvider cfp(spec); anvill::SpecificationMemoryProvider mp(spec); anvill::LifterOptions options(spec.Arch().get(), module, *tp.get(), cfp, mp); + options.should_inline_basic_blocks = FLAGS_inline_basic_blocks; // options.state_struct_init_procedure = // anvill::StateStructureInitializationProcedure::kNone; @@ -241,7 +245,14 @@ int main(int argc, char *argv[]) { llvm::EnableStatistics(); } - anvill::OptimizeModule(lifter, module); + if (FLAGS_llvm_debug) { + llvm::DebugFlag = true; + } + + if (!FLAGS_disable_opt) { + anvill::OptimizeModule(lifter, module, spec.GetBlockContexts(), spec); + } + int ret = EXIT_SUCCESS; diff --git a/bin/Decompile/tests/scripts/roundtrip.py b/bin/Decompile/tests/scripts/roundtrip.py deleted file mode 100755 index d3d2c6995..000000000 --- a/bin/Decompile/tests/scripts/roundtrip.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env python3 - -# -# Copyright (c) 2019-present, Trail of Bits, Inc. -# All rights reserved. -# -# This source code is licensed in accordance with the terms specified in -# the LICENSE file found in the root directory of this source tree.
-# - -import unittest -import subprocess -import argparse -import tempfile -import os -import platform -import sys -import shutil - - -class RunError(Exception): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return str(self.msg) - - -def write_command_log(cmd_description, cmd_exec, ws): - with open(os.path.join(ws, "commands.log"), "a") as cmdlog: - if cmd_description: - cmdlog.write(f"# {cmd_description}\n") - cmdlog.write(f"{cmd_exec}\n") - - -def run_cmd(cmd, timeout, description, ws): - try: - exec_cmd = f"{' '.join(cmd)}" - sys.stdout.write(f"Running: {exec_cmd}\n") - write_command_log(description, exec_cmd, ws) - p = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - timeout=timeout, - universal_newlines=True, - ) - except FileNotFoundError as e: - raise RunError('Error: No such file or directory: "' + - e.filename + '"') - except PermissionError as e: - raise RunError('Error: File "' + e.filename + - '" is not an executable.') - - return p - - -def compile(self, clang, input, output, timeout, ws, options=None): - cmd = [] - cmd.append(clang) - if options is not None: - cmd.extend(options) - cmd.extend([input, "-o", output]) - p = run_cmd( - cmd, timeout, description="Original source Clang compile command", ws=ws) - - self.assertEqual(p.returncode, 0, "clang failure") - self.assertEqual( - len(p.stderr), 0, "errors or warnings during compilation: %s" % p.stderr - ) - - return p - - -def specify(self, specifier, input, output, timeout, ws): - cmd = list(specifier) if isinstance(specifier, list) else [specifier] - cmd.extend(["--bin_in", input]) - cmd.extend(["--spec_out", output]) - cmd.extend(["--entrypoint", "main"]) - - p = run_cmd(cmd, timeout, description="Spec generation command", ws=ws) - - self.assertEqual(p.returncode, 0, "specifier failure: %s" % p.stderr) - self.assertEqual( - len(p.stderr), 0, "errors or warnings during specification: %s" % p.stderr - ) - - return p - - -def decompile(self, decompiler, input, output, timeout, ws): - cmd = [decompiler] - cmd.extend(["--spec", input]) - cmd.extend(["--bc_out", output]) - cmd.extend(["-add_names"]) - p = run_cmd(cmd, timeout, description="Decompilation command", ws=ws) - - self.assertEqual(p.returncode, 0, "decompiler failure: %s" % p.stderr) - self.assertEqual( - len(p.stderr), 0, "errors or warnings during decompilation: %s" % p.stderr - ) - - return p - - -def roundtrip(self, specifier, decompiler, filename, testname, clang, timeout, workspace): - - # Python refuses to add delete=False to the TemporaryDirectory constructor - # with tempfile.TemporaryDirectory(prefix=f"{testname}_", dir=workspace) as tempdir: - tempdir = tempfile.mkdtemp(prefix=f"{testname}_", dir=workspace) - - compiled = os.path.join(tempdir, f"{testname}_compiled") - compile(self, clang, filename, compiled, timeout, tempdir) - - # capture binary run outputs - compiled_output = run_cmd( - [compiled], timeout, description="capture compilation output", ws=tempdir) - - rt_json = os.path.join(tempdir, f"{testname}_rt.json") - specify(self, specifier, compiled, rt_json, timeout, tempdir) - - rt_bc = os.path.join(tempdir, f"{testname}_rt.bc") - decompile(self, decompiler, rt_json, rt_bc, timeout, tempdir) - - rebuilt = os.path.join(tempdir, f"{testname}_rebuilt") - compile(self, clang, rt_bc, rebuilt, timeout, tempdir, ["-Wno-everything"]) - # capture outputs of binary after roundtrip - rebuilt_output = run_cmd( - [rebuilt], timeout, description="Capture binary output after roundtrip", ws=tempdir) - - # Clean up 
tempdir if no workspace specified - # otherwise keep it for debugging purposes - if not workspace: - shutil.rmtree(tempdir) - - self.assertEqual(compiled_output.stderr, - rebuilt_output.stderr, "Different stderr") - self.assertEqual(compiled_output.stdout, - rebuilt_output.stdout, "Different stdout") - self.assertEqual(compiled_output.returncode, - rebuilt_output.returncode, "Different return code") - - -class TestRoundtrip(unittest.TestCase): - pass - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("anvill", help="path to anvill-decompile-spec") - parser.add_argument("tests", help="path to test directory") - parser.add_argument("clang", help="path to clang") - parser.add_argument("workspace", nargs="?", default=None, - help="Where to save temporary unit test outputs") - parser.add_argument("-t", "--timeout", - help="set timeout in seconds", type=int) - - args = parser.parse_args() - - if args.workspace: - os.makedirs(args.workspace) - - def test_generator(path, test_name): - def test(self): - specifier = ["python3", "-m", "anvill"] - roundtrip(self, specifier, args.anvill, path, test_name, - args.clang, args.timeout, args.workspace) - - return test - - for item in os.scandir(args.tests): - test_name = "test_%s" % os.path.splitext(item.name)[0] - test = test_generator(item.path, test_name) - setattr(TestRoundtrip, test_name, test) - - unittest.main(argv=[sys.argv[0], "-v"]) diff --git a/ci/angha_1k_test_settings.json b/ci/angha_50_test_settings.json similarity index 100% rename from ci/angha_1k_test_settings.json rename to ci/angha_50_test_settings.json diff --git a/ci/challenge_bins_test_settings.json b/ci/challenge_bins_test_settings.json index c4313ae90..0eedb5720 100644 --- a/ci/challenge_bins_test_settings.json +++ b/ci/challenge_bins_test_settings.json @@ -1,21 +1,106 @@ { "timeout.seconds": "800", "tests.ignore": [ - "challenge-3_amd64_program_go_patched.elf/output.json", - "challenge-3_amd64_program_go.elf/output.json", - "challenge-3_x86_program_go_patched.elf/output.json", - "challenge-3_x86_program_go.elf/output.json", - "challenge-3_arm64_program_go_patched.elf/output.json", - "challenge-3_arm64_program_go.elf/output.json", - "challenge-3_armv7_program_go_patched.elf/output.json", - "challenge-3_armv7_program_go.elf/output.json", - "challenge-3_arm64_program_go_patched.elf", - "challenge-3_arm64_program_go.elf", - "challenge-3_armv7_program_go_patched.elf", - "challenge-3_armv7_program_go.elf", - "challenge-3_amd64_program_go_patched.elf", - "challenge-3_amd64_program_go.elf", - "challenge-3_x86_program_go_patched.elf", - "challenge-3_x86_program_go.elf" - ] -} \ No newline at end of file + "challenge-6-armv7-program_c.clang-10.patched-nodebug", + "challenge-7-armv7-program_c.clang-10.vuln-nodebug", + "challenge-6-amd64-program_c.clang-10.vuln-nodebug", + "challenge-6-x86-program_c.clang-10.patched-nodebug", + "challenge-7-amd64-program_c.clang-10.vuln", + "challenge-5-armv7-program_c.clang.patched", + "challenge-7-armv7-program_c.clang-10.vuln", + "challenge-8-x86-program_c-nodebug", + "ppc-linflexd_uart_mpc5744p.elf", + "challenge-7-amd64-program_c.clang-10.vuln-nodebug", + "challenge-6-x86-program_c.clang-10.vuln-nodebug", + "challenge-3-arm64-program_go_patched", + "challenge-8-amd64-program_c", + "challenge-9-amd64-program_c.clang-10.vuln", + "challenge-8-armv7-program_c-nodebug", + "challenge-9-armv7-program_c.clang-10.vuln-nodebug", + "challenge-8-x86-program_c", + "challenge-6-armv7-program_c.clang-10.vuln", + 
"challenge-9-amd64-program_c.clang-10.vuln-nodebug", + "challenge-5-amd64-program_c.clang.vuln", + "challenge-6-amd64-program_c.clang-10.patched", + "challenge-6-x86-program_c.clang-10.patched", + "challenge-6-amd64-program_c.clang-10.patched-nodebug", + "challenge-5-amd64-program_c.clang.vuln-nodebug", + "ppc-tsens_mpc5744p.elf", + "challenge-3-arm64-program_go", + "challenge-5-amd64-program_c.clang.patched-nodebug", + "challenge-9-armv7-program_c.clang-10.vuln", + "challenge-7-x86-program_c.clang-10.vuln-nodebug", + "challenge-5-armv7-program_c.clang.vuln", + "challenge-6-amd64-program_c.clang-10.vuln", + "challenge-5-x86-program_c.clang.vuln", + "challenge-5-amd64-program_c.clang.patched", + "ppc_vle_booke_example.elf", + "challenge-5-x86-program_c.clang.vuln-nodebug", + "challenge-7-x86-program_c.clang-10.vuln", + "challenge-6-armv7-program_c.clang-10.vuln-nodebug", + "challenge-8-amd64-program_c-nodebug", + "challenge-6-x86-program_c.clang-10.vuln", + "challenge-9-x86-program_c.clang-10.vuln-nodebug", + "challenge-5-x86-program_c.clang.patched", + "challenge-5-x86-program_c.clang.patched-nodebug", + "challenge-5-armv7-program_c.clang.patched-nodebug", + "challenge-5-armv7-program_c.clang.vuln-nodebug", + "challenge-6-armv7-program_c.clang-10.patched", + "challenge-8-armv7-program_c", + "challenge-9-x86-program_c.clang-10.vuln", + "challenge-9-arm64-program_c.clang-10.vuln-nodebug", + "challenge-9-arm64-program_c.clang-10.vuln", + "challenge-6-arm64-program_c.clang-10.patched", + "challenge-3-amd64-program_go_patched-nodebug", + "challenge-3-amd64-program_go_patched", + "challenge-3-x86-program_go", + "challenge-5-arm64-program_c.clang.vuln", + "challenge-8-arm64-program_c-nodebug", + "challenge-6-arm64-program_c.clang-10.patched-nodebug", + "challenge-7-arm64-program_c.clang-10.vuln", + "challenge-3-x86-program_go-nodebug", + "challenge-5-arm64-program_c.clang.vuln-nodebug", + "challenge-3-arm64-program_go_patched-nodebug", + "challenge-6-arm64-program_c.clang-10.vuln", + "challenge-3-amd64-program_go", + "challenge-3-amd64-program_go-nodebug", + "challenge-3-x86-program_go_patched", + "challenge-8-arm64-program_c", + "challenge-5-arm64-program_c.clang.patched", + "challenge-7-arm64-program_c.clang-10.vuln-nodebug", + "challenge-6-arm64-program_c.clang-10.vuln-nodebug", + "challenge-5-arm64-program_c.clang.patched-nodebug", + "challenge-3-x86-program_go_patched-nodebug", + "challenge-3-arm64-program_go-nodebug", + "challenge-10-ppc-program_c.elf", + "challenge-10-ppc-vle-program_c.elf", + "challenge-3-armv7-program_go", + "challenge-3-armv7-program_go-nodebug", + "challenge-3-armv7-program_go_patched", + "challenge-3-armv7-program_go_patched-nodebug" + ], + "language_id_overrides": { + "ppc-adc_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-edma_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-etimer_freq_measurement_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-etimer_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-fccu_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-flexcan_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-hello_world.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-hello_world_mpc5777c.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-hello_world_pll.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-linflexd_lin_master_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-linflexd_lin_slave_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-linflexd_uart_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-lp_stop_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + 
"ppc-sgen_flexpwm_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-siul_registerprotection_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-spi_dma_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-spi_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-tsens_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc-xbic_dma_mpc5744p.elf": "PowerPC:BE:64:VLE-32addr", + "ppc_vle_booke_example.elf": "PowerPC:BE:64:VLE-32addr", + "challenge-10-ppc-vle-program_c.elf": "PowerPC:BE:64:VLE-32addr", + "challenge-10-ppc-program_c.elf": "PowerPC:BE:64:VLE-32addr" + } +} diff --git a/data_specifications/specification.proto b/data_specifications/specification.proto index ecd4c63a8..f468bea87 100644 --- a/data_specifications/specification.proto +++ b/data_specifications/specification.proto @@ -14,6 +14,7 @@ enum Arch { ARCH_AARCH32 = 8; ARCH_SPARC32 = 9; ARCH_SPARC64 = 10; + ARCH_PPC = 11; } enum OS { @@ -93,21 +94,15 @@ message TypeSpec { message Register { string register_name = 1; + optional uint64 subreg_sz = 2; } message Memory { optional string base_reg = 1; int64 offset = 2; + uint64 size = 3; } -message Value { - oneof inner_value { - Register reg = 1; - Memory mem = 2; - } -} - - message Variable { repeated Value values = 1; TypeSpec type = 2; @@ -158,16 +153,208 @@ message Callable { ReturnStackPointer return_stack_pointer = 10; } + + +message Constant { + uint64 value = 1; + bool is_tainted_by_pc = 2; +} + +message ValueDomain { + oneof inner { + HighSymbol symb = 1; + int64 stack_disp = 2; + Constant constant = 3; + } +} + +// These arent quite affine relations we just store +// relationships of Reg=Reg'+Offset where Reg' is the register value at entry +// to the function. +// Maybe we want to get more complicated when doing the analysis, +// but this is kinda +// what we want at the end to determine stack relationships. +// This very closely matches ghidra's SymbolicPropogator: +// https://ghidra.re/ghidra_docs/api/ghidra/program/util/SymbolicPropogator.Value.html#getRelativeRegister() +message ValueMapping { + Variable target_value = 1; + ValueDomain curr_val = 2; +} + +enum HighLoc { + HIGH_LOC_UNSPECIFIED = 0; + HIGH_LOC_PARAM = 1; + HIGH_LOC_LOCAL = 2; +} + + + +message SymbolMapping { + HighSymbol high_loc = 1; + Variable low_loc = 2; +} + +message HighSymbol { + string name = 1; + HighLoc location = 2; +} + +message Value { + oneof inner_value { + Register reg = 1; + Memory mem = 2; + } +} + +/* + block1: + u1 = param_1 + 3 + STORE u1, [sp-4] + r1 = param_1 + block2: + u1 = LOAD [sp-4] + r2 = r1 + u1 + (live r2) +*/ + +/* + Current lift: + block1(param_1, reg_state): + store param_1 + 3, localSub4 + store param_1, r1(reg_state) + +block2(reg_state): + %1 = load localSub4 + %2 = load r1(reg_state) + %3 = iadd %1, %2 + store %3, r2(reg_state) + + This is a world where high variables are exactly bound to locations +*/ + +/* + With affine equalities + So r1 and the var are still live so need to copy their state + block1(param_1, reg_state): + store param_1 + 3, localSub4 + store param_1, r1(reg_state) + + + // here tho we have the affine equalities r1 = param1 local_sub=param_1+3 + // So we could do +block2(param_1, reg_state): + %1 = param_1+3 + %2 = iadd param_1, %1 + store %2, r2(reg_state) + + +But then what hppens if the user patches block1 +to + block1(param_1, reg_state): + store param_1 + 2, localSub4 + store r2, r1(reg_state) .... 
we can't really support multiple block patches at once */ + +message BlockContext { + // Affine equalities between values + // and high symbols at entry to + // the block + repeated ValueMapping symvals_at_entry = 1; + repeated ValueMapping symvals_at_exit = 2; + + repeated Parameter live_at_entries = 3; + repeated Parameter live_at_exits = 4; +} + + + +message CodeBlock { + uint64 address = 1; + string name = 2; + // Incoming block(s) by uid + repeated uint64 incoming_blocks = 3; + // Outgoing block(s) by uid + repeated uint64 outgoing_blocks = 4; + uint32 size = 5; + map context_assignments = 6; + uint64 uid = 7; +} + +message Variables { + repeated Variable vars = 1; +} + +message StackEffects { + map allocations = 1; + map frees = 2; + repeated Variable missed_allocs = 3; + repeated Variable missed_frees = 4; +} + + +message StackFrame { + // The size of the "static frame": locals + params + return_addr + uint64 frame_size = 1; + // Stack frame pointer depths are relative to the return address offset + int64 return_address_offset = 2; + // parameter size allows + // us to compute the + // stack depth of the lowest address when the stack grows down + uint64 parameter_size = 3; + + // the maximum depth the stack reaches beyond the return_addr + uint64 max_frame_depth = 4; + + int64 parameter_offset = 5; +} + +message TypeHint { + uint64 target_addr = 1; + Variable target_var = 2; +} + +message RelativeAddress { + uint64 entry_vaddr = 1; + int64 displacement = 2; +} + +message ProgramAddress { + oneof inner { + uint64 internal_address = 1; + RelativeAddress ext_address = 2; + } +} + message Function { uint64 entry_address = 1; - map context_assignments = 2; - FunctionLinkage func_linkage = 3; + uint64 entry_uid = 12; + FunctionLinkage func_linkage = 3; Callable callable = 4; + // Mapping of unique ID to codeblock + map blocks = 5; + map local_variables = 6; + + // Keys are unique IDs of code blocks; each block + // may have a corresponding context + map block_context = 7; + StackEffects stack_effects = 8; + StackFrame frame = 9; + + repeated Parameter in_scope_vars = 10; + + // an instruction can have a set of typehints that says this loc is known + // to have this type after this instruction; these will be translated into + // a low lifting of that location with spec type metadata + repeated TypeHint type_hints = 11; + ProgramAddress binary_addr = 13; } + + message GlobalVariable { TypeSpec type = 1; uint64 address = 2; + ProgramAddress binary_address = 3; } message Symbol { @@ -184,7 +371,6 @@ message MemoryRange { message JumpTarget { uint64 address = 1; - map context_assignments = 2; } message Jump { @@ -198,7 +384,8 @@ message Call { optional uint64 return_address = 2; bool is_tailcall = 3; bool stop = 4; - optional uint64 target_address = 5; + bool noreturn = 5; + optional uint64 target_address = 6; } message Return { @@ -230,4 +417,8 @@ message Specification { repeated MemoryRange memory_ranges = 7; ControlFlowOverrides overrides = 8; map type_aliases = 9; + string image_name = 10; + uint64 image_base = 11; + repeated string required_globals = 12; + map type_names = 13; } diff --git a/include/anvill/ABI.h b/include/anvill/ABI.h index ef3c4689a..ef836ff60 100644 --- a/include/anvill/ABI.h +++ b/include/anvill/ABI.h @@ -82,4 +82,18 @@ extern const std::string kAnvillDataProvenanceFunc; // `alloca`. extern const std::string kAnvillStackZero; +// The alloca for the abstract stack prior to splitting at the return address.
TODO(Ian): maybe we are fine to +// use this to queue off of, then just move it after the split +extern const std::string kStackMetadata; + +extern const std::string kBasicBlockUidMetadata; + + +/// Intrinsic that acts like a return instruction but leaves both the basic block and the parent function. +extern const std::string kAnvillBasicBlockReturn; + + +// Intrinsic that acts as a goto to an address +extern const std::string kAnvillGoto; + } // namespace anvill diff --git a/include/anvill/CrossReferenceFolder.h b/include/anvill/CrossReferenceFolder.h index b3f834095..d536f6c11 100644 --- a/include/anvill/CrossReferenceFolder.h +++ b/include/anvill/CrossReferenceFolder.h @@ -120,6 +120,10 @@ class CrossReferenceFolder { CrossReferenceFolder &operator=(const CrossReferenceFolder &) = default; CrossReferenceFolder &operator=(CrossReferenceFolder &&) noexcept = default; + protected: + virtual std::optional + ResolveValueCallback(llvm::Value *) const; + private: CrossReferenceFolder(void) = delete; diff --git a/include/anvill/Declarations.h b/include/anvill/Declarations.h index c650f429b..a95bc328e 100644 --- a/include/anvill/Declarations.h +++ b/include/anvill/Declarations.h @@ -8,12 +8,19 @@ #pragma once +#include +#include +#include + +#include #include +#include #include #include #include #include #include +#include #include #include "Result.h" @@ -39,8 +46,56 @@ struct Register; } // namespace remill namespace anvill { +struct Uid { + std::uint64_t value; + bool operator==(const Uid &) const = default; +}; + +} // namespace anvill + +template <> +struct std::hash { + size_t operator()(const anvill::Uid &uid) const noexcept { + return std::hash()(uid.value); + } +}; + +namespace anvill { + +struct CodeBlock { + uint64_t addr; + uint32_t size; + std::unordered_set outgoing_edges; + // The set of context assignments that occur at the entry point to this block. + // A block may have specific decoding context properties such as "TM=1" (the thumb bit is set) + // So we declare the context assignments that occur at the entry point to a block. + std::unordered_map context_assignments; + Uid uid; +}; + + class TypeDictionary; + +struct LowLoc { + const remill::Register *reg{nullptr}; + const remill::Register *mem_reg{nullptr}; + std::int64_t mem_offset{0}; + std::optional size{std::nullopt}; + + std::uint64_t Size() const; + + bool operator==(const LowLoc &loc) const = default; +}; + + +struct RelAddr { + uint64_t vaddr; + std::int64_t disp; +}; + +using MachineAddr = std::variant; + // A value, such as a parameter or a return value. Values are resident // in one of two locations: either in a register, represented by a non- // nullptr `reg` value, or in memory, at `[mem_reg + mem_offset]`. @@ -57,21 +112,24 @@ class TypeDictionary; // the caller allocate the space, and pass a pointer to that space into // the callee, and so that should be represented using a parameter. struct ValueDecl { - const remill::Register *reg{nullptr}; - const remill::Register *mem_reg{nullptr}; - std::int64_t mem_offset{0}; + std::vector ordered_locs; TypeSpec spec_type; // Type of this value. llvm::Type *type{nullptr}; + + bool operator==(const ValueDecl &) const = default; }; + // A value declaration corresponding with a named parameter. struct ParameterDecl : public ValueDecl { // Name of the parameter. std::string name; + + bool operator==(const ParameterDecl &) const = default; }; // A typed location in memory, that isn't actually code.
This roughly @@ -90,11 +148,30 @@ struct VariableDecl { // Address of this global variable. std::uint64_t address{0}; + MachineAddr binary_addr{}; + // Declare this global variable in an LLVM module. llvm::GlobalVariable *DeclareInModule(const std::string &name, llvm::Module &) const; }; +struct OffsetDomain { + ValueDecl target_value; + std::int64_t stack_offset; +}; + +struct ConstantDomain { + ValueDecl target_value; + std::uint64_t value; + bool should_taint_by_pc; + + bool operator==(const ConstantDomain &) const = default; +}; + +struct SpecStackOffsets { + std::vector affine_equalities; +}; + // A declaration for a callable entity. struct CallableDecl { private: @@ -136,7 +213,7 @@ struct CallableDecl { // NOTE(pag): In the case of the AMD64 Itanium ABI, we expect the // specification to include `RDX` as an explicit return // value when the function might throw an exception. - std::vector returns; + ValueDecl returns; // Is this a noreturn function, e.g. like `abort`? bool is_noreturn{false}; @@ -152,11 +229,11 @@ struct CallableDecl { // Interpret `target` as being the function to call, and call it from within // a basic block in a lifted bitcode function. Returns the new value of the // memory pointer. - llvm::Value * - CallFromLiftedBlock(llvm::Value *target, const anvill::TypeDictionary &types, - const remill::IntrinsicTable &intrinsics, - llvm::BasicBlock *block, llvm::Value *state_ptr, - llvm::Value *mem_ptr) const; + llvm::Value *CallFromLiftedBlock(llvm::Value *target, + const anvill::TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &, llvm::Value *state_ptr, + llvm::Value *mem_ptr) const; // Try to create a callable decl from a protobuf default callable decl // specification. Returns a string error if something went wrong. @@ -168,6 +245,158 @@ struct CallableDecl { DecodeFromPB(const remill::Arch *arch, const std::string &pb); }; + +// Basic block contexts impose an ordering on live values s.t. shared Parameters between +// live exits and entries +struct BasicBlockVariable { + ParameterDecl param; + bool live_at_entry; + bool live_at_exit; +}; + +class BasicBlockContext { + public: + size_t GetParamIndex(const ParameterDecl &decl) const; + + llvm::Value *ProvidePointerFromStruct(llvm::IRBuilder<> &ir, + llvm::StructType *sty, llvm::Value *, + const ParameterDecl &decl) const; + + llvm::Argument * + ProvidePointerFromFunctionArgs(llvm::Function *, + const ParameterDecl &decl) const; + + virtual ~BasicBlockContext() = default; + + virtual const SpecStackOffsets &GetStackOffsetsAtEntry() const = 0; + virtual const SpecStackOffsets &GetStackOffsetsAtExit() const = 0; + + virtual const std::vector &GetConstantsAtEntry() const = 0; + virtual const std::vector &GetConstantsAtExit() const = 0; + + virtual size_t GetStackSize() const = 0; + + virtual size_t GetMaxStackSize() const = 0; + + virtual size_t GetPointerDisplacement() const = 0; + + virtual uint64_t GetParentFunctionAddress() const = 0; + + virtual ValueDecl ReturnValue() const = 0; + + virtual const std::vector &GetParams() const = 0; + + // Deduplicates locations and ensures there are no overlapping decls + // A valid parameter list is a set of non overlapping a-locs with distinct names. 
+ std::vector LiveParamsAtEntryAndExit() const; + + + std::vector LiveBBParamsAtEntry() const; + std::vector LiveBBParamsAtExit() const; + + protected: + virtual const std::vector &LiveParamsAtEntry() const = 0; + virtual const std::vector &LiveParamsAtExit() const = 0; +}; + + +/// An abstract stack is made up of components with a bytesize, these components allow us to split the stack at offsets +/// in particular this is helpful for splitting out stack space beyond the locals for things like return addresses +struct StackComponent { + size_t size; + llvm::Value *stackptr; +}; + +class AbstractStack { + private: + llvm::LLVMContext &context; + bool stack_grows_down; + std::vector stack_types; + std::vector components; + size_t total_size; + size_t pointer_displacement; + + public: + // The displacement required to make all offset accesses positive + size_t GetPointerDisplacement() const { + return pointer_displacement; + }; + + + // The pointer displacement is the size above the zero point of the stack, typically return pointer offset + parameter size + AbstractStack(llvm::LLVMContext &context, + std::vector components, bool stack_grows_down, + size_t pointer_displacement); + + std::optional + StackOffsetFromStackPointer(std::int64_t stack_off) const; + + + std::int64_t StackPointerFromStackOffset(size_t offset) const; + + + std::optional + StackPointerFromStackCompreference(llvm::Value *) const; + + static llvm::Type *StackTypeFromSize(llvm::LLVMContext &context, size_t size); + + //llvm::Type *StackType() const; + + std::optional + PointerToStackMemberFromOffset(llvm::IRBuilder<> &ir, + std::int64_t stack_off) const; +}; + +struct FunctionDecl; +class SpecBlockContext : public BasicBlockContext { + private: + const FunctionDecl &decl; + SpecStackOffsets offsets_at_entry; + SpecStackOffsets offsets_at_exit; + std::vector constants_at_entry; + std::vector constants_at_exit; + std::vector live_params_at_entry; + std::vector live_params_at_exit; + std::vector params; + + public: + SpecBlockContext(const FunctionDecl &decl, SpecStackOffsets offsets_at_entry, + SpecStackOffsets offsets_at_exit, + std::vector constants_at_entry, + std::vector constants_at_exit, + std::vector live_params_at_entry, + std::vector live_params_at_exit); + + virtual const SpecStackOffsets &GetStackOffsetsAtEntry() const override; + virtual const SpecStackOffsets &GetStackOffsetsAtExit() const override; + + virtual const std::vector & + GetConstantsAtEntry() const override; + virtual const std::vector & + GetConstantsAtExit() const override; + + virtual ValueDecl ReturnValue() const override; + + virtual uint64_t GetParentFunctionAddress() const override; + + virtual size_t GetStackSize() const override; + + virtual size_t GetMaxStackSize() const override; + + virtual size_t GetPointerDisplacement() const override; + + virtual const std::vector &GetParams() const override; + + protected: + virtual const std::vector &LiveParamsAtEntry() const override; + virtual const std::vector &LiveParamsAtExit() const override; +}; + + +struct TypeHint { + uint64_t target_addr; + ValueDecl hint; +}; // A function decl, as represented at a "near ABI" level. To be specific, // not all C, and most C++ decls, as written would be directly translatable // to this. This ought nearly represent how LLVM represents a C/C++ function @@ -183,9 +412,13 @@ struct CallableDecl { // Thumb code in an Arm program, or x86 code in a bootloader that // brings up amd64 code, etc.). 
struct FunctionDecl : public CallableDecl { + friend class SpecBlockContext; + public: // Address of this function in memory. std::uint64_t address{0}; + // Entry block UID + Uid entry_uid{0}; // The maximum number of bytes of redzone afforded to this function // (if it doesn't change the stack pointer, or, for example, writes @@ -195,10 +428,40 @@ struct FunctionDecl : public CallableDecl { bool lift_as_decl{false}; bool is_extern{false}; - // The set of context assignments that occur at the entry point to this function. - // A called function may have specific decoding context properties such as "TM=1" (the thumb bit is set) - // So we declare the context assignments that occur at the entry point to a function. - std::unordered_map context_assignments; + // These are the blocks contained within the function representing the CFG. + std::unordered_map cfg; + + std::unordered_map locals; + + std::unordered_map stack_offsets_at_entry; + + std::unordered_map stack_offsets_at_exit; + + std::unordered_map> live_regs_at_entry; + + std::unordered_map> live_regs_at_exit; + + std::unordered_map> constant_values_at_entry; + + std::unordered_map> constant_values_at_exit; + + // sorted vector of hints + std::vector type_hints; + + std::uint64_t stack_depth; + + std::uint64_t maximum_depth; + + std::int64_t ret_ptr_offset{0}; + + std::int64_t parameter_offset{0}; + + std::size_t parameter_size{0}; + + MachineAddr binary_addr{}; + + + std::vector in_scope_variables; // Declare this function in an LLVM module. llvm::Function *DeclareInModule(std::string_view name, llvm::Module &) const; @@ -210,9 +473,15 @@ struct FunctionDecl : public CallableDecl { return Create(func, arch.get()); } + size_t GetPointerDisplacement() const; + // Create a function declaration from an LLVM function. static Result Create(llvm::Function &func, const remill::Arch *arch); + + SpecBlockContext GetBlockContext(Uid uid) const; + + void AddBBContexts(std::unordered_map &contexts) const; }; // A call site decl, as represented at a "near ABI" level. This is like a diff --git a/include/anvill/Lifters.h b/include/anvill/Lifters.h index 2f021cda0..6107def57 100644 --- a/include/anvill/Lifters.h +++ b/include/anvill/Lifters.h @@ -8,6 +8,8 @@ #pragma once +#include + #include #include #include @@ -148,6 +150,15 @@ class StackFrameRecoveryOptions { bool stack_pointer_is_negative{false}; }; +using ProgramCounterInitProcedure = + std::function; + +using StackPointerInitProcedure = std::function; + +using ReturnAddressInitProcedure = std::function; + // Options that direct the behavior of the code and data lifters. 
class LifterOptions { public: @@ -155,31 +166,37 @@ class LifterOptions { // // (ptrtoint __anvill_sp) // - static llvm::Value *SymbolicStackPointerInit( + static llvm::Value *SymbolicStackPointerInitWithOffset( llvm::IRBuilderBase &ir, const remill::Register *sp_reg, - uint64_t func_address); + uint64_t func_address, std::int64_t offset); + + static llvm::Value *SymbolicStackPointerInit(llvm::IRBuilderBase &ir, + const remill::Register *sp_reg, + uint64_t func_address); // Initialize the program counter with a constant expression of the form: // // (add (ptrtoint __anvill_pc), ) // - static llvm::Value *SymbolicProgramCounterInit( - llvm::IRBuilderBase &ir, const remill::Register *pc_reg, - uint64_t func_address); + static llvm::Value *SymbolicProgramCounterInit(llvm::IRBuilderBase &ir, + llvm::Type *address_type, + uint64_t func_address); // Initialize the return address with a constant expression of the form: // // (ptrtoint __anvill_ra) // - static llvm::Value *SymbolicReturnAddressInit( - llvm::IRBuilderBase &ir, llvm::IntegerType *type, uint64_t func_address); + static llvm::Value *SymbolicReturnAddressInit(llvm::IRBuilderBase &ir, + llvm::IntegerType *type, + uint64_t func_address); // Initialize the return address with the result of: // // call llvm.returnaddress(0) // - static llvm::Value *ConcreteReturnAddressInit( - llvm::IRBuilderBase &ir, llvm::IntegerType *type, uint64_t func_address); + static llvm::Value *ConcreteReturnAddressInit(llvm::IRBuilderBase &ir, + llvm::IntegerType *type, + uint64_t func_address); inline explicit LifterOptions( @@ -200,7 +217,9 @@ class LifterOptions { add_breakpoints(false), track_provenance(false), //TODO(ian): This should be initialized by an OS + arch pair - stack_pointer_is_signed(false), should_remove_anvill_pc(true) { + stack_pointer_is_signed(false), + should_remove_anvill_pc(true), + should_inline_basic_blocks(false) { CheckModuleContextMatchesArch(); } @@ -241,23 +260,17 @@ class LifterOptions { // (add (ptrtoint __anvill_pc)
) // // Otherwise, a concrete integer is used, i.e. `
`. - std::function - program_counter_init_procedure; + ProgramCounterInitProcedure program_counter_init_procedure; // Procedure for producing an initial value of the stack pointer on entry // to a function. An `IRBuilderBase` is provided for building values within // the entry block of the function at the given address. - std::function - stack_pointer_init_procedure; + StackPointerInitProcedure stack_pointer_init_procedure; // Procedure for producing an initial value of the return address on entry // to a function. An `IRBuilderBase` is provided for building values within // the entry block of the function at the given address. - std::function - return_address_init_procedure; + ReturnAddressInitProcedure return_address_init_procedure; StackFrameRecoveryOptions stack_frame_recovery_options; @@ -276,7 +289,9 @@ class LifterOptions { // Should we treat the stack pointer as signed when simplifying sign flags. bool stack_pointer_is_signed : 1; - bool should_remove_anvill_pc: 1; + bool should_remove_anvill_pc : 1; + + bool should_inline_basic_blocks : 1; private: LifterOptions(void) = delete; @@ -350,7 +365,8 @@ class ValueLifter { // Interpret `data` as the backing bytes to initialize an `llvm::Constant` // of type `type_of_data`. `loc_ea`, if non-null, is the address at which // `data` appears. - llvm::Constant *Lift(std::string_view data, llvm::Type *type_of_data) const; + llvm::Constant *Lift(llvm::ArrayRef data, + llvm::Type *type_of_data) const; // Interpret `ea` as being a pointer to a value of type `value_type` in the // address space `address_space`. @@ -358,7 +374,7 @@ class ValueLifter { // Returns an `llvm::Constant *` if the pointer is associated with a // known or plausible entity, and an `nullptr` otherwise. llvm::Constant *Lift(std::uint64_t ea, llvm::Type *value_type, - unsigned address_space=0u) const; + unsigned address_space = 0u) const; private: std::shared_ptr impl; diff --git a/include/anvill/Optimize.h b/include/anvill/Optimize.h index 77327888d..0dda52ac8 100644 --- a/include/anvill/Optimize.h +++ b/include/anvill/Optimize.h @@ -8,6 +8,10 @@ #pragma once +#include + +#include "anvill/Passes/BasicBlockPass.h" + namespace llvm { class Module; } // namespace llvm @@ -21,7 +25,8 @@ class EntityLifter; // Optimize a module. This can be a module with semantics code, lifted // code, etc. 
-void OptimizeModule(const EntityLifter &lifter_context, - llvm::Module &module); +void OptimizeModule(const EntityLifter &lifter_context, llvm::Module &module, + const BasicBlockContexts &contexts, + const anvill::Specification &spec); } // namespace anvill diff --git a/include/anvill/Passes/BasicBlockPass.h b/include/anvill/Passes/BasicBlockPass.h new file mode 100644 index 000000000..e3f5aeee0 --- /dev/null +++ b/include/anvill/Passes/BasicBlockPass.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include "anvill/Declarations.h" + +namespace anvill { + + +class BasicBlockContexts { + public: + virtual std::optional> + GetBasicBlockContextForUid(Uid uid) const = 0; + virtual const FunctionDecl &GetFunctionAtAddress(uint64_t addr) const = 0; +}; + +template +class BasicBlockPass : public llvm::PassInfoMixin> { + private: + const BasicBlockContexts &contexts; + + + public: + static llvm::StringRef name(void) { + return T::name(); + } + + llvm::PreservedAnalyses run(llvm::Function &F, + llvm::FunctionAnalysisManager &AM) { + auto &bb_pass = *static_cast(this); + auto bbuid = anvill::GetBasicBlockUid(&F); + if (bbuid.has_value()) { + auto maybe_bb_cont = contexts.GetBasicBlockContextForUid(*bbuid); + if (maybe_bb_cont) { + const BasicBlockContext &bb_cont = *maybe_bb_cont; + auto &parent_func = + contexts.GetFunctionAtAddress(bb_cont.GetParentFunctionAddress()); + return bb_pass.runOnBasicBlockFunction(F, AM, bb_cont, parent_func); + } + } + + return llvm::PreservedAnalyses::all(); + } + + protected: + BasicBlockPass(const BasicBlockContexts &contexts) : contexts(contexts) {} +}; +} // namespace anvill \ No newline at end of file diff --git a/include/anvill/Passes/CodeQualityStatCollector.h b/include/anvill/Passes/CodeQualityStatCollector.h index 16c082864..75ed4ef9a 100644 --- a/include/anvill/Passes/CodeQualityStatCollector.h +++ b/include/anvill/Passes/CodeQualityStatCollector.h @@ -7,9 +7,9 @@ namespace anvill { class CodeQualityStatCollector : public llvm::PassInfoMixin { public: - llvm::PreservedAnalyses run(llvm::Function &function, - llvm::FunctionAnalysisManager &analysisManager); + llvm::PreservedAnalyses run(llvm::Module &module, + llvm::ModuleAnalysisManager &analysisManager); static llvm::StringRef name(void); }; -} // namespace anvill \ No newline at end of file +} // namespace anvill diff --git a/include/anvill/Passes/ConvertAddressesToEntityUses.h b/include/anvill/Passes/ConvertAddressesToEntityUses.h index 885ab8776..6e794c3b6 100644 --- a/include/anvill/Passes/ConvertAddressesToEntityUses.h +++ b/include/anvill/Passes/ConvertAddressesToEntityUses.h @@ -10,6 +10,7 @@ #include #include + #include #include @@ -44,7 +45,6 @@ using EntityUsages = std::vector; class ConvertAddressesToEntityUses final : public llvm::PassInfoMixin { private: - // Resolve addresses to entities and vice versa. const CrossReferenceResolver &xref_resolver; @@ -52,7 +52,6 @@ class ConvertAddressesToEntityUses final const std::optional pc_metadata_id; public: - // Function pass entry point llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); @@ -60,13 +59,15 @@ class ConvertAddressesToEntityUses final // Returns the pass name static llvm::StringRef name(void); + bool IsPointerLike(llvm::Use &use); + // Enumerates some of the possible entity usages that are isolated to // specific instruction operand uses. 
EntityUsages EnumeratePossibleEntityUsages(llvm::Function &function); ConvertAddressesToEntityUses( const CrossReferenceResolver &xref_resolver_, - std::optional pc_metadata_id_=std::nullopt); + std::optional pc_metadata_id_ = std::nullopt); }; } // namespace anvill diff --git a/include/anvill/Passes/ConvertPointerArithmeticToGEP.h b/include/anvill/Passes/ConvertPointerArithmeticToGEP.h new file mode 100644 index 000000000..01b143f2b --- /dev/null +++ b/include/anvill/Passes/ConvertPointerArithmeticToGEP.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022-present, Trail of Bits, Inc. + * All rights reserved. + * + * This source code is licensed in accordance with the terms specified in + * the LICENSE file found in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "BasicBlockPass.h" + +namespace anvill { + +class ConvertPointerArithmeticToGEP final + : public BasicBlockPass { + private: + struct Impl; + std::unique_ptr impl; + + public: + using StructMap = std::unordered_map; + using TypeMap = std::unordered_map; + using MDMap = std::unordered_map; + + llvm::PreservedAnalyses + runOnBasicBlockFunction(llvm::Function &F, llvm::FunctionAnalysisManager &AM, + const anvill::BasicBlockContext &, + const FunctionDecl &); + + // Returns the pass name + static llvm::StringRef name(void); + + ConvertPointerArithmeticToGEP(const BasicBlockContexts &contexts, + TypeMap &types, StructMap &structs, MDMap &md); + ConvertPointerArithmeticToGEP(const ConvertPointerArithmeticToGEP &); + ~ConvertPointerArithmeticToGEP(); +}; + +} // namespace anvill diff --git a/include/anvill/Passes/InlineBasicBlockFunctions.h b/include/anvill/Passes/InlineBasicBlockFunctions.h new file mode 100644 index 000000000..76f5bb3c1 --- /dev/null +++ b/include/anvill/Passes/InlineBasicBlockFunctions.h @@ -0,0 +1,29 @@ + +#pragma once + +#include +#include +#include + +#include "anvill/Lifters.h" + + +namespace anvill { + +// attempts to replace assignments to next pc with idiomatic control flow that terminates the block +// with the goto intrinsic +class InlineBasicBlockFunctions final + : public BasicBlockPass { + public: + InlineBasicBlockFunctions(const BasicBlockContexts &contexts) + : BasicBlockPass(contexts) {} + + static llvm::StringRef name(void); + + + llvm::PreservedAnalyses + runOnBasicBlockFunction(llvm::Function &F, llvm::FunctionAnalysisManager &AM, + const anvill::BasicBlockContext &, + const anvill::FunctionDecl &); +}; +} // namespace anvill \ No newline at end of file diff --git a/include/anvill/Passes/LowerSwitchIntrinsics.h b/include/anvill/Passes/LowerSwitchIntrinsics.h deleted file mode 100644 index a0116206a..000000000 --- a/include/anvill/Passes/LowerSwitchIntrinsics.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -// The goal here is to lower anvill_complete_switch to an llvm switch when we -// can recover the cases. This analysis must be sound but -// `anvill_complete_switch` maybe used for any complete set of indirect targets -// so cases may not even exist. -// -// The analysis has to prove to us that this transformation is semantically -// preserving. 
-// -// This pass focuses on lowering switch statements where a jump table does exist - -namespace anvill { - -class LowerSwitchIntrinsics - : public IndirectJumpPass, - public llvm::PassInfoMixin { - - private: - const MemoryProvider &memProv; - - public: - LowerSwitchIntrinsics(const MemoryProvider &memProv) - : memProv(memProv) {} - - static llvm::StringRef name(void); - - llvm::PreservedAnalyses runOnIndirectJump(llvm::CallInst *indirectJump, - llvm::FunctionAnalysisManager &am, - llvm::PreservedAnalyses); - - - static llvm::PreservedAnalyses BuildInitialResult(); -}; -} // namespace anvill diff --git a/include/anvill/Passes/RecoverBasicStackFrame.h b/include/anvill/Passes/RecoverBasicStackFrame.h deleted file mode 100644 index 022225609..000000000 --- a/include/anvill/Passes/RecoverBasicStackFrame.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. - */ - -#pragma once - -#include -#include - -namespace anvill { - -class StackFrameRecoveryOptions; - -// This function pass recovers stack information by analyzing the usage -// of the `__anvill_sp` symbol -class RecoverBasicStackFrame final - : public llvm::PassInfoMixin { - - // Lifting options - const StackFrameRecoveryOptions &options; - - public: - - // Function pass entry point - llvm::PreservedAnalyses run(llvm::Function &func, - llvm::FunctionAnalysisManager &fam); - - // Returns the pass name - static llvm::StringRef name(void); - - inline explicit RecoverBasicStackFrame( - const StackFrameRecoveryOptions &options_) - : options(options_) {} -}; - -} // namespace anvill diff --git a/include/anvill/Passes/RemoveAnvillReturns.h b/include/anvill/Passes/RemoveAnvillReturns.h new file mode 100644 index 000000000..dd362b6ad --- /dev/null +++ b/include/anvill/Passes/RemoveAnvillReturns.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace anvill { +class RemoveAnvillReturns final + : public llvm::PassInfoMixin { + public: + RemoveAnvillReturns(void) {} + + static llvm::StringRef name(void); + + llvm::PreservedAnalyses run(llvm::Function &F, + llvm::FunctionAnalysisManager &AM); +}; +} // namespace anvill diff --git a/include/anvill/Passes/RemoveCallIntrinsics.h b/include/anvill/Passes/RemoveCallIntrinsics.h new file mode 100644 index 000000000..2c3658b5b --- /dev/null +++ b/include/anvill/Passes/RemoveCallIntrinsics.h @@ -0,0 +1,44 @@ + +#pragma once + +#include +#include +#include + +#include "anvill/Lifters.h" +#include "anvill/Specification.h" + + +namespace anvill { +/** + * @brief Attempts to remove call intrinsics by identifying a type for the target of a remill_call and lifting the arguments. + * Types are either provided by a recovered entity or by folding the reference to an address that has an override type.
+ */ +class RemoveCallIntrinsics final + : public IntrinsicPass, + public llvm::PassInfoMixin { + private: + const CrossReferenceResolver &xref_resolver; + const Specification &spec; + const EntityLifter &lifter; + + public: + RemoveCallIntrinsics(const CrossReferenceResolver &xref_resolver, + const Specification &spec, const EntityLifter &lifter) + : xref_resolver(xref_resolver), + spec(spec), + lifter(lifter) {} + + llvm::PreservedAnalyses runOnIntrinsic(llvm::CallInst *indirectJump, + llvm::FunctionAnalysisManager &am, + llvm::PreservedAnalyses); + + + static llvm::PreservedAnalyses INIT_RES; + + + static bool isTargetInstrinsic(const llvm::CallInst *callinsn); + static llvm::StringRef name(); +}; + +} // namespace anvill \ No newline at end of file diff --git a/include/anvill/Passes/RemoveRemillFunctionReturns.h b/include/anvill/Passes/RemoveRemillFunctionReturns.h deleted file mode 100644 index 1c10f20f3..000000000 --- a/include/anvill/Passes/RemoveRemillFunctionReturns.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. - */ - -#pragma once - -#include - -namespace anvill { - -class CrossReferenceFolder; -class CrossReferenceResolver; -class StackPointerResolver; - -enum ReturnAddressResult { - - // We've found a case where a value returned by `llvm.returnaddress`, or - // casted from `__anvill_ra`, reaches into the `pc` argument of the - // `__remill_function_return` intrinsic. This is the ideal case that we - // want to handle. - kFoundReturnAddress, - - // We've found a case where we're seeing a load from something derived from - // `__anvill_sp`, our "symbolic stack pointer", is reaching into the `pc` - // argument of `__remill_function_return`. This suggests that stack frame - // recovery has not happened yet, and thus we haven't really given stack - // frame recovery or stack frame splitting a chance to work. - kFoundSymbolicStackPointerLoad, - - // We've found a `load` or something else. This is probably a sign that - // stack frame recovery has happened, and that the actual return address - // is not necessarily the expected value, and so we need to try to swap - // out the return address with whatever we loaded. - kUnclassifiableReturnAddress -}; - -class RemoveRemillFunctionReturns final - : public llvm::PassInfoMixin { - private: - const CrossReferenceResolver &xref_resolver; - - public: - inline explicit RemoveRemillFunctionReturns( - const CrossReferenceResolver &xref_resolver_) - : xref_resolver(xref_resolver_) {} - - static llvm::StringRef name(void); - - llvm::PreservedAnalyses run(llvm::Function &F, - llvm::FunctionAnalysisManager &AM); - - private: - ReturnAddressResult QueryReturnAddress( - const CrossReferenceFolder &xref_folder, - const StackPointerResolver &sp_resolver, - llvm::Module *module, - llvm::Value *val) const; -}; -} // namespace anvill diff --git a/include/anvill/Passes/ReplaceStackReferences.h b/include/anvill/Passes/ReplaceStackReferences.h new file mode 100644 index 000000000..45b6588f4 --- /dev/null +++ b/include/anvill/Passes/ReplaceStackReferences.h @@ -0,0 +1,34 @@ + +#pragma once + +#include +#include +#include +#include + + +namespace anvill { +/** + * @brief Replaces references to anvill_pc +- disp with a pointer to the represented local variable. 
* If variable information separates variables that actually overlap, this pass may split them apart in an unsound way. + */ +class ReplaceStackReferences final + : public BasicBlockPass { + private: + const EntityLifter &lifter; + + public: + ReplaceStackReferences(const BasicBlockContexts &contexts, + const EntityLifter &lifter) + : BasicBlockPass(contexts), + lifter(lifter) {} + + static llvm::StringRef name(void); + + + llvm::PreservedAnalyses + runOnBasicBlockFunction(llvm::Function &F, llvm::FunctionAnalysisManager &AM, + const anvill::BasicBlockContext &, + const FunctionDecl &); +}; +} // namespace anvill \ No newline at end of file diff --git a/include/anvill/Passes/LowerTypeHintIntrinsics.h b/include/anvill/Passes/RewriteVectorOps.h similarity index 74% rename from include/anvill/Passes/LowerTypeHintIntrinsics.h rename to include/anvill/Passes/RewriteVectorOps.h index 27228e189..621635d57 100644 --- a/include/anvill/Passes/LowerTypeHintIntrinsics.h +++ b/include/anvill/Passes/RewriteVectorOps.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-present, Trail of Bits, Inc. + * Copyright (c) 2023-present, Trail of Bits, Inc. * All rights reserved. * * This source code is licensed in accordance with the terms specified in @@ -12,8 +12,7 @@ namespace anvill { -class LowerTypeHintIntrinsics final - : public llvm::PassInfoMixin { +class RewriteVectorOps final : public llvm::PassInfoMixin { public: static llvm::StringRef name(void); diff --git a/include/anvill/Providers.h b/include/anvill/Providers.h index 58edb809a..ceffc0b96 100644 --- a/include/anvill/Providers.h +++ b/include/anvill/Providers.h @@ -9,6 +9,7 @@ #pragma once #include +#include #include #include @@ -45,11 +46,6 @@ class TypeProvider { std::optional TryGetFunctionTypeOrDefault(uint64_t address) const; - std::optional - TryGetCalledFunctionTypeOrDefault(uint64_t function_address, - const remill::Instruction &from_inst, - uint64_t to_address) const; - std::optional TryGetVariableTypeOrDefault(uint64_t address, llvm::Type *hinted_value_type = nullptr) const; @@ -60,19 +56,6 @@ class TypeProvider { virtual std::optional TryGetFunctionType(uint64_t address) const = 0; - // Try to return the type of a function that has been called from `from_isnt`. - virtual std::optional - TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst) const; - - // Try to return the type of a function starting at address `to_address`. This - // type is the prototype of the function. The type can be call site specific, - // where the call site is `from_inst`. - virtual std::optional - TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst, - uint64_t to_address) const; - // Try to return the variable at given address or containing the address virtual std::optional TryGetVariableType(uint64_t address, @@ -89,6 +72,8 @@ class TypeProvider { virtual const ::anvill::TypeDictionary &Dictionary(void) const = 0; + virtual std::vector NamedTypes(void) const = 0; + virtual ~TypeProvider() = default; }; @@ -135,6 +120,9 @@ class NullTypeProvider : public BaseTypeProvider { std::optional TryGetVariableType(uint64_t, llvm::Type *hinted_value_type = nullptr) const override; + std::vector NamedTypes(void) const override { + return {}; + } }; // Delegates to an underlying type provider to provide the data.
Derived from @@ -153,19 +141,6 @@ class ProxyTypeProvider : public TypeProvider { std::optional TryGetFunctionType(uint64_t address) const override; - // Try to return the type of a function that has been called from `from_isnt`. - std::optional - TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst) const override; - - // Try to return the type of a function starting at address `to_address`. This - // type is the prototype of the function. The type can be call site specific, - // where the call site is `from_inst`. - std::optional - TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst, - uint64_t to_address) const override; - // Try to return the variable at given address or containing the address std::optional TryGetVariableType(uint64_t address, @@ -180,6 +155,8 @@ class ProxyTypeProvider : public TypeProvider { std::optional)> typed_reg_cb) const override; + std::vector NamedTypes(void) const override; + const ::anvill::TypeDictionary &Dictionary(void) const override; }; @@ -206,11 +183,6 @@ class DefaultCallableTypeProvider : public ProxyTypeProvider { // Set `decl` to the default callable type for `arch`. void SetDefault(remill::ArchName arch, CallableDecl decl); - // Try to return the type of a function that has been called from `from_isnt`. - std::optional - TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst) const override; - std::optional TryGetFunctionType(uint64_t address) const override; }; @@ -219,17 +191,13 @@ class DefaultCallableTypeProvider : public ProxyTypeProvider { class SpecificationTypeProvider : public BaseTypeProvider { private: std::shared_ptr impl; + llvm::DataLayout layout; public: virtual ~SpecificationTypeProvider(void); explicit SpecificationTypeProvider(const Specification &spec); - // Try to return the type of a function that has been called from `from_isnt`. - std::optional - TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst) const override; - // Try to return the type of a function starting at address `address`. This // type is the prototype of the function. 
std::optional @@ -239,6 +207,8 @@ class SpecificationTypeProvider : public BaseTypeProvider { TryGetVariableType(uint64_t address, llvm::Type *hinted_value_type = nullptr) const override; + std::vector NamedTypes(void) const override; + private: SpecificationTypeProvider(void) = delete; }; diff --git a/include/anvill/Result.h b/include/anvill/Result.h index ea7e97dc6..ba6c6eec1 100644 --- a/include/anvill/Result.h +++ b/include/anvill/Result.h @@ -18,6 +18,7 @@ #pragma once #include +#include #include namespace anvill { @@ -44,10 +45,10 @@ class Result final { const ValueType *operator->(void) const; Result(const ValueType &value); - Result(ValueType &&value): destroyed(false), data(std::move(value)) {} + Result(ValueType &&value) : destroyed(false), data(std::move(value)) {} Result(const ErrorType &error); - Result(ErrorType &&error): destroyed(false), data(std::move(error)) {} + Result(ErrorType &&error) : destroyed(false), data(std::move(error)) {} Result(Result &&other) noexcept; Result &operator=(Result &&other) noexcept; diff --git a/include/anvill/Specification.h b/include/anvill/Specification.h index a412c0afd..5e66f480d 100644 --- a/include/anvill/Specification.h +++ b/include/anvill/Specification.h @@ -8,6 +8,8 @@ #pragma once +#include + #include #include #include @@ -61,7 +63,6 @@ struct ControlFlowOverrideSpec { struct JumpTarget { std::uint64_t address; - std::unordered_map context_assignments; }; struct Jump : ControlFlowOverrideSpec { @@ -71,6 +72,7 @@ struct Jump : ControlFlowOverrideSpec { struct Call : ControlFlowOverrideSpec { std::optional return_address; bool is_tailcall; + bool is_noreturn; std::optional target_address; }; @@ -78,7 +80,8 @@ struct Return : ControlFlowOverrideSpec {}; struct Misc : ControlFlowOverrideSpec {}; -using ControlFlowOverride = std::variant; +using ControlFlowOverride = + std::variant; struct CallSiteDecl; struct FunctionDecl; @@ -86,6 +89,23 @@ struct VariableDecl; struct ParameterDecl; struct ValueDecl; + +class Specification; +class SpecBlockContexts : public BasicBlockContexts { + std::unordered_map contexts; + std::unordered_map> funcs; + + public: + SpecBlockContexts(const Specification &spec); + + virtual std::optional> + GetBasicBlockContextForUid(Uid uid) const override; + + virtual const FunctionDecl & + GetFunctionAtAddress(uint64_t addr) const override; +}; + + // Represents the data pulled out of a JSON (sub-)program specification. class Specification { private: @@ -105,6 +125,12 @@ class Specification { // Return the architecture used by this specification. std::shared_ptr Arch(void) const; + // Return the image name used by this specification. + const std::string &ImageName(void) const; + + // Return the image base address used by this specification. + std::uint64_t ImageBase(void) const; + // Return the type dictionary used by this specification. const ::anvill::TypeDictionary &TypeDictionary(void) const; @@ -121,9 +147,16 @@ class Specification { static anvill::Result DecodeFromPB(llvm::LLVMContext &context, std::istream &pb); + // Return the call site at a given function address, instruction address pair, or an empty `shared_ptr`. + std::shared_ptr + CallSiteAt(const std::pair &loc) const; + // Return the function beginning at `address`, or an empty `shared_ptr`. std::shared_ptr FunctionAt(std::uint64_t address) const; + // Return the basic block at `uid`, or an empty `shared_ptr`. + std::shared_ptr BlockAt(Uid uid) const; + // Return the global variable beginning at `address`, or an empty `shared_ptr`. 
std::shared_ptr VariableAt(std::uint64_t address) const; @@ -161,8 +194,7 @@ class Specification { void ForEachReturn(std::function cb) const; // Call `cb` on each miscellaneous control flow override, until `cb` returns `false`. - void ForEachMiscOverride( - std::function cb) const; + void ForEachMiscOverride(std::function cb) const; inline bool operator==(const Specification &that) const noexcept { return impl.get() == that.impl.get(); } inline bool operator!=(const Specification &that) const noexcept { return impl.get() != that.impl.get(); } + + SpecBlockContexts GetBlockContexts() const { + return SpecBlockContexts(*this); + } + + const std::unordered_set &GetRequiredGlobals() const; }; } // namespace anvill diff --git a/include/anvill/Transforms.h b/include/anvill/Transforms.h index 2758b77ae..4db6d6eb0 100644 --- a/include/anvill/Transforms.h +++ b/include/anvill/Transforms.h @@ -178,18 +178,6 @@ void AddRemoveUnusedFPClassificationCalls(llvm::FunctionPassManager &fpm); // various atomic read-modify-write variants into LLVM loads and stores. void AddLowerRemillMemoryAccessIntrinsics(llvm::FunctionPassManager &fpm); -// Type information from prior lifting efforts, or from front-end tools -// (e.g. Binary Ninja) is plumbed through the system by way of calls to -// intrinsic functions such as `__anvill_type`. These function calls -// don't interfere (too much) with optimizations, and they also survive -// optimizations. In general, the key role that they serve is to enable us to -// propagate through pointer type information at an instruction/register -// granularity. -// -// These function calls need to be removed/lowered into `inttoptr` or `bitcast` -// instructions. -void AddLowerTypeHintIntrinsics(llvm::FunctionPassManager &fpm); - // Transforms the bitcode to eliminate calls to `__remill_function_return`, // where appropriate. This will not succeed for all architectures, but is // likely to always succeed for x86(-64) and aarch64, due to their support @@ -245,7 +233,7 @@ void AddRecoverBasicStackFrame(llvm::FunctionPassManager &fpm, // for later passes to benefit from. void AddConvertAddressesToEntityUses( llvm::FunctionPassManager &fpm, const CrossReferenceResolver &resolver, - std::optional pc_annot_id=std::nullopt); + std::optional pc_annot_id = std::nullopt); // Some machine code instructions explicitly introduce undefined values / // behavior. Often, this is a result of the CPUs of different steppings of @@ -365,9 +353,6 @@ void AddBranchRecovery(llvm::FunctionPassManager &fpm); void AddRemoveFailedBranchHints(llvm::FunctionPassManager &fpm); -void AddLowerSwitchIntrinsics(llvm::FunctionPassManager &fpm, - const MemoryProvider &memprov); - // Remove constant expressions of the stack pointer that are not themselves // resolvable to references. For example, comparisons between one or two // stack pointer values.
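[Reviewer note] A minimal sketch of how the new `Specification::GetBlockContexts` plumbing is meant to be consumed together with the `BasicBlockPass` CRTP base introduced earlier in this diff. The pass name and body below are invented for illustration; only the signatures come from this patch:

// Hypothetical pass: the CRTP base resolves the BasicBlockContext for
// functions carrying a basic-block uid and silently skips all others.
class CountBlockParams final
    : public anvill::BasicBlockPass<CountBlockParams> {
 public:
  explicit CountBlockParams(const anvill::BasicBlockContexts &contexts)
      : BasicBlockPass(contexts) {}

  static llvm::StringRef name(void) {
    return "CountBlockParams";
  }

  llvm::PreservedAnalyses
  runOnBasicBlockFunction(llvm::Function &F, llvm::FunctionAnalysisManager &AM,
                          const anvill::BasicBlockContext &block,
                          const anvill::FunctionDecl &parent) {
    DLOG(INFO) << F.getName().str() << " has " << block.GetParams().size()
               << " in-scope variables";
    return llvm::PreservedAnalyses::all();
  }
};

// Wiring it up; `contexts` holds the per-uid state and must outlive the
// pipeline, since BasicBlockPass stores only a reference to it:
//   anvill::SpecBlockContexts contexts(spec);
//   llvm::FunctionPassManager fpm;
//   fpm.addPass(CountBlockParams(contexts));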
diff --git a/include/anvill/Type.h b/include/anvill/Type.h index 6622596eb..ac00c9968 100644 --- a/include/anvill/Type.h +++ b/include/anvill/Type.h @@ -8,6 +8,9 @@ #pragma once +#include +#include + #include #include #include @@ -17,6 +20,13 @@ #include "Result.h" +template +inline void hash_combine(std::size_t &seed, const T &v) { + std::hash hasher; + seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); +} + + namespace llvm { class DataLayout; class IntegerType; @@ -32,6 +42,10 @@ class Arch; } // namespace remill namespace anvill { +llvm::StructType *getOrCreateNamedStruct(llvm::LLVMContext &context, + llvm::StringRef Name); + + struct TypeSpecificationError final { enum class ErrorCode { InvalidSpecFormat, @@ -91,13 +105,32 @@ struct FunctionType; struct UnknownType { unsigned size; + + bool operator==(const UnknownType &) const = default; +}; + + +class TypeName { + public: + std::string name; + + bool operator==(const TypeName &) const = default; + + explicit TypeName(std::string name) : name(name) {} }; using TypeSpec = std::variant, std::shared_ptr, std::shared_ptr, std::shared_ptr, std::shared_ptr, - UnknownType>; + UnknownType, TypeName>; + +bool operator==(std::shared_ptr, std::shared_ptr); +bool operator==(std::shared_ptr, std::shared_ptr); +bool operator==(std::shared_ptr, std::shared_ptr); +bool operator==(std::shared_ptr, std::shared_ptr); +bool operator==(std::shared_ptr, std::shared_ptr); + struct PointerType { template @@ -106,6 +139,8 @@ struct PointerType { is_const(is_const) {} TypeSpec pointee; bool is_const; + + bool operator==(const PointerType &) const = default; }; struct VectorType { @@ -115,6 +150,8 @@ struct VectorType { size(size) {} TypeSpec base; unsigned size; + + bool operator==(const VectorType &) const = default; }; struct ArrayType { @@ -124,10 +161,14 @@ struct ArrayType { size(size) {} TypeSpec base; unsigned size; + + bool operator==(const ArrayType &) const = default; }; struct StructType { std::vector members; + + bool operator==(const StructType &) const = default; }; struct FunctionType { @@ -140,6 +181,8 @@ struct FunctionType { TypeSpec return_type; std::vector arguments; bool is_variadic; + + bool operator==(const FunctionType &) const = default; }; // Dictionary of types to be used by the type specifier. 
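[Reviewer note] The `hash_combine` template added above is the familiar boost-style mixer, and the `std::hash` specializations later in this header fold it over each field of a `TypeSpec` node. A small self-contained illustration of the mixing behavior (the field values are arbitrary):

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

template <typename T>
inline void hash_combine(std::size_t &seed, const T &v) {
  std::hash<T> hasher;
  seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

int main(void) {
  // Combining the same fields in a different order almost certainly yields a
  // different seed, which is what lets structurally different type nodes
  // built from the same parts (e.g. a PointerType's pointee and is_const)
  // hash differently.
  std::size_t a = 0, b = 0;
  hash_combine(a, std::string("pointee"));
  hash_combine(a, true);
  hash_combine(b, true);
  hash_combine(b, std::string("pointee"));
  std::cout << std::boolalpha << (a == b) << "\n";  // almost always false
  return 0;
}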
@@ -246,8 +289,130 @@ class TypeTranslator { EncodeToString(llvm::Type *type, EncodingFormat alphanum = EncodingFormat::kDefault) const; + llvm::MDNode *EncodeToMetadata(TypeSpec spec) const; + TypeSpec DecodeFromMetadata(llvm::MDNode *md) const; + Result DecodeFromSpec(TypeSpec spec) const; }; } // namespace anvill + + +namespace std { +template <> +struct hash { + size_t operator()(const anvill::TypeName &unk) const { + return std::hash()(unk.name); + } +}; + +template <> +struct hash { + size_t operator()(const anvill::UnknownType &unk) const { + return std::hash()(unk.size); + } +}; + +template <> +struct hash { + size_t operator()(const anvill::PointerType &unk) const { + std::size_t result = 0; + + hash_combine(result, unk.is_const); + hash_combine(result, unk.pointee); + + return result; + } +}; + + +template <> +struct hash> { + size_t operator()(const std::shared_ptr &unk) const { + + return std::hash()(*unk); + } +}; + +template <> +struct hash { + size_t operator()(const anvill::VectorType &unk) const { + + std::size_t result = 0; + + hash_combine(result, unk.size); + hash_combine(result, unk.base); + + return result; + } +}; +template <> +struct hash> { + size_t operator()(const std::shared_ptr &unk) const { + return std::hash()(*unk); + } +}; +template <> +struct hash { + size_t operator()(const anvill::ArrayType &unk) const { + std::size_t result = 0; + + hash_combine(result, unk.size); + hash_combine(result, unk.base); + + return result; + } +}; +template <> +struct hash { + size_t operator()(const anvill::StructType &unk) const { + std::size_t result = 0; + + + for (auto ty : unk.members) { + hash_combine(result, ty); + } + + return result; + } +}; +template <> +struct hash { + size_t operator()(const anvill::FunctionType &unk) const { + std::size_t result = 0; + + + for (auto ty : unk.arguments) { + hash_combine(result, ty); + } + + hash_combine(result, unk.is_variadic); + hash_combine(result, unk.return_type); + + return result; + } +}; + +template <> +struct hash> { + size_t operator()(const std::shared_ptr &unk) const { + return std::hash()(*unk); + } +}; + +template <> +struct hash> { + size_t operator()(const std::shared_ptr &unk) const { + return std::hash()(*unk); + } +}; + +template <> +struct hash> { + size_t operator()(const std::shared_ptr &unk) const { + return std::hash()(*unk); + } +}; + +} // namespace std \ No newline at end of file diff --git a/include/anvill/Utils.h b/include/anvill/Utils.h index 5c1eec6ec..fa18aa4c2 100644 --- a/include/anvill/Utils.h +++ b/include/anvill/Utils.h @@ -8,10 +8,20 @@ #pragma once +#include +#include +#include +#include +#include + #include #include #include +#include "anvill/Declarations.h" +#include "anvill/Lifters.h" + + namespace llvm { class BasicBlock; class Instruction; @@ -39,6 +49,14 @@ std::string CreateFunctionName(std::uint64_t addr); // Creates a `data_
` name from an address std::string CreateVariableName(std::uint64_t addr); +// Get metadata for an instruction +std::optional GetMetadata(llvm::StringRef tag, + const llvm::Instruction &instr); + +// Set metadata for an instruction +void SetMetadata(llvm::StringRef tag, llvm::Instruction &insn, + std::uint64_t pc_val); + // Looks for any constant expressions in the operands of `inst` and unfolds // them into other instructions in the same block. void UnfoldConstantExpressions(llvm::Instruction *inst); @@ -62,10 +80,12 @@ class StackPointerResolver { public: ~StackPointerResolver(void); - explicit StackPointerResolver(llvm::Module *module); + explicit StackPointerResolver( + llvm::Module *module, + llvm::ArrayRef additional_base_stack_ptrs); // Returns `true` if it looks like `val` is derived from a symbolic stack - // pointer representation. + // pointer representation, a stack-derived basic block variable, or the abstract stack itself. bool IsRelatedToStackPointer(llvm::Value *) const; }; @@ -79,12 +99,40 @@ bool CanBeAliased(llvm::Value *val); // Produce one or more instructions in `in_block` to load and return // the lifted value associated with `decl`. -llvm::Value *LoadLiftedValue(const ValueDecl &decl, - const TypeDictionary &types, +llvm::Value *LoadLiftedValue(const ValueDecl &decl, const TypeDictionary &types, const remill::IntrinsicTable &intrinsics, + const remill::Arch *arch, llvm::BasicBlock *in_block, llvm::Value *state_ptr, llvm::Value *mem_ptr); +llvm::Value *LoadLiftedValue(const ValueDecl &decl, const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + const remill::Arch *arch, llvm::IRBuilder<> &ir, + llvm::Value *state_ptr, llvm::Value *mem_ptr); + +void CloneIntrinsicsFromModule(llvm::Module &from, llvm::Module &into); + +void StoreNativeValueToRegister(llvm::Value *native_val, + const remill::Register *reg, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &ir, llvm::Value *state_ptr); + +void StoreNativeValueToRegister(llvm::Value *native_val, + const remill::Register *reg, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::BasicBlock *in_block, + llvm::Value *state_ptr); + + +llvm::Value *StoreNativeValue(llvm::Value *native_val, const ValueDecl &decl, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &ir, llvm::Value *state_ptr, + llvm::Value *mem_ptr); + + // Produce one or more instructions in `in_block` to store the // native value `native_val` into the lifted state associated // with `decl`.
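[Reviewer note] On the new `GetMetadata`/`SetMetadata` pair above: the getter's template argument was lost in this rendering, but given the `std::uint64_t pc_val` taken by the setter it presumably returns `std::optional<std::uint64_t>`. A sketch of the intended round trip under that assumption (the "pc" tag name is illustrative, not an anvill constant):

// Round-trips a program counter through instruction metadata. Assumes
// GetMetadata returns std::optional<std::uint64_t>; the tag name is made up.
void TagWithProgramCounter(llvm::Instruction &inst, std::uint64_t pc) {
  anvill::SetMetadata("pc", inst, pc);
  auto recovered = anvill::GetMetadata("pc", inst);
  // GetMetadata yields std::nullopt for instructions without the tag.
  CHECK(recovered.has_value() && *recovered == pc);
}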
@@ -94,4 +142,12 @@ llvm::Value *StoreNativeValue(llvm::Value *native_val, const ValueDecl &decl, llvm::BasicBlock *in_block, llvm::Value *state_ptr, llvm::Value *mem_ptr); +std::optional GetBasicBlockUid(llvm::Function *func); + +llvm::Argument *GetBasicBlockStackPtr(llvm::Function *func); + +bool HasMemLoc(const ValueDecl &v); + +bool HasRegLoc(const ValueDecl &v); + } // namespace anvill diff --git a/lib/ABI.cpp b/lib/ABI.cpp index ba862ee42..1df984d2e 100644 --- a/lib/ABI.cpp +++ b/lib/ABI.cpp @@ -65,21 +65,30 @@ const std::string kGlobalAliasNamePrefix("data_"); const std::string kSymbolicStackFrameValuePrefix(kAnvillNamePrefix + "stack_"); // The anvill function used to handle complete switch cases -const std::string kAnvillSwitchCompleteFunc( - kAnvillNamePrefix + "complete_switch"); +const std::string kAnvillSwitchCompleteFunc(kAnvillNamePrefix + + "complete_switch"); // The anvill function used to handle incomplete switch cases -const std::string kAnvillSwitchIncompleteFunc( - kAnvillNamePrefix + "incomplete_switch"); +const std::string kAnvillSwitchIncompleteFunc(kAnvillNamePrefix + + "incomplete_switch"); // The name of the uninterpreted function that implements data provenance // tracking. -const std::string kAnvillDataProvenanceFunc( - kAnvillNamePrefix + "data_provenance"); +const std::string kAnvillDataProvenanceFunc(kAnvillNamePrefix + + "data_provenance"); // Metadata ID for annotating stack frame `alloca` instructions, and telling // us that what the logical "zero offset" is away from the beginning of the // `alloca`. const std::string kAnvillStackZero(kAnvillNamePrefix + "stack_zero"); +const std::string kBasicBlockUidMetadata(kAnvillNamePrefix + "basic_block_uid_md"); + +const std::string kStackMetadata(kAnvillNamePrefix + "stack_alloc"); + +const std::string kAnvillBasicBlockReturn(kAnvillNamePrefix + + "basic_block_function_return"); + +const std::string kAnvillGoto(kAnvillNamePrefix + "goto"); + } // namespace anvill diff --git a/lib/Arch/AArch64_C.cpp b/lib/Arch/AArch64_C.cpp index 2778a65a9..426b5524c 100644 --- a/lib/Arch/AArch64_C.cpp +++ b/lib/Arch/AArch64_C.cpp @@ -206,7 +206,7 @@ AArch64_C::BindReturnValues(llvm::Function &function, bool &injected_sret, std::vector &ret_values) { llvm::Type *ret_type = function.getReturnType(); - LOG(INFO) << "Binding on return " << remill::LLVMThingToString(ret_type); + DLOG(INFO) << "Binding on return " << remill::LLVMThingToString(ret_type); injected_sret = false; // If there is an sret parameter then it is a special case. 
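[Reviewer note] Downstream passes typically recognize the terminator intrinsics declared in ABI.cpp above by name. A sketch, assuming `kAnvillGoto` resolves to `kAnvillNamePrefix + "goto"` as shown; `starts_with` is used so that suffixed redeclarations (e.g. `__anvill_goto.1`) still match, but an exact `==` comparison works too if only the canonical declaration exists:

// True when `call` targets the anvill goto intrinsic introduced in this diff.
static bool IsAnvillGoto(const llvm::CallInst *call) {
  const auto *callee = call->getCalledFunction();
  return callee && callee->getName().starts_with(anvill::kAnvillGoto);
}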
diff --git a/lib/Arch/Arch.cpp b/lib/Arch/Arch.cpp index 94643f819..84f2b24f5 100644 --- a/lib/Arch/Arch.cpp +++ b/lib/Arch/Arch.cpp @@ -44,7 +44,7 @@ bool RegisterConstraint::ContainsVariant(const std::string &name) const { Result CallingConvention::CreateCCFromArch(const remill::Arch *arch) { - switch (arch->arch_name) { + /*switch (arch->arch_name) { case remill::kArchInvalid: { return kInvalidArch; } @@ -105,14 +105,15 @@ CallingConvention::CreateCCFromArch(const remill::Arch *arch) { std::stringstream ss; ss << "Unsupported architecture/OS pair: " << arch_name << " and " << os_name; - return ss.str(); + return ss.str();*/ + return CreateStubABI(); } // Still need the arch to be passed in so we can create the calling convention Result -CallingConvention::CreateCCFromArchAndID( - const remill::Arch *arch, llvm::CallingConv::ID cc_id) { - switch (cc_id) { +CallingConvention::CreateCCFromArchAndID(const remill::Arch *arch, + llvm::CallingConv::ID cc_id) { + /* switch (cc_id) { case llvm::CallingConv::C: if (arch->IsX86()) { return CreateX86_C(arch); @@ -166,7 +167,8 @@ CallingConvention::CreateCCFromArchAndID( std::stringstream ss; ss << "Unsupported calling convention ID: " << static_cast(cc_id); - return ss.str(); + return ss.str();*/ + return CreateStubABI(); } Result @@ -182,7 +184,7 @@ CallingConvention::AllocateSignature(llvm::Function &func) { if (remill::IsError(maybe_decl)) { return remill::GetErrorString(maybe_decl); } else { - // Here we override the return type of the extern declaration to match how it was allocated + // Here we override the return type of the extern declaration to match how it was allocated // In the future instead of doing this we should store information about how to extract return values at the llvm // level into the abi returns. // TODO(ian): Dont dont do this. @@ -197,8 +199,7 @@ CallingConvention::AllocateSignature(llvm::Function &func) { // positional starting at 1. 
std::vector TryRecoverParamNames(const llvm::Function &function) { std::vector param_names; - param_names.reserve( - function.getFunctionType()->getNumParams()); + param_names.reserve(function.getFunctionType()->getNumParams()); auto i = 0u; for (auto ¶m : function.args()) { diff --git a/lib/Arch/Arch.h b/lib/Arch/Arch.h index d832e6bb8..d07996c77 100644 --- a/lib/Arch/Arch.h +++ b/lib/Arch/Arch.h @@ -8,13 +8,12 @@ #pragma once +#include #include #include #include -#include - namespace llvm { class Function; namespace CallingConv { @@ -132,11 +131,10 @@ class CallingConvention { static Result CreateCCFromArch(const remill::Arch *arch); - static Result CreateCCFromArchAndID( - const remill::Arch *arch, llvm::CallingConv::ID cc_id); + static Result + CreateCCFromArchAndID(const remill::Arch *arch, llvm::CallingConv::ID cc_id); - Result - AllocateSignature(llvm::Function &func); + Result AllocateSignature(llvm::Function &func); virtual llvm::Error AllocateSignature(FunctionDecl &fdecl, llvm::Function &func) = 0; @@ -147,7 +145,7 @@ class CallingConvention { protected: const remill::Arch *const arch; - + /* static std::unique_ptr CreateX86_C(const remill::Arch *arch); @@ -175,6 +173,11 @@ class CallingConvention { static std::unique_ptr CreateSPARC64_C(const remill::Arch *arch); + static std::unique_ptr + CreatePPC_SysV(const remill::Arch *arch);*/ + + static std::unique_ptr CreateStubABI(); + private: const llvm::CallingConv::ID identity; }; diff --git a/lib/Arch/PPC_SysV.cpp b/lib/Arch/PPC_SysV.cpp new file mode 100644 index 000000000..3e4fd511c --- /dev/null +++ b/lib/Arch/PPC_SysV.cpp @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2022-present Trail of Bits, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "AllocationState.h" +#include "Arch.h" + +namespace anvill { +namespace { + +// The PowerPC SystemV ABI documentation (which Freescale's EABI is based off) describes the +// parameter and return registers: +// https://math-atlas.sourceforge.net/devel/assembly/elfspec_ppc.pdf +// +// Despite having 64-bit GPRs, the e200 series toolchain conforms to Freescale's 32-bit PowerPC EABI +// meaning that 32-bit values should be stored in the parameter and return registers: +// https://www.nxp.com/files-static/32bit/doc/ref_manual/e200z759CRM.pdf +static const std::vector kParamRegConstraints = { + // GPRs + RegisterConstraint({VariantConstraint("R3", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R4", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R5", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R6", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R7", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R8", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R9", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R10", kTypeIntegral, kMaxBit32)}), + // FPRs + RegisterConstraint({VariantConstraint("F1", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F2", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F3", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F4", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F5", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F6", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F7", kTypeFloat, kMaxBit64)}), + RegisterConstraint({VariantConstraint("F8", kTypeFloat, kMaxBit64)}), +}; + +static const std::vector kReturnRegConstraints = { + // GPRs + RegisterConstraint({VariantConstraint("R3", kTypeIntegral, kMaxBit32)}), + RegisterConstraint({VariantConstraint("R4", kTypeIntegral, kMaxBit32)}), + // FPRs + RegisterConstraint({VariantConstraint("F1", kTypeFloat, kMaxBit64)}), +}; + +// Used to split things like `i64`s into multiple `i32`s. 
+static llvm::Type *IntegerTypeSplitter(llvm::Type *type) { + auto int_ty = llvm::dyn_cast(type); + if (!int_ty) { + return nullptr; + } + + auto width = int_ty->getPrimitiveSizeInBits(); + if (width <= 32) { + return nullptr; + } + + auto num_elements = (width + 31) / 32; + auto i32_ty = llvm::Type::getInt32Ty(type->getContext()); + return llvm::ArrayType::get(i32_ty, num_elements); +} + +} // namespace + +class PPC_SysV : public CallingConvention { + public: + explicit PPC_SysV(const remill::Arch *arch); + virtual ~PPC_SysV() = default; + + llvm::Error AllocateSignature(FunctionDecl &fdecl, + llvm::Function &func) override; + + private: + llvm::Error BindParameters(llvm::Function &function, + std::vector ¶m_decls); + + llvm::Error BindReturnValues(llvm::Function &function, + std::vector &ret_decls); + + const std::vector ¶meter_register_constraints; + const std::vector &return_register_constraints; +}; + +std::unique_ptr +CallingConvention::CreatePPC_SysV(const remill::Arch *arch) { + return std::make_unique(arch); +} + +PPC_SysV::PPC_SysV(const remill::Arch *arch) + : CallingConvention(llvm::CallingConv::C, arch), + parameter_register_constraints(kParamRegConstraints), + return_register_constraints(kReturnRegConstraints) {} + +llvm::Error PPC_SysV::AllocateSignature(FunctionDecl &fdecl, + llvm::Function &func) { + + auto err = BindReturnValues(func, fdecl.returns); + if (remill::IsError(err)) { + return err; + } + + err = BindParameters(func, fdecl.params); + if (remill::IsError(err)) { + return err; + } + + fdecl.return_stack_pointer_offset = 0; + fdecl.return_stack_pointer = arch->RegisterByName("R1"); + + fdecl.return_address.reg = arch->RegisterByName("LR"); + fdecl.return_address.type = fdecl.return_address.reg->type; + + return llvm::Error::success(); +} + +llvm::Error +PPC_SysV::BindReturnValues(llvm::Function &function, + std::vector &ret_values) { + + auto ret_type = function.getReturnType(); + DLOG(INFO) << "Binding on return " << remill::LLVMThingToString(ret_type); + + // If there is an sret parameter then it is a special case. + if (function.hasStructRetAttr()) { + auto &value_declaration = ret_values.emplace_back(); + + value_declaration.type = llvm::PointerType::get(function.getContext(), 0); + + if (!ret_type->isVoidTy()) { + return llvm::createStringError( + std::errc::invalid_argument, + "Function '%s' with sret-attributed parameter has non-void return type '%s'", + function.getName().str().c_str(), + remill::LLVMThingToString(ret_type).c_str()); + } + + // Indirect return values are passed by pointer through `R3`. + value_declaration.reg = arch->RegisterByName("R3"); + return llvm::Error::success(); + } + + switch (ret_type->getTypeID()) { + case llvm::Type::VoidTyID: return llvm::Error::success(); + + case llvm::Type::IntegerTyID: { + const auto *int_ty = llvm::dyn_cast(ret_type); + const auto int64_ty = llvm::Type::getInt64Ty(int_ty->getContext()); + const auto bit_width = int_ty->getBitWidth(); + if (bit_width <= 64) { + auto &value_declaration = ret_values.emplace_back(); + value_declaration.reg = arch->RegisterByName("R3"); + value_declaration.type = ret_type; + return llvm::Error::success(); + + // Split the integer across `R3` and `R4`. 
+ } else if (bit_width <= 128) { + const char *ret_names[] = {"R3", "R4"}; + for (auto i = 0u; i < 2 && (64 * i) < bit_width; ++i) { + auto &value_declaration = ret_values.emplace_back(); + value_declaration.reg = arch->RegisterByName(ret_names[i]); + value_declaration.type = int64_ty; + } + return llvm::Error::success(); + + // The integer is too big to be split across registers, fall back to + // return-value optimization. + } else { + auto &value_declaration = ret_values.emplace_back(); + value_declaration.type = + llvm::PointerType::get(function.getContext(), 0); + value_declaration.reg = arch->RegisterByName("R3"); + return llvm::Error::success(); + } + } + + // Pointers always fit into `R3`. + case llvm::Type::PointerTyID: { + auto &value_declaration = ret_values.emplace_back(); + value_declaration.reg = arch->RegisterByName("R3"); + value_declaration.type = ret_type; + return llvm::Error::success(); + } + + case llvm::Type::HalfTyID: + case llvm::Type::FloatTyID: + case llvm::Type::DoubleTyID: { + auto &value_declaration = ret_values.emplace_back(); + value_declaration.reg = arch->RegisterByName("F1"); + value_declaration.type = ret_type; + return llvm::Error::success(); + } + + case llvm::Type::FP128TyID: { + + // An fp128 return value is split into two doubles, one in each of + // `F1` and `F2`. + const auto fp128_ty = llvm::Type::getDoubleTy(ret_type->getContext()); + + // Get the primitive type size so the value can be split across the registers. + const auto bit_width = fp128_ty->getScalarSizeInBits(); + const char *reg_names[] = {"F1", "F2"}; + for (auto i = 0u; i < 2 && (64 * i) < bit_width; ++i) { + auto &value_declaration = ret_values.emplace_back(); + value_declaration.reg = arch->RegisterByName(reg_names[i]); + value_declaration.type = fp128_ty; + } + return llvm::Error::success(); + } + + // Try to split the composite type over registers, and fall back on RVO + // if it's not possible. + case llvm::Type::FixedVectorTyID: + case llvm::Type::ArrayTyID: + case llvm::Type::StructTyID: { + AllocationState alloc_ret(return_register_constraints, arch, this); + alloc_ret.config.type_splitter = IntegerTypeSplitter; + auto mapping = alloc_ret.TryRegisterAllocate(*ret_type); + + // There is a valid split over registers, so add the mapping. + if (mapping) { + return alloc_ret.CoalescePacking(mapping.getValue(), ret_values); + + // Composite type splitting failed; unlike with x86, LLVM doesn't naturally + // perform RVO on large structures returned by value from bitcode. + } else { + break; + } + } + + default: break; + } + + return llvm::createStringError( + std::errc::invalid_argument, + "Could not allocate unsupported type '%s' to return register in function '%s'", + remill::LLVMThingToString(ret_type).c_str(), + function.getName().str().c_str()); +} + +llvm::Error +PPC_SysV::BindParameters(llvm::Function &function, + std::vector &parameter_declarations) { + + const auto param_names = TryRecoverParamNames(function); + llvm::DataLayout dl(function.getParent()); + + // Used to keep track of which registers have been allocated. + AllocationState alloc_param(parameter_register_constraints, arch, this); + alloc_param.config.type_splitter = IntegerTypeSplitter; + + unsigned stack_offset = 0; + const auto sp_reg = arch->RegisterByName("R1"); + + for (auto &argument : function.args()) { + const auto &param_name = param_names[argument.getArgNo()]; + const auto param_type = argument.getType(); + + auto allocation = alloc_param.TryRegisterAllocate(*param_type); + + // Try to allocate from a register.
If a register is not available then + // allocate from the stack. + if (allocation) { + auto prev_size = parameter_declarations.size(); + + for (const auto ¶m_decl : allocation.getValue()) { + auto &declaration = parameter_declarations.emplace_back(); + declaration.type = param_decl.type; + if (param_decl.reg) { + declaration.reg = param_decl.reg; + } else { + declaration.mem_offset = param_decl.mem_offset; + declaration.mem_reg = param_decl.mem_reg; + } + } + + // The parameter fit in one register / stack slot. + if ((prev_size + 1u) == parameter_declarations.size()) { + if (!param_name.empty()) { + parameter_declarations[prev_size].name = param_name; + } + + // The parameter was spread across multiple registers. + } else if (!param_name.empty()) { + for (auto i = 0u; i < (parameter_declarations.size() - prev_size); + ++i) { + parameter_declarations[prev_size + i].name = + param_name + std::to_string(i); + } + } + + } else { + auto &declaration = parameter_declarations.emplace_back(); + declaration.type = param_type; + declaration.mem_offset = static_cast(stack_offset); + declaration.mem_reg = sp_reg; + stack_offset += dl.getTypeAllocSize(argument.getType()); + + if (!param_name.empty()) { + declaration.name = param_name; + } + } + } + + return llvm::Error::success(); +} + +} // namespace anvill diff --git a/lib/Arch/StubABI.cpp b/lib/Arch/StubABI.cpp new file mode 100644 index 000000000..92b0652c0 --- /dev/null +++ b/lib/Arch/StubABI.cpp @@ -0,0 +1,28 @@ +#include +#include +#include +#include + +#include + +#include "Arch.h" + +namespace anvill { + +class Stub : public CallingConvention { + public: + Stub() : CallingConvention(0, nullptr) {} + + llvm::Error AllocateSignature(FunctionDecl &fdecl, + llvm::Function &func) override { + return llvm::createStringError( + std::errc::invalid_argument, + "No longer supporting allocating signatures"); + } +}; + +std::unique_ptr CallingConvention::CreateStubABI() { + + return std::make_unique(); +} +} // namespace anvill \ No newline at end of file diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 96fabdb8c..882775249 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -32,6 +32,7 @@ protobuf_generate_cpp( set(anvill_passes ConvertAddressesToEntityUses ConvertMasksToCasts + ConvertPointerArithmeticToGEP ConvertSymbolicReturnAddressToConcreteReturnAddress ConvertXorsToCmps HoistUsersOfSelectsAndPhis @@ -40,13 +41,9 @@ set(anvill_passes Constraints LowerRemillMemoryAccessIntrinsics LowerRemillUndefinedIntrinsics - LowerSwitchIntrinsics - LowerTypeHintIntrinsics - RecoverBasicStackFrame RemoveCompilerBarriers RemoveDelaySlotIntrinsics RemoveErrorIntrinsics - RemoveRemillFunctionReturns RemoveStackPointerCExprs RemoveTrivialPhisAndSelects RemoveUnusedFPClassificationCalls @@ -60,31 +57,42 @@ set(anvill_passes SpreadPCMetadata TransformRemillJumpIntrinsics CombineAdjacentShifts + ReplaceStackReferences + RemoveCallIntrinsics + InlineBasicBlockFunctions + RewriteVectorOps ) set(anvill_arch_HEADERS - "Arch/AllocationState.h" + + # "Arch/AllocationState.h" "Arch/Arch.h" ) set(anvill_arch_SOURCES - "Arch/AArch32_C.cpp" - "Arch/AArch64_C.cpp" - "Arch/AllocationState.cpp" + + # "Arch/AArch32_C.cpp" + # "Arch/AArch64_C.cpp" + # "Arch/AllocationState.cpp" "Arch/Arch.cpp" - "Arch/SPARC32_C.cpp" - "Arch/SPARC64_C.cpp" - "Arch/X86_64_SysV.cpp" - "Arch/X86_C.cpp" - "Arch/X86_FastCall.cpp" - "Arch/X86_StdCall.cpp" - "Arch/X86_ThisCall.cpp" + "Arch/StubABI.cpp" + + # "Arch/PPC_SysV.cpp" + # "Arch/SPARC32_C.cpp" + # "Arch/SPARC64_C.cpp" + # 
"Arch/X86_64_SysV.cpp" + # "Arch/X86_C.cpp" + # "Arch/X86_FastCall.cpp" + # "Arch/X86_StdCall.cpp" + # "Arch/X86_ThisCall.cpp" ) set(anvill_lifters_HEADERS "Lifters/DataLifter.h" "Lifters/EntityLifter.h" "Lifters/FunctionLifter.h" + "Lifters/BasicBlockLifter.h" + "Lifters/CodeLifter.h" "Lifters/ValueLifter.h" ) @@ -92,6 +100,8 @@ set(anvill_lifters_SOURCES "Lifters/DataLifter.cpp" "Lifters/EntityLifter.cpp" "Lifters/FunctionLifter.cpp" + "Lifters/CodeLifter.cpp" + "Lifters/BasicBlockLifter.cpp" "Lifters/Options.cpp" "Lifters/ValueLifter.cpp" ) @@ -169,8 +179,8 @@ add_library(anvill STATIC set_target_properties(anvill PROPERTIES - PUBLIC_HEADER "${anvill_PUBLIC_HEADERS}" - LINKER_LANGUAGE CXX + PUBLIC_HEADER "${anvill_PUBLIC_HEADERS}" + LINKER_LANGUAGE CXX ) target_include_directories(anvill PUBLIC @@ -181,46 +191,41 @@ target_include_directories(anvill PUBLIC target_link_libraries(anvill PUBLIC - remill + remill PRIVATE - protobuf::libprotobuf + protobuf::libprotobuf ) add_dependencies(anvill check_git_anvill) -if (ANVILL_ENABLE_PYTHON3_LIBS) - add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/../python" python) -endif() - -#if(ANVILL_ENABLE_TESTS) -# add_subdirectory("tests") -#endif() - +# if(ANVILL_ENABLE_TESTS) +# add_subdirectory("tests") +# endif() if(ANVILL_ENABLE_INSTALL) install( TARGETS - anvill + anvill EXPORT - anvillTargets + anvillTargets LIBRARY DESTINATION - lib + lib ARCHIVE DESTINATION - lib + lib INCLUDES DESTINATION - include + include PUBLIC_HEADER DESTINATION - "${CMAKE_INSTALL_INCLUDEDIR}/anvill" + "${CMAKE_INSTALL_INCLUDEDIR}/anvill" ) install( FILES - ${anvill_passes_HEADERS} + ${anvill_passes_HEADERS} DESTINATION - "${CMAKE_INSTALL_INCLUDEDIR}/anvill/Passes" + "${CMAKE_INSTALL_INCLUDEDIR}/anvill/Passes" ) endif(ANVILL_ENABLE_INSTALL) diff --git a/lib/CrossReferenceFolder.cpp b/lib/CrossReferenceFolder.cpp index 2da91e739..b57077983 100644 --- a/lib/CrossReferenceFolder.cpp +++ b/lib/CrossReferenceFolder.cpp @@ -25,6 +25,8 @@ #include #include +#include +#include #include namespace anvill { @@ -56,10 +58,13 @@ using ResolvedCrossReferenceCache = class CrossReferenceFolderImpl { public: - CrossReferenceFolderImpl(const CrossReferenceResolver &xref_resolver_, - const llvm::DataLayout &dl_) + CrossReferenceFolderImpl( + const CrossReferenceResolver &xref_resolver_, const llvm::DataLayout &dl_, + std::function(llvm::Value *)> + value_cb) : xref_resolver(xref_resolver_), - dl(dl_) {} + dl(dl_), + callback_resolve_value(std::move(value_cb)) {} ResolvedCrossReference ResolveInstruction(llvm::Instruction *inst_val); ResolvedCrossReference ResolveConstant(llvm::Constant *const_val); @@ -171,6 +176,10 @@ class CrossReferenceFolderImpl { // Discovered entities. std::vector entities; + + // Callback + std::function(llvm::Value *)> + callback_resolve_value; }; @@ -376,7 +385,7 @@ CrossReferenceFolderImpl::ResolveConstant(llvm::Constant *const_val) { xr.is_valid = false; if (val.isNegative()) { - if (val.getMinSignedBits() <= 64) { + if (val.getSignificantBits() <= 64) { xr.u.address = static_cast(val.getSExtValue()); xr.is_valid = true; } @@ -640,9 +649,13 @@ CrossReferenceFolderImpl::ResolveCall(llvm::CallInst *call) { // Try to resolve `val` as a cross-reference. 
ResolvedCrossReference CrossReferenceFolderImpl::ResolveValue(llvm::Value *val) { + auto cb_res = this->callback_resolve_value(val); + if (cb_res) { + return *cb_res; + } + if (auto const_val = llvm::dyn_cast(val)) { return ResolveConstant(const_val); - } else if (auto inst_val = llvm::dyn_cast(val)) { return ResolveInstruction(inst_val); } else { @@ -669,7 +682,9 @@ CrossReferenceFolder::~CrossReferenceFolder(void) {} // lifter that can resolve global references on our behalf. CrossReferenceFolder::CrossReferenceFolder( const CrossReferenceResolver &resolver, const llvm::DataLayout &dl) - : impl(std::make_shared(resolver, dl)) {} + : impl(std::make_shared( + resolver, dl, + [this](llvm::Value *v) { return this->ResolveValueCallback(v); })) {} // Return a reference to the data layout used by the cross-reference folder. const llvm::DataLayout &CrossReferenceFolder::DataLayout(void) const { @@ -721,4 +736,9 @@ ResolvedCrossReference::Displacement(const llvm::DataLayout &dl) const { return displacement; } +std::optional +CrossReferenceFolder::ResolveValueCallback(llvm::Value *) const { + return std::nullopt; +} + } // namespace anvill diff --git a/lib/Declarations.cpp b/lib/Declarations.cpp index 44c83be3f..4dba43764 100644 --- a/lib/Declarations.cpp +++ b/lib/Declarations.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -29,8 +30,32 @@ #include #include +#include +#include +#include +#include +#include +#include +#include + #include "Arch/Arch.h" #include "Protobuf.h" +#include "anvill/Specification.h" + +namespace std { +template <> +struct std::hash { + std::size_t operator()(const anvill::LowLoc &c) const { + std::size_t result = 0; + + hash_combine(result, c.mem_reg); + hash_combine(result, c.mem_offset); + hash_combine(result, c.reg); + hash_combine(result, c.size); + return result; + } +}; +} // namespace std namespace anvill { @@ -51,6 +76,118 @@ VariableDecl::DeclareInModule(const std::string &name, name); } +void FunctionDecl::AddBBContexts( + std::unordered_map &contexts) const { + for (const auto &[uid, _] : this->cfg) { + contexts.insert({uid, this->GetBlockContext(uid)}); + } +} + +std::uint64_t LowLoc::Size() const { + if (this->size) { + return *this->size; + } else { + return this->reg->size; + } +} + + +// need to be careful here about overlapping values +std::vector +BasicBlockContext::LiveParamsAtEntryAndExit() const { + auto live_exits = this->LiveParamsAtExit(); + auto live_entries = this->LiveParamsAtEntry(); + + + auto add_to_set = [](const std::vector ¶ms, + std::unordered_set &locs_to_add) { + for (const auto &p : params) { + std::copy(p.ordered_locs.begin(), p.ordered_locs.end(), + std::inserter(locs_to_add, locs_to_add.end())); + } + }; + + std::unordered_set covered_live_ent; + add_to_set(live_entries, covered_live_ent); + std::unordered_set covered_live_exit; + add_to_set(live_exits, covered_live_exit); + + std::vector res; + std::unordered_set covered; + auto add_all_from_vector = + [&res, &covered, &covered_live_ent, + &covered_live_exit](std::vector params) { + for (auto p : params) { + auto completely_covered = + std::all_of(p.ordered_locs.begin(), p.ordered_locs.end(), + [&covered](const LowLoc &loc) -> bool { + return covered.find(loc) != covered.end(); + }); + auto live_at_ent = std::any_of( + p.ordered_locs.begin(), p.ordered_locs.end(), + [&covered_live_ent](const LowLoc &loc) -> bool { + return covered_live_ent.find(loc) != covered_live_ent.end(); + }); + auto live_at_exit = std::any_of( + p.ordered_locs.begin(), 
p.ordered_locs.end(), + [&covered_live_exit](const LowLoc &loc) -> bool { + return covered_live_exit.find(loc) != covered_live_exit.end(); + }); + + if (!completely_covered) { + std::copy(p.ordered_locs.begin(), p.ordered_locs.end(), + std::inserter(covered, covered.end())); + res.push_back({p, live_at_ent, live_at_exit}); + } + } + }; + + add_all_from_vector(live_entries); + add_all_from_vector(live_exits); + return res; +} + + +std::vector BasicBlockContext::LiveBBParamsAtEntry() const { + auto alllive = this->LiveParamsAtEntryAndExit(); + std::vector res; + std::copy_if( + alllive.begin(), alllive.end(), std::back_inserter(res), + [](const BasicBlockVariable &bbvar) { return bbvar.live_at_entry; }); + return res; +} + +std::vector BasicBlockContext::LiveBBParamsAtExit() const { + auto alllive = this->LiveParamsAtEntryAndExit(); + std::vector res; + std::copy_if( + alllive.begin(), alllive.end(), std::back_inserter(res), + [&](const BasicBlockVariable &bbvar) { + if (!bbvar.live_at_exit) { + return false; + } + auto &consts_at_exit = GetConstantsAtExit(); + if (std::find_if(consts_at_exit.begin(), consts_at_exit.end(), + [&](const ConstantDomain &cdomain) { + return cdomain.target_value == bbvar.param; + }) != consts_at_exit.end()) { + return false; + } + + auto &offset_at_exit = GetStackOffsetsAtExit(); + if (std::find_if(offset_at_exit.affine_equalities.begin(), + offset_at_exit.affine_equalities.end(), + [&](const OffsetDomain &odomain) { + return odomain.target_value == bbvar.param; + }) != offset_at_exit.affine_equalities.end()) { + return false; + } + + return true; + }); + return res; +} + // Declare this function in an LLVM module. llvm::Function * FunctionDecl::DeclareInModule(std::string_view name, @@ -92,34 +229,123 @@ FunctionDecl::DeclareInModule(std::string_view name, return func; } +size_t BasicBlockContext::GetParamIndex(const ParameterDecl &decl) const { + auto stack_var = std::find(GetParams().begin(), GetParams().end(), decl); + CHECK(stack_var != GetParams().end()); + return stack_var - GetParams().begin(); +} + +llvm::Value *BasicBlockContext::ProvidePointerFromStruct( + llvm::IRBuilder<> &ir, llvm::StructType *sty, llvm::Value *target_sty, + const ParameterDecl &decl) const { + auto i32 = llvm::IntegerType::get(ir.getContext(), 32); + auto index = GetParamIndex(decl); + auto ptr = ir.CreateGEP( + sty, target_sty, + {llvm::ConstantInt::get(i32, 0), llvm::ConstantInt::get(i32, index)}); + return ptr; +} + +llvm::Argument *BasicBlockContext::ProvidePointerFromFunctionArgs( + llvm::Function *func, const ParameterDecl ¶m) const { + return func->getArg(GetParamIndex(param) + remill::kNumBlockArgs); +} + +ValueDecl SpecBlockContext::ReturnValue() const { + return this->decl.returns; +} + +uint64_t SpecBlockContext::GetParentFunctionAddress() const { + return this->decl.address; +} + +size_t SpecBlockContext::GetStackSize() const { + return decl.stack_depth; +} + +size_t SpecBlockContext::GetMaxStackSize() const { + return decl.maximum_depth; +} + + +SpecBlockContext::SpecBlockContext( + const FunctionDecl &decl, SpecStackOffsets offsets_at_entry, + SpecStackOffsets offsets_at_exit, + std::vector constants_at_entry, + std::vector constants_at_exit, + std::vector live_params_at_entry, + std::vector live_params_at_exit) + : decl(decl), + offsets_at_entry(std::move(offsets_at_entry)), + offsets_at_exit(std::move(offsets_at_exit)), + constants_at_entry(std::move(constants_at_entry)), + constants_at_exit(std::move(constants_at_exit)), + 
live_params_at_entry(std::move(live_params_at_entry)), + live_params_at_exit(std::move(live_params_at_exit)), + params(decl.in_scope_variables) {} + +size_t SpecBlockContext::GetPointerDisplacement() const { + return this->decl.GetPointerDisplacement(); +} + +const std::vector &SpecBlockContext::LiveParamsAtExit() const { + return this->live_params_at_exit; +} + +const std::vector &SpecBlockContext::LiveParamsAtEntry() const { + return this->live_params_at_entry; +} + +const SpecStackOffsets &SpecBlockContext::GetStackOffsetsAtEntry() const { + return this->offsets_at_entry; +} + +const SpecStackOffsets &SpecBlockContext::GetStackOffsetsAtExit() const { + return this->offsets_at_exit; +} + +const std::vector & +SpecBlockContext::GetConstantsAtEntry() const { + return this->constants_at_entry; +} + +const std::vector & +SpecBlockContext::GetConstantsAtExit() const { + return this->constants_at_exit; +} + +const std::vector &SpecBlockContext::GetParams() const { + return this->params; +} + // Interpret `target` as being the function to call, and call it from within // a basic block in a lifted bitcode function. Returns the new value of the // memory pointer. llvm::Value *CallableDecl::CallFromLiftedBlock( llvm::Value *target, const anvill::TypeDictionary &types, - const remill::IntrinsicTable &intrinsics, llvm::BasicBlock *block, + const remill::IntrinsicTable &intrinsics, llvm::IRBuilder<> &ir, llvm::Value *state_ptr, llvm::Value *mem_ptr) const { - auto module = block->getModule(); + auto module = ir.GetInsertBlock()->getModule(); auto &context = module->getContext(); CHECK_EQ(&context, &(target->getContext())); CHECK_EQ(&context, &(state_ptr->getContext())); CHECK_EQ(&context, &(mem_ptr->getContext())); CHECK_EQ(&context, &(types.u.named.void_->getContext())); - llvm::IRBuilder<> ir(block); - // Go and get a pointer to the stack pointer register, so that we can // later store our computed return value stack pointer to it. auto sp_reg = arch->RegisterByName(arch->StackPointerRegisterName()); - const auto ptr_to_sp = sp_reg->AddressOf(state_ptr, block); - ir.SetInsertPoint(block); + const auto ptr_to_sp = sp_reg->AddressOf(state_ptr, ir); + // Go and compute the value of the stack pointer on exit from // the function, which will be based off of the register state // on entry to the function. - auto new_sp_base = return_stack_pointer->AddressOf(state_ptr, block); - ir.SetInsertPoint(block); + auto new_sp_base = return_stack_pointer->AddressOf(state_ptr, ir); + DLOG(INFO) << "Modifying ret stack pointer by: " + << return_stack_pointer_offset; + // TODO(Ian): this could go in the wrong direction if stack option is set to go up const auto sp_val_on_exit = ir.CreateAdd( ir.CreateLoad(return_stack_pointer->type, new_sp_base), llvm::ConstantInt::get(return_stack_pointer->type, @@ -129,14 +355,14 @@ llvm::Value *CallableDecl::CallFromLiftedBlock( llvm::SmallVector param_vals; // Get the return address. - auto ret_addr = LoadLiftedValue(return_address, types, intrinsics, block, - state_ptr, mem_ptr); + auto ret_addr = LoadLiftedValue(return_address, types, intrinsics, this->arch, + ir, state_ptr, mem_ptr); CHECK(ret_addr && !llvm::isa_and_nonnull(ret_addr)); // Get the parameters. 
for (const auto ¶m_decl : params) { - const auto val = LoadLiftedValue(param_decl, types, intrinsics, block, - state_ptr, mem_ptr); + const auto val = LoadLiftedValue(param_decl, types, intrinsics, this->arch, + ir, state_ptr, mem_ptr); if (auto inst_val = llvm::dyn_cast(val)) { inst_val->setName(param_decl.name); } @@ -156,33 +382,16 @@ llvm::Value *CallableDecl::CallFromLiftedBlock( ret_val->setDoesNotReturn(); } - // There is a single return value, store it to the lifted state. - if (returns.size() == 1) { - auto call_ret = ret_val; - - mem_ptr = StoreNativeValue(call_ret, returns.front(), types, intrinsics, - block, state_ptr, mem_ptr); - - // There are possibly multiple return values (or zero). Unpack the - // return value (it will be a struct type) into its components and - // write each one out into the lifted state. - } else { - unsigned index = 0; - for (const auto &ret_decl : returns) { - unsigned indexes[] = {index}; - auto elem_val = ir.CreateExtractValue(ret_val, indexes); - mem_ptr = StoreNativeValue(elem_val, ret_decl, types, intrinsics, block, - state_ptr, mem_ptr); - index += 1; - } + auto call_ret = ret_val; + if (!call_ret->getType()->isVoidTy()) { + mem_ptr = StoreNativeValue(call_ret, this->returns, types, intrinsics, ir, + state_ptr, mem_ptr); } - // Store the return address, and computed return stack pointer. - ir.SetInsertPoint(block); - - ir.CreateStore( - ret_addr, - remill::FindVarInFunction(block, remill::kNextPCVariableName).first); + // TODO(Ian): ... well ok so we already did stuff assuming the PC was one way since we lifted below it. + //ir.CreateStore(ret_addr, remill::FindVarInFunction( + // ir.GetInsertBlock(), remill::kNextPCVariableName) + // .first); ir.CreateStore(sp_val_on_exit, ptr_to_sp); if (is_noreturn) { @@ -202,7 +411,8 @@ CallableDecl::DecodeFromPB(const remill::Arch *arch, const std::string &pb) { const TypeDictionary type_dictionary(*(arch->context)); const TypeTranslator type_translator(type_dictionary, arch); std::unordered_map type_map; - ProtobufTranslator translator(type_translator, arch, type_map); + std::unordered_map type_names; + ProtobufTranslator translator(type_translator, arch, type_map, type_names); auto default_callable_decl_res = translator.DecodeDefaultCallableDecl(function); @@ -259,26 +469,128 @@ void CallableDecl::OverrideFunctionTypeWithABIParamLayout() { } void CallableDecl::OverrideFunctionTypeWithABIReturnLayout() { - if (this->returns.size() < 1) { - return; - } else if (this->returns.size() == 1) { - // Override the return type with the type of the last return - auto new_func_type = - llvm::FunctionType::get(this->returns.front().type, - this->type->params(), this->type->isVarArg()); - this->type = new_func_type; + auto new_func_type = llvm::FunctionType::get( + this->returns.type, this->type->params(), this->type->isVarArg()); + this->type = new_func_type; +} + +namespace { +template +V GetWithDef(Uid uid, const std::unordered_map &map, V def) { + if (map.find(uid) == map.end()) { + return def; + } + + return map.find(uid)->second; +} +} // namespace + +size_t FunctionDecl::GetPointerDisplacement() const { + return this->parameter_size + this->parameter_offset; +} + +SpecBlockContext FunctionDecl::GetBlockContext(Uid uid) const { + return SpecBlockContext( + *this, GetWithDef(uid, this->stack_offsets_at_entry, SpecStackOffsets()), + GetWithDef(uid, this->stack_offsets_at_exit, SpecStackOffsets()), + GetWithDef(uid, this->constant_values_at_entry, + std::vector()), + GetWithDef(uid, this->constant_values_at_exit, + 
std::vector()), + GetWithDef(uid, this->live_regs_at_entry, std::vector()), + GetWithDef(uid, this->live_regs_at_exit, std::vector())); +} + +std::optional +AbstractStack::StackOffsetFromStackPointer(std::int64_t stack_off) const { + if (this->stack_grows_down) { + auto displaced_offset = + stack_off - static_cast(this->pointer_displacement); + DLOG(INFO) << this->total_size; + DLOG(INFO) << "disp: " << this->pointer_displacement; + DLOG(INFO) << "Displaced offset: " << displaced_offset; + if (!(static_cast(this->total_size) >= + llabs(displaced_offset))) { + return std::nullopt; + } + return this->total_size + displaced_offset; } else { - // Create a structure that has a field for each return - std::vector elems; - for (const auto &ret : this->returns) { - elems.push_back(ret.type); + return this->pointer_displacement + stack_off; + } +} + +std::int64_t AbstractStack::StackPointerFromStackOffset(size_t offset) const { + if (stack_grows_down) { + return (static_cast(offset) - this->total_size) + + this->pointer_displacement; + } else { + return offset - this->pointer_displacement; + } +} + + +std::optional +AbstractStack::StackPointerFromStackCompreference(llvm::Value *tgt) const { + size_t curr_off = 0; + for (auto comp : this->components) { + if (comp.stackptr == tgt) { + return this->StackPointerFromStackOffset(curr_off); } + curr_off += comp.size; + } - auto ret_type_struct = llvm::StructType::create(elems); + return std::nullopt; +} + +std::optional +AbstractStack::PointerToStackMemberFromOffset(llvm::IRBuilder<> &ir, + std::int64_t stack_off) const { + auto off = this->StackOffsetFromStackPointer(stack_off); + if (!off) { + return std::nullopt; + } + + auto i32 = llvm::IntegerType::getInt32Ty(this->context); + DLOG(INFO) << "Looking for offset" << *off; + auto curr_off = 0; + auto curr_ind = 0; + for (auto [sz, ptr] : this->components) { + if (off < curr_off + sz) { + DLOG(INFO) << "Found for " << remill::LLVMThingToString(ptr); + DLOG(INFO) << curr_off << " " << sz; + return ir.CreateGEP(this->stack_types[curr_ind], ptr, + {llvm::ConstantInt::get(i32, 0), + llvm::ConstantInt::get(i32, *off - curr_off)}); + } + curr_off += sz; + curr_ind++; + } + + return std::nullopt; +} + +llvm::Type *AbstractStack::StackTypeFromSize(llvm::LLVMContext &context, + size_t size) { + return llvm::ArrayType::get(llvm::IntegerType::getInt8Ty(context), size); +} + + +AbstractStack::AbstractStack(llvm::LLVMContext &context, + std::vector components, + bool stack_grows_down, size_t pointer_displacement) + : context(context), + stack_grows_down(stack_grows_down), + components(std::move(components)), + total_size(0), + pointer_displacement(pointer_displacement) { + + if (stack_grows_down) { + std::reverse(this->components.begin(), this->components.end()); + } - auto new_func_type = llvm::FunctionType::get( - ret_type_struct, this->type->params(), this->type->isVarArg()); - this->type = new_func_type; + for (const auto &[k, v] : this->components) { + this->stack_types.push_back(this->StackTypeFromSize(context, k)); + total_size += k; } } diff --git a/lib/Lifters/BasicBlockLifter.cpp b/lib/Lifters/BasicBlockLifter.cpp new file mode 100644 index 000000000..ec402cfbc --- /dev/null +++ b/lib/Lifters/BasicBlockLifter.cpp @@ -0,0 +1,852 @@ +#include "BasicBlockLifter.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include 
"Lifters/CodeLifter.h" +#include "Lifters/FunctionLifter.h" +#include "anvill/Declarations.h" +#include "anvill/Optimize.h" +#include "anvill/Utils.h" + +namespace anvill { + +void BasicBlockLifter::LiftBasicBlockFunction() { + auto bbfunc = this->CreateBasicBlockFunction(); + this->LiftInstructionsIntoLiftedFunction(); + DCHECK(!llvm::verifyFunction(*this->lifted_func, &llvm::errs())); + DCHECK(!llvm::verifyFunction(*bbfunc.func, &llvm::errs())); + + this->RecursivelyInlineFunctionCallees(bbfunc.func); +} + + +remill::DecodingContext BasicBlockLifter::ApplyContextAssignments( + const std::unordered_map &assignments, + remill::DecodingContext prev_context) { + for (const auto &[k, v] : assignments) { + prev_context.UpdateContextReg(k, v); + } + return prev_context; +} + + +llvm::CallInst *BasicBlockLifter::AddCallFromBasicBlockFunctionToLifted( + llvm::BasicBlock *source_block, llvm::Function *dest_func, + const remill::IntrinsicTable &intrinsics, llvm::Value *pc_hint) { + auto func = source_block->getParent(); + llvm::IRBuilder<> ir(source_block); + std::array args; + args[remill::kMemoryPointerArgNum] = + NthArgument(func, remill::kMemoryPointerArgNum); + args[remill::kStatePointerArgNum] = + NthArgument(func, remill::kStatePointerArgNum); + + if (pc_hint) { + args[remill::kPCArgNum] = pc_hint; + } else { + args[remill::kPCArgNum] = + remill::LoadNextProgramCounter(source_block, this->intrinsics); + } + + return ir.CreateCall(dest_func, args); +} + + +// Helper to figure out the address where execution will resume after a +// function call. In practice this is the instruction following the function +// call, encoded in `inst.branch_not_taken_pc`. However, SPARC has a terrible +// ABI where they inject an invalid instruction following some calls as a way +// of communicating to the callee that they should return an object of a +// particular, hard-coded size. Thus, we want to actually identify then ignore +// that instruction, and present the following address for where execution +// should resume after a `call`. +std::pair +BasicBlockLifter::LoadFunctionReturnAddress(const remill::Instruction &inst, + llvm::BasicBlock *block) { + + const auto pc = inst.branch_not_taken_pc; + + // The semantics for handling a call save the expected return program counter + // into a local variable. 
+  auto ret_pc = this->op_lifter->LoadRegValue(block, state_ptr,
+                                              remill::kReturnPCVariableName);
+  if (!is_sparc) {
+    return {pc, ret_pc};
+  }
+
+  uint8_t bytes[4] = {};
+
+  for (auto i = 0u; i < 4u; ++i) {
+    auto [byte, accessible, perms] = memory_provider.Query(pc + i);
+    switch (accessible) {
+      case ByteAvailability::kUnknown:
+      case ByteAvailability::kUnavailable:
+        LOG(ERROR)
+            << "Byte at address " << std::hex << (pc + i)
+            << " is not available for inspection to figure out return address "
+            << "of call instruction at address " << pc << std::dec;
+        return {pc, ret_pc};
+
+      default: bytes[i] = byte; break;
+    }
+
+    switch (perms) {
+      case BytePermission::kUnknown:
+      case BytePermission::kReadableExecutable:
+      case BytePermission::kReadableWritableExecutable: break;
+      case BytePermission::kReadable:
+      case BytePermission::kReadableWritable:
+        LOG(ERROR)
+            << "Byte at address " << std::hex << (pc + i) << " being inspected "
+            << "to figure out return address of call instruction at address "
+            << pc << " is not executable" << std::dec;
+        return {pc, ret_pc};
+    }
+  }
+
+  union Format0a {
+    uint32_t flat;
+    struct {
+      uint32_t imm22 : 22;
+      uint32_t op2 : 3;
+      uint32_t rd : 5;
+      uint32_t op : 2;
+    } u __attribute__((packed));
+  } __attribute__((packed)) enc = {};
+  static_assert(sizeof(Format0a) == 4, " ");
+
+  enc.flat |= bytes[0];
+  enc.flat <<= 8;
+  enc.flat |= bytes[1];
+  enc.flat <<= 8;
+  enc.flat |= bytes[2];
+  enc.flat <<= 8;
+  enc.flat |= bytes[3];
+
+  // This looks like an `unimp ` instruction, where the `imm22` encodes
+  // the size of the value to return. See "Programming Note" in v8 manual,
+  // B.31, p 137.
+  //
+  // TODO(pag, kumarak): Does a zero value in `enc.u.imm22` imply a no-return
+  // function? Try this on Compiler Explorer!
+  if (!enc.u.op && !enc.u.op2) {
+    DLOG(INFO) << "Found structure return of size " << enc.u.imm22 << " to "
+               << std::hex << pc << " at " << inst.pc << std::dec;
+
+    llvm::IRBuilder<> ir(block);
+    return {pc + 4u,
+            ir.CreateAdd(ret_pc, llvm::ConstantInt::get(ret_pc->getType(), 4))};
+
+  } else {
+    return {pc, ret_pc};
+  }
+}
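// To make the Format0a check above concrete: SPARC v8 encodes `unimp` with
// op == 0 and op2 == 0, and `imm22` carrying the size of the returned
// struct. The shift arithmetic below is a host-endianness-independent
// restatement of the bit-field read; the byte values are invented for
// illustration (they encode `unimp 24`):
//
//   uint8_t bytes[4] = {0x00, 0x00, 0x00, 0x18};
//   uint32_t flat = 0;
//   for (auto b : bytes) {
//     flat = (flat << 8) | b;  // big-endian instruction fetch
//   }
//   uint32_t op = flat >> 30;           // Format0a::u.op
//   uint32_t op2 = (flat >> 22) & 0x7;  // Format0a::u.op2
//   uint32_t imm22 = flat & 0x3fffff;   // Format0a::u.imm22
//   // op == 0 && op2 == 0 marks `unimp`; imm22 == 24 is the struct-return
//   // size, so the real resume address is pc + 4.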
+
+
+bool BasicBlockLifter::DoInterProceduralControlFlow(
+    const remill::Instruction &insn, llvm::BasicBlock *block,
+    const anvill::ControlFlowOverride &override) {
+  // Only handle inter-procedural flow here; intra-procedural flow is handled
+  // implicitly by the CFG.
+  llvm::IRBuilder<> builder(block);
+  if (std::holds_alternative(override)) {
+
+    auto cc = std::get(override);
+
+    llvm::CallInst *call = nullptr;
+    if (cc.target_address.has_value()) {
+      call = this->AddCallFromBasicBlockFunctionToLifted(
+          block, this->intrinsics.function_call, this->intrinsics,
+          this->options.program_counter_init_procedure(
+              builder, this->address_type, *cc.target_address));
+    } else {
+      call = this->AddCallFromBasicBlockFunctionToLifted(
+          block, this->intrinsics.function_call, this->intrinsics);
+    }
+    SetMetadata(options.pc_metadata_name, *call, insn.pc);
+    if (!cc.stop) {
+      auto [_, raddr] = this->LoadFunctionReturnAddress(insn, block);
+      auto npc = remill::LoadNextProgramCounterRef(block);
+      auto pc = remill::LoadProgramCounterRef(block);
+      builder.CreateStore(raddr, npc);
+      builder.CreateStore(raddr, pc);
+    } else {
+      if (cc.is_noreturn) {
+        call->setDoesNotReturn();
+        remill::AddTerminatingTailCall(block, intrinsics.error, intrinsics);
+      } else {
+        // A call that stops but is not noreturn should be lifted as a call
+        // followed by a return.
+        auto func = block->getParent();
+        auto should_return = func->getArg(kShouldReturnArgNum);
+        builder.CreateStore(llvm::Constant::getAllOnesValue(
+                                llvm::IntegerType::getInt1Ty(llvm_context)),
+                            should_return);
+      }
+    }
+    return !cc.stop || !cc.is_noreturn;
+  } else if (std::holds_alternative(override)) {
+    auto func = block->getParent();
+    auto should_return = func->getArg(kShouldReturnArgNum);
+    builder.CreateStore(llvm::Constant::getAllOnesValue(
+                            llvm::IntegerType::getInt1Ty(llvm_context)),
+                        should_return);
+  }
+
+  return true;
+}
+
+
+bool BasicBlockLifter::ApplyInterProceduralControlFlowOverride(
+    const remill::Instruction &insn, llvm::BasicBlock *&block) {
+
+
+  // If this instruction is conditional and interprocedural, then we split the
+  // block into a case where we take the flow and a case where we don't, and
+  // then rejoin.
+
+  auto override = options.control_flow_provider.GetControlFlowOverride(insn.pc);
+
+  if ((std::holds_alternative(override) ||
+       std::holds_alternative(override))) {
+    if (std::holds_alternative(
+            insn.flows)) {
+      auto btaken = remill::LoadBranchTaken(block);
+      llvm::IRBuilder<> builder(block);
+      auto do_control_flow =
+          llvm::BasicBlock::Create(block->getContext(), "", block->getParent());
+      auto continuation =
+          llvm::BasicBlock::Create(block->getContext(), "", block->getParent());
+      builder.CreateCondBr(btaken, do_control_flow, continuation);
+
+      // If the interprocedural control flow block isn't terminal, link it
+      // back up.
+      if (this->DoInterProceduralControlFlow(insn, do_control_flow, override)) {
+        llvm::BranchInst::Create(continuation, do_control_flow);
+      }
+
+      block = continuation;
+      return true;
+    } else {
+      return this->DoInterProceduralControlFlow(insn, block, override);
+    }
+  }
+
+  return true;
+}
+
+remill::DecodingContext
+BasicBlockLifter::CreateDecodingContext(const CodeBlock &blk) {
+  auto init_context = this->options.arch->CreateInitialContext();
+  return this->ApplyContextAssignments(blk.context_assignments,
+                                       std::move(init_context));
+}
+
+// Try to decode an instruction at address `addr` into `*inst_out`. Returns
+// true on success and false otherwise. `is_delayed` tells the decoder
+// whether or not the instruction being decoded is inside the delay slot of
+// another instruction.
+bool BasicBlockLifter::DecodeInstructionInto(const uint64_t addr, + bool is_delayed, + remill::Instruction *inst_out, + remill::DecodingContext context) { + static const auto max_inst_size = options.arch->MaxInstructionSize(context); + inst_out->Reset(); + + // Read the maximum number of bytes possible for instructions on this + // architecture. For x86(-64), this is 15 bytes, whereas for fixed-width + // architectures like AArch32/AArch64 and SPARC32/SPARC64, this is 4 bytes. + inst_out->bytes.reserve(max_inst_size); + + auto accumulate_inst_byte = [=](auto byte, auto accessible, auto perms) { + switch (accessible) { + case ByteAvailability::kUnknown: + case ByteAvailability::kUnavailable: return false; + default: + switch (perms) { + case BytePermission::kUnknown: + case BytePermission::kReadableExecutable: + case BytePermission::kReadableWritableExecutable: + inst_out->bytes.push_back(static_cast(byte)); + return true; + case BytePermission::kReadable: + case BytePermission::kReadableWritable: return false; + } + } + }; + + for (auto i = 0u; i < max_inst_size; ++i) { + if (!std::apply(accumulate_inst_byte, memory_provider.Query(addr + i))) { + break; + } + } + + if (is_delayed) { + return options.arch->DecodeDelayedInstruction( + addr, inst_out->bytes, *inst_out, std::move(context)); + } else { + return options.arch->DecodeInstruction(addr, inst_out->bytes, *inst_out, + std::move(context)); + } +} + + +void BasicBlockLifter::ApplyTypeHint(llvm::IRBuilder<> &bldr, + const ValueDecl &type_hint) { + + auto ty_hint = this->GetTypeHintFunction(); + auto state_ptr_internal = + this->lifted_func->getArg(remill::kStatePointerArgNum); + auto mem_ptr = + remill::LoadMemoryPointer(bldr.GetInsertBlock(), this->intrinsics); + auto curr_value = + anvill::LoadLiftedValue(type_hint, options.TypeDictionary(), intrinsics, + options.arch, bldr, state_ptr_internal, mem_ptr); + + if (curr_value->getType()->isPointerTy()) { + auto call = bldr.CreateCall(ty_hint, {curr_value}); + call->setMetadata("anvill.type", this->type_specifier.EncodeToMetadata( + type_hint.spec_type)); + curr_value = call; + } + + auto new_mem_ptr = + StoreNativeValue(curr_value, type_hint, options.TypeDictionary(), + intrinsics, bldr, state_ptr_internal, mem_ptr); + bldr.CreateStore(new_mem_ptr, + remill::LoadMemoryPointerRef(bldr.GetInsertBlock())); +} + + +void BasicBlockLifter::LiftInstructionsIntoLiftedFunction() { + auto entry_block = &this->lifted_func->getEntryBlock(); + + auto bb = llvm::BasicBlock::Create(this->lifted_func->getContext(), "", + this->lifted_func); + + + llvm::BranchInst::Create(bb, entry_block); + + remill::Instruction inst; + + auto reached_addr = this->block_def.addr; + // TODO(Ian): use a different context + + auto init_context = this->CreateDecodingContext(this->block_def); + + DLOG(INFO) << "Decoding block at addr: " << std::hex << this->block_def.addr + << " with size " << this->block_def.size; + bool ended_on_terminal = false; + while (reached_addr < this->block_def.addr + this->block_def.size && + !ended_on_terminal) { + auto addr = reached_addr; + DLOG(INFO) << "Decoding at addr " << std::hex << addr; + auto res = this->DecodeInstructionInto(addr, false, &inst, init_context); + if (!res) { + remill::AddTerminatingTailCall(bb, this->intrinsics.error, + this->intrinsics); + LOG(ERROR) << "Failed to decode insn in block " << std::hex << addr; + return; + } + + reached_addr += inst.bytes.size(); + + // Even when something isn't supported or is invalid, we still lift + // a call to a semantic, 
e.g.`INVALID_INSTRUCTION`, so we really want + // to treat instruction lifting as an operation that can't fail. + + + std::ignore = inst.GetLifter()->LiftIntoBlock( + inst, bb, this->lifted_func->getArg(remill::kStatePointerArgNum), + false /* is_delayed */); + + llvm::IRBuilder<> builder(bb); + + auto start = + std::lower_bound(decl.type_hints.begin(), decl.type_hints.end(), + inst.pc, [](const TypeHint &hint_rhs, uint64_t addr) { + return hint_rhs.target_addr < addr; + }); + auto end = + std::upper_bound(decl.type_hints.begin(), decl.type_hints.end(), + inst.pc, [](uint64_t addr, const TypeHint &hint_rhs) { + return addr < hint_rhs.target_addr; + }); + for (; start != end; start++) { + this->ApplyTypeHint(builder, start->hint); + } + + ended_on_terminal = + !this->ApplyInterProceduralControlFlowOverride(inst, bb); + DLOG_IF(INFO, ended_on_terminal) + << "On terminal at addr: " << std::hex << addr; + } + + if (!ended_on_terminal) { + llvm::IRBuilder<> builder(bb); + + builder.CreateStore(remill::LoadNextProgramCounter(bb, this->intrinsics), + this->lifted_func->getArg(kNextPCArgNum)); + + + llvm::ReturnInst::Create( + bb->getContext(), remill::LoadMemoryPointer(bb, this->intrinsics), bb); + } +} + +llvm::MDNode *BasicBlockLifter::GetBasicBlockUidAnnotation(Uid uid) const { + return this->GetUidAnnotation(uid, this->semantics_module->getContext()); +} + +llvm::Function *BasicBlockLifter::DeclareBasicBlockFunction() { + std::string name_ = "func" + std::to_string(decl.address) + "basic_block" + + std::to_string(this->block_def.addr) + "_" + + std::to_string(this->block_def.uid.value); + auto &context = this->semantics_module->getContext(); + llvm::FunctionType *lifted_func_type = + llvm::dyn_cast(remill::RecontextualizeType( + this->options.arch->LiftedFunctionType(), context)); + + std::vector params = std::vector( + lifted_func_type->param_begin(), lifted_func_type->param_end()); + + // pointer to state pointer + params[remill::kStatePointerArgNum] = llvm::PointerType::get(context, 0); + + + for (size_t i = 0; i < this->var_struct_ty->getNumElements(); i++) { + // pointer to each param + params.push_back(llvm::PointerType::get(context, 0)); + } + + auto ret_type = this->block_context->ReturnValue(); + llvm::FunctionType *func_type = llvm::FunctionType::get( + this->flifter.curr_decl->type->getReturnType(), params, false); + + llvm::StringRef name(name_.data(), name_.size()); + return llvm::Function::Create(func_type, llvm::GlobalValue::ExternalLinkage, + 0u, name, this->semantics_module); +} + +BasicBlockFunction BasicBlockLifter::CreateBasicBlockFunction() { + auto func = bb_func; + func->setMetadata(anvill::kBasicBlockUidMetadata, + GetBasicBlockUidAnnotation(this->block_def.uid)); + + auto &context = this->semantics_module->getContext(); + llvm::FunctionType *lifted_func_type = + llvm::dyn_cast(remill::RecontextualizeType( + this->options.arch->LiftedFunctionType(), context)); + auto start_ind = lifted_func_type->getNumParams(); + for (auto var : decl.in_scope_variables) { + auto arg = remill::NthArgument(func, start_ind); + if (!var.name.empty()) { + arg->setName(var.name); + } + + if (std::all_of(var.ordered_locs.begin(), var.ordered_locs.end(), + [](const LowLoc &loc) -> bool { return loc.reg; })) { + // Registers should not have aliases, or be captured + arg->addAttr(llvm::Attribute::get(llvm_context, + llvm::Attribute::AttrKind::NoAlias)); + arg->addAttr(llvm::Attribute::get(llvm_context, + llvm::Attribute::AttrKind::NoCapture)); + } + + start_ind += 1; + } + + auto memory = 
remill::NthArgument(func, remill::kMemoryPointerArgNum); + auto state = remill::NthArgument(func, remill::kStatePointerArgNum); + auto pc = remill::NthArgument(func, remill::kPCArgNum); + + memory->setName("memory"); + memory->addAttr( + llvm::Attribute::get(llvm_context, llvm::Attribute::AttrKind::NoAlias)); + memory->addAttr( + llvm::Attribute::get(llvm_context, llvm::Attribute::AttrKind::NoCapture)); + pc->setName("program_counter"); + state->setName("stack"); + + + auto liftedty = this->options.arch->LiftedFunctionType(); + + std::vector new_params; + new_params.reserve(liftedty->getNumParams() + 2); + + for (auto param : liftedty->params()) { + new_params.push_back(param); + } + auto ptr_ty = llvm::PointerType::get(context, 0); + new_params.push_back(ptr_ty); + new_params.push_back(ptr_ty); + + + llvm::FunctionType *new_func_type = llvm::FunctionType::get( + lifted_func_type->getReturnType(), new_params, false); + + + this->lifted_func = llvm::Function::Create( + new_func_type, llvm::GlobalValue::ExternalLinkage, 0u, + func->getName() + "lowlift", this->semantics_module); + + options.arch->InitializeEmptyLiftedFunction(this->lifted_func); + + + llvm::BasicBlock::Create(context, "", func); + auto &blk = func->getEntryBlock(); + llvm::IRBuilder<> ir(&blk); + auto next_pc = ir.CreateAlloca(llvm::IntegerType::getInt64Ty(context), + nullptr, "next_pc"); + auto should_return = ir.CreateAlloca(llvm::IntegerType::getInt1Ty(context), + nullptr, "should_return"); + ir.CreateStore(llvm::ConstantInt::getFalse(context), should_return); + auto lded_mem = + ir.CreateLoad(llvm::PointerType::get(this->llvm_context, 0), memory); + + ir.CreateStore(lded_mem, + ir.CreateAlloca(llvm::PointerType::get(this->llvm_context, 0), + nullptr, "MEMORY")); + + this->state_ptr = + this->AllocateAndInitializeStateStructure(&blk, options.arch); + + // Put registers that are referencing the stack in terms of their displacement so that we + // Can resolve these stack references later . + + auto sp_value = + options.stack_pointer_init_procedure(ir, sp_reg, this->block_def.addr); + auto sp_ptr = sp_reg->AddressOf(this->state_ptr, ir); + // Initialize the stack pointer. 
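// The `stack_pointer_init_procedure` hook used above is configurable via
// LifterOptions. A plausible symbolic implementation (assumed here for
// illustration, not taken from this patch) returns a `ptrtoint` of a
// dedicated global, following anvill's symbolic-value naming convention
// (e.g. `__anvill_sp`), so that later passes can recognize and fold
// stack-pointer arithmetic:
//
//   auto symbolic_sp_init = [](llvm::IRBuilder<> &ir,
//                              const remill::Register *sp_reg,
//                              uint64_t /*block_addr*/) -> llvm::Value * {
//     auto module = ir.GetInsertBlock()->getModule();
//     auto sp_global =
//         module->getOrInsertGlobal("__anvill_sp", ir.getInt8Ty());
//     return ir.CreatePtrToInt(sp_global, sp_reg->type);
//   };
//
// The SymbolicStackPointerInitWithOffset calls below then describe registers
// as such a symbolic stack pointer plus a known offset.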
+ ir.CreateStore(sp_value, sp_ptr); + + auto stack_offsets = this->block_context->GetStackOffsetsAtEntry(); + for (auto ®_off : stack_offsets.affine_equalities) { + auto *new_value = LifterOptions::SymbolicStackPointerInitWithOffset( + ir, this->sp_reg, this->block_def.addr, reg_off.stack_offset); + auto *target_type = reg_off.target_value.type; + if (new_value->getType() != target_type) { + new_value = AdaptToType(ir, new_value, target_type); + } + auto nmem = StoreNativeValue( + new_value, reg_off.target_value, type_provider.Dictionary(), intrinsics, + ir, this->state_ptr, remill::LoadMemoryPointer(ir, intrinsics)); + ir.CreateStore(nmem, remill::LoadMemoryPointerRef(ir.GetInsertBlock())); + } + + PointerProvider ptr_provider = + [this, func](const ParameterDecl ¶m) -> llvm::Value * { + return this->block_context->ProvidePointerFromFunctionArgs(func, param); + }; + + DLOG(INFO) << "Live values at entry to function " + << this->block_context->LiveBBParamsAtEntry().size(); + this->UnpackLiveValues(ir, ptr_provider, this->state_ptr, + this->block_context->LiveBBParamsAtEntry()); + + for (auto ®_const : block_context->GetConstantsAtEntry()) { + llvm::Value *new_value = nullptr; + llvm::Type *target_type = reg_const.target_value.type; + if (reg_const.should_taint_by_pc) { + new_value = this->options.program_counter_init_procedure( + ir, this->address_type, reg_const.value); + + if (this->address_type != target_type) { + new_value = AdaptToType(ir, new_value, target_type); + } + } else { + new_value = llvm::ConstantInt::get(target_type, reg_const.value, false); + } + + + //DLOG_IF(INFO, reg_const.target_value.reg) + // << "Dumping " << reg_const.target_value.reg->name << " " << std::hex + // << reg_const.value; + auto nmem = StoreNativeValue(new_value, reg_const.target_value, + type_provider.Dictionary(), intrinsics, ir, + this->state_ptr, + remill::LoadMemoryPointer(ir, intrinsics)); + ir.CreateStore(nmem, remill::LoadMemoryPointerRef(ir.GetInsertBlock())); + } + + auto pc_arg = remill::NthArgument(func, remill::kPCArgNum); + auto mem_arg = remill::NthArgument(func, remill::kMemoryPointerArgNum); + + func->addFnAttr(llvm::Attribute::NoInline); + //func->setLinkage(llvm::GlobalValue::InternalLinkage); + + auto mem_res = remill::LoadMemoryPointer(ir, this->intrinsics); + + // Initialize the program counter + auto pc_ptr = pc_reg->AddressOf(this->state_ptr, ir); + auto pc_val = this->options.program_counter_init_procedure( + ir, this->address_type, this->block_def.addr); + + ir.CreateStore(ir.CreateZExtOrTrunc(pc_val, pc_reg_type), pc_ptr); + + std::array args = { + this->state_ptr, pc_val, mem_res, next_pc, should_return}; + + auto ret_mem = ir.CreateCall(this->lifted_func, args); + + this->PackLiveValues(ir, this->state_ptr, ptr_provider, + this->block_context->LiveBBParamsAtExit()); + + + CHECK(ir.GetInsertPoint() == func->getEntryBlock().end()); + + BasicBlockFunction bbf{func, pc_arg, mem_arg, next_pc, state}; + + + ir.CreateStore(ret_mem, memory); + ir.CreateStore(ret_mem, remill::LoadMemoryPointerRef(ir.GetInsertBlock())); + TerminateBasicBlockFunction(func, ir, ret_mem, should_return, bbf); + + return bbf; +} + +// Setup the returns for this function we tail call all successors +void BasicBlockLifter::TerminateBasicBlockFunction( + llvm::Function *caller, llvm::IRBuilder<> &ir, llvm::Value *next_mem, + llvm::Value *should_return, const BasicBlockFunction &bbfunc) { + auto &context = this->bb_func->getContext(); + this->invalid_successor_block = + llvm::BasicBlock::Create(context, 
"invalid_successor", this->bb_func); + auto jump_block = llvm::BasicBlock::Create(context, "", this->bb_func); + auto ret_block = llvm::BasicBlock::Create(context, "", this->bb_func); + + // TODO(Ian): maybe want to call remill_error here + new llvm::UnreachableInst(next_mem->getContext(), + this->invalid_successor_block); + + auto should_return_value = + ir.CreateLoad(llvm::IntegerType::getInt1Ty(context), should_return); + ir.CreateCondBr(should_return_value, ret_block, jump_block); + + ir.SetInsertPoint(jump_block); + auto pc = ir.CreateLoad(address_type, bbfunc.next_pc_out); + auto sw = ir.CreateSwitch(pc, this->invalid_successor_block); + + for (auto edge_uid : this->block_def.outgoing_edges) { + auto calling_bb = + llvm::BasicBlock::Create(next_mem->getContext(), "", bbfunc.func); + llvm::IRBuilder<> calling_bb_builder(calling_bb); + auto edge_bb = this->decl.cfg.find(edge_uid); + CHECK(edge_bb != this->decl.cfg.end()); + auto &child_lifter = + this->flifter.GetOrCreateBasicBlockLifter(edge_bb->second.uid); + auto retval = child_lifter.ControlFlowCallBasicBlockFunction( + caller, calling_bb_builder, this->state_ptr, bbfunc.stack, next_mem); + if (this->flifter.curr_decl->type->getReturnType()->isVoidTy()) { + calling_bb_builder.CreateRetVoid(); + } else { + calling_bb_builder.CreateRet(retval); + } + + auto succ_const = llvm::ConstantInt::get( + llvm::cast(this->address_type), + edge_bb->second.addr); + sw->addCase(succ_const, calling_bb); + } + + ir.SetInsertPoint(ret_block); + if (this->flifter.curr_decl->type->getReturnType()->isVoidTy()) { + ir.CreateRetVoid(); + } else { + auto retval = anvill::LoadLiftedValue( + block_context->ReturnValue(), options.TypeDictionary(), intrinsics, + options.arch, ir, this->state_ptr, next_mem); + ir.CreateRet(retval); + } +} + +llvm::StructType *BasicBlockLifter::StructTypeFromVars() const { + std::vector field_types; + std::transform(decl.in_scope_variables.begin(), decl.in_scope_variables.end(), + std::back_inserter(field_types), + [](auto ¶m) { return param.type; }); + + return llvm::StructType::get(llvm_context, field_types, + "sty_for_basic_block_function"); +} + +// Packs in scope variables into a struct +void BasicBlockLifter::PackLiveValues( + llvm::IRBuilder<> &bldr, llvm::Value *from_state_ptr, + PointerProvider into_vars, + const std::vector &decls) const { + + for (auto decl : decls) { + + if (!HasMemLoc(decl.param)) { + auto ptr = into_vars(decl.param); + + auto state_loaded_value = LoadLiftedValue( + decl.param, this->type_provider.Dictionary(), this->intrinsics, + this->options.arch, bldr, from_state_ptr, + remill::LoadMemoryPointer(bldr, this->intrinsics)); + + bldr.CreateStore(state_loaded_value, ptr); + } else { + // TODO(Ian): The assumption is we dont have live values split between the stack and a register for now... + // Maybe at some point we can just go ahead and store everything + CHECK(!HasRegLoc(decl.param)); + } + } +} + + +void BasicBlockLifter::UnpackLiveValues( + llvm::IRBuilder<> &bldr, PointerProvider returned_value, + llvm::Value *into_state_ptr, + const std::vector &decls) const { + auto blk = bldr.GetInsertBlock(); + + for (auto decl : decls) { + // is this how we want to do this.... now the value really doesnt live in memory anywhere but the frame. 
+ if (!HasMemLoc(decl.param)) { + auto ptr = returned_value(decl.param); + auto loaded_var_val = + bldr.CreateLoad(decl.param.type, ptr, decl.param.name); + loaded_var_val->setMetadata( + "anvill.type", + this->type_specifier.EncodeToMetadata(decl.param.spec_type)); + + auto mem_ptr = remill::LoadMemoryPointer(bldr, this->intrinsics); + auto new_mem_ptr = StoreNativeValue( + loaded_var_val, decl.param, this->type_provider.Dictionary(), + this->intrinsics, bldr, into_state_ptr, mem_ptr); + bldr.SetInsertPoint(bldr.GetInsertBlock()); + + bldr.CreateStore(new_mem_ptr, + remill::LoadMemoryPointerRef(bldr.GetInsertBlock())); + } else { + // TODO(Ian): The assumption is we dont have live values split between the stack and a register for now... + // Maybe at some point we can just go ahead and store everything + CHECK(!HasRegLoc(decl.param)); + } + } + CHECK(bldr.GetInsertPoint() == blk->end()); +} + +// TODO(Ian): dependent on calling context we need fetch the memory and next program counter +// ref either from the args or from the parent func state +llvm::CallInst *BasicBlockLifter::CallBasicBlockFunction( + llvm::IRBuilder<> &builder, llvm::Value *parent_state, + llvm::Value *parent_stack, llvm::Value *memory_pointer) const { + + std::vector args(remill::kNumBlockArgs); + auto out_param_locals = builder.CreateAlloca(this->var_struct_ty); + args[0] = parent_stack; + + args[remill::kPCArgNum] = options.program_counter_init_procedure( + builder, this->address_type, block_def.addr); + args[remill::kMemoryPointerArgNum] = memory_pointer; + + AbstractStack stack( + builder.getContext(), {{decl.maximum_depth, parent_stack}}, + this->options.stack_frame_recovery_options.stack_grows_down, + decl.GetPointerDisplacement()); + PointerProvider ptr_provider = + [&builder, this, out_param_locals, + &stack](const ParameterDecl &repr_var) -> llvm::Value * { + DLOG(INFO) << "Lifting: " << repr_var.name << " for call"; + if (HasMemLoc(repr_var)) { + // TODO(Ian): the assumption here since we are able to build a single pointer here into the frame is that + // svars are single valuedecl contigous + CHECK(repr_var.ordered_locs.size() == 1); + auto stack_ptr = stack.PointerToStackMemberFromOffset( + builder, repr_var.ordered_locs[0].mem_offset); + if (stack_ptr) { + return *stack_ptr; + } else { + LOG(FATAL) + << "Unable to create a ptr to the stack, the stack is too small to represent the param."; + } + } + + // ok so this should be provide pointer from args in a way + // stack probably shouldnt be passed at all, if we dont have a loc + // then it's not live + return block_context->ProvidePointerFromStruct(builder, var_struct_ty, + out_param_locals, repr_var); + }; + + this->PackLiveValues(builder, parent_state, ptr_provider, + this->block_context->LiveBBParamsAtEntry()); + + for (auto ¶m : block_context->GetParams()) { + auto ptr = ptr_provider(param); + CHECK(ptr != nullptr); + args.push_back(ptr); + } + + auto retval = builder.CreateCall(bb_func, args); + retval->setTailCall(true); + + return retval; +} + +llvm::CallInst *BasicBlockLifter::ControlFlowCallBasicBlockFunction( + llvm::Function *caller, llvm::IRBuilder<> &builder, + llvm::Value *parent_state, llvm::Value *parent_stack, + llvm::Value *memory_pointer) const { + + std::vector args; + std::transform(caller->arg_begin(), caller->arg_end(), + std::back_inserter(args), + [](llvm::Argument &arg) -> llvm::Value * { return &arg; }); + + auto retval = builder.CreateCall(bb_func, args); + retval->setTailCall(true); + + return retval; +} + 
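// Taken together, DeclareBasicBlockFunction/CreateBasicBlockFunction and the
// two call helpers above define a small calling convention for lifted
// blocks. A sketch of the resulting shapes, with addresses, the UID, and
// variable names invented purely for illustration:
//
//   ; The outer basic-block function: remill's three block arguments (the
//   ; "state" slot carrying the parent stack), then one pointer per in-scope
//   ; variable. It returns the parent function's high-level return type.
//   @func4198144basic_block4198400_0(ptr %stack, i64 %program_counter,
//                                    ptr %memory, ptr %var_a, ptr %var_b)
//
//   ; The inner "lowlift" function: the same three block arguments, plus
//   ; out-parameters for the successor PC (kNextPCArgNum) and the
//   ; should-return flag (kShouldReturnArgNum).
//   @func4198144basic_block4198400_0lowlift(ptr %state, i64 %pc, ptr %memory,
//                                           ptr %next_pc, ptr %should_return)
//
// The AbstractStack that CallBasicBlockFunction builds (implemented in
// Declarations.cpp earlier in this patch) maps native SP-relative offsets
// into a flat [0, total_size) index space. A worked example of that
// arithmetic for a downward-growing stack; the sizes are made up for the
// example, and the function is never called, it just documents the math:
static bool AbstractStackArithmeticExample() {
  const int64_t total_size = 64;           // bytes modeled for the frame
  const int64_t pointer_displacement = 8;  // parameter_size + parameter_offset
  const int64_t stack_off = -12;           // SP-relative, below the pointer

  // StackOffsetFromStackPointer: -12 displaces to -20, landing at slot 44.
  const int64_t displaced = stack_off - pointer_displacement;  // -20
  const int64_t slot = total_size + displaced;                 // 44

  // StackPointerFromStackOffset inverts it: (44 - 64) + 8 == -12.
  return (slot - total_size) + pointer_displacement == stack_off;
}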
+BasicBlockLifter::BasicBlockLifter(
+    std::unique_ptr block_context, const FunctionDecl &decl,
+    CodeBlock block_def, const LifterOptions &options_,
+    llvm::Module *semantics_module, const TypeTranslator &type_specifier,
+    FunctionLifter &flifter)
+    : CodeLifter(options_, semantics_module, type_specifier),
+      block_context(std::move(block_context)),
+      block_def(std::move(block_def)),
+      decl(decl),
+      flifter(flifter) {
+  this->var_struct_ty = this->StructTypeFromVars();
+  this->bb_func = this->DeclareBasicBlockFunction();
+}
+
+CallableBasicBlockFunction::CallableBasicBlockFunction(
+    llvm::Function *func, CodeBlock block, BasicBlockLifter bb_lifter)
+    : func(func),
+      block(block),
+      bb_lifter(std::move(bb_lifter)) {}
+
+
+const CodeBlock &CallableBasicBlockFunction::GetBlock() const {
+  return this->block;
+}
+
+llvm::Function *CallableBasicBlockFunction::GetFunction() const {
+  return this->func;
+}
+
+}  // namespace anvill
diff --git a/lib/Lifters/BasicBlockLifter.h b/lib/Lifters/BasicBlockLifter.h
new file mode 100644
index 000000000..4c574c798
--- /dev/null
+++ b/lib/Lifters/BasicBlockLifter.h
@@ -0,0 +1,175 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "CodeLifter.h"
+#include "anvill/Declarations.h"
+
+namespace anvill {
+
+enum : size_t {
+  kNextPCArgNum = remill::kNumBlockArgs,
+  kShouldReturnArgNum,
+  kNumLiftedBasicBlockArgs
+};
+
+struct BasicBlockFunction {
+  llvm::Function *func;
+  llvm::Argument *pc_arg;
+  llvm::Argument *mem_ptr;
+  llvm::AllocaInst *next_pc_out;
+  llvm::Argument *stack;
+};
+
+class CallableBasicBlockFunction;
+
+/**
+ * @brief A BasicBlockLifter lifts a basic block as a native function that
+ * takes in-scope variables and returns in-scope variables (essentially an
+ * SSAed form of the entire block). In addition to variables, a basic block
+ * function also returns the successor of this block to the caller, given the
+ * input state, if a successor exists (i.e., function returns are terminating
+ * tail calls).
+ */
+class BasicBlockLifter : public CodeLifter {
+ private:
+  std::unique_ptr block_context;
+  CodeBlock block_def;
+
+  llvm::StructType *var_struct_ty{nullptr};
+
+  // The allocated state ptr for the function.
+ llvm::Value *state_ptr{nullptr}; + + llvm::Function *lifted_func{nullptr}; + + const FunctionDecl &decl; + + llvm::Function *bb_func{nullptr}; + + FunctionLifter &flifter; + + llvm::BasicBlock *invalid_successor_block{nullptr}; + + llvm::Function *DeclareBasicBlockFunction(); + + llvm::StructType *StructTypeFromVars() const; + + remill::DecodingContext ApplyContextAssignments( + const std::unordered_map &assignments, + remill::DecodingContext prev_context); + + remill::DecodingContext CreateDecodingContext(const CodeBlock &blk); + + + void ApplyTypeHint(llvm::IRBuilder<> &bldr, const ValueDecl &type_hint); + + void LiftInstructionsIntoLiftedFunction(); + + BasicBlockFunction CreateBasicBlockFunction(); + + void TerminateBasicBlockFunction(llvm::Function *caller, + llvm::IRBuilder<> &ir, llvm::Value *next_mem, + llvm::Value *should_return, + const BasicBlockFunction &bbfunc); + + bool ApplyInterProceduralControlFlowOverride(const remill::Instruction &insn, + llvm::BasicBlock *&block); + + bool + DoInterProceduralControlFlow(const remill::Instruction &insn, + llvm::BasicBlock *block, + const anvill::ControlFlowOverride &override); + + llvm::CallInst *AddCallFromBasicBlockFunctionToLifted( + llvm::BasicBlock *source_block, llvm::Function *dest_func, + const remill::IntrinsicTable &intrinsics, + llvm::Value *next_pc_hint = nullptr); + + std::pair + LoadFunctionReturnAddress(const remill::Instruction &inst, + llvm::BasicBlock *block); + + bool DecodeInstructionInto(const uint64_t addr, bool is_delayed, + remill::Instruction *inst_out, + remill::DecodingContext context); + + + llvm::MDNode *GetBasicBlockUidAnnotation(Uid uid) const; + + public: + BasicBlockLifter(std::unique_ptr block_context, + const FunctionDecl &decl, CodeBlock block_def, + const LifterOptions &options_, + llvm::Module *semantics_module, + const TypeTranslator &type_specifier, + FunctionLifter &flifter); + + + void LiftBasicBlockFunction(); + + + using PointerProvider = + std::function; + + + // Packs in scope variables into a struct + void PackLiveValues(llvm::IRBuilder<> &bldr, llvm::Value *from_state_ptr, + PointerProvider into_vars, + const std::vector &decls) const; + + void UnpackLiveValues(llvm::IRBuilder<> &, PointerProvider returned_value, + llvm::Value *into_state_ptr, + const std::vector &) const; + + + // Calls a basic block function and unpacks the result into the state + llvm::CallInst *CallBasicBlockFunction(llvm::IRBuilder<> &, + llvm::Value *state_ptr, + llvm::Value *parent_stack, + llvm::Value *memory_pointer) const; + + llvm::CallInst *ControlFlowCallBasicBlockFunction( + llvm::Function *caller, llvm::IRBuilder<> &, llvm::Value *state_ptr, + llvm::Value *parent_stack, llvm::Value *memory_pointer) const; + + BasicBlockLifter(BasicBlockLifter &&) = default; +}; + +class CallableBasicBlockFunction { + + private: + llvm::Function *func; + CodeBlock block; + BasicBlockLifter bb_lifter; + + + public: + CallableBasicBlockFunction(llvm::Function *func, CodeBlock block, + BasicBlockLifter bb_lifter); + + llvm::Function *GetFunction() const; + + llvm::StructType *GetRetType() const; + + const CodeBlock &GetBlock() const; + + // Calls a basic block function and unpacks the result into the state + void CallBasicBlockFunction(llvm::IRBuilder<> &, llvm::Value *state_ptr, + llvm::Value *stack_ptr) const; +}; + + +} // namespace anvill \ No newline at end of file diff --git a/lib/Lifters/CodeLifter.cpp b/lib/Lifters/CodeLifter.cpp new file mode 100644 index 000000000..1a3b0309f --- /dev/null +++ 
b/lib/Lifters/CodeLifter.cpp @@ -0,0 +1,352 @@ +#include "CodeLifter.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "anvill/Declarations.h" + +namespace anvill { +namespace { +// Clear out LLVM variable names. They're usually not helpful. +static void ClearVariableNames(llvm::Function *func) { + for (auto &block : *func) { + // block.setName(llvm::Twine::createNull()); + for (auto &inst : block) { + if (inst.hasName()) { + inst.setName(llvm::Twine::createNull()); + } + } + } +} +} // namespace + + +CodeLifter::CodeLifter(const LifterOptions &options, + llvm::Module *semantics_module, + const TypeTranslator &type_specifier) + : options(options), + semantics_module(semantics_module), + intrinsics(semantics_module), + llvm_context(semantics_module->getContext()), + op_lifter(options.arch->DefaultLifter(intrinsics)), + is_sparc(options.arch->IsSPARC32() || options.arch->IsSPARC64()), + is_x86_or_amd64(options.arch->IsX86() || options.arch->IsAMD64()), + pc_reg(options.arch->RegisterByName( + options.arch->ProgramCounterRegisterName())), + sp_reg(options.arch->RegisterByName( + options.arch->StackPointerRegisterName())), + memory_provider(options.memory_provider), + type_provider(options.type_provider), + type_specifier(type_specifier), + address_type( + llvm::Type::getIntNTy(llvm_context, options.arch->address_size)), + uid_type(llvm::Type::getInt64Ty(llvm_context)), + i8_type(llvm::Type::getInt8Ty(llvm_context)), + i8_zero(llvm::Constant::getNullValue(i8_type)), + i32_type(llvm::Type::getInt32Ty(llvm_context)), + mem_ptr_type( + llvm::dyn_cast(remill::RecontextualizeType( + options.arch->MemoryPointerType(), llvm_context))), + state_ptr_type( + llvm::dyn_cast(remill::RecontextualizeType( + options.arch->StatePointerType(), llvm_context))), + pc_reg_type(pc_reg->type) { + if (options.pc_metadata_name) { + pc_annotation_id = llvm_context.getMDKindID(options.pc_metadata_name); + } +} + +// Perform architecture-specific initialization of the state structure +// in `block`. 
+void CodeLifter::ArchSpecificStateStructureInitialization( + llvm::BasicBlock *block, llvm::Value *new_state_ptr) { + + if (is_x86_or_amd64) { + llvm::IRBuilder<> ir(block); + + const auto ssbase_reg = options.arch->RegisterByName("SSBASE"); + const auto fsbase_reg = options.arch->RegisterByName("FSBASE"); + const auto gsbase_reg = options.arch->RegisterByName("GSBASE"); + const auto dsbase_reg = options.arch->RegisterByName("DSBASE"); + const auto esbase_reg = options.arch->RegisterByName("ESBASE"); + const auto csbase_reg = options.arch->RegisterByName("CSBASE"); + + if (gsbase_reg) { + const auto gsbase_val = llvm::ConstantExpr::getPtrToInt( + llvm::ConstantExpr::getAddrSpaceCast( + llvm::ConstantExpr::getNullValue( + llvm::PointerType::get(block->getContext(), 256)), + llvm::PointerType::get(block->getContext(), 0)), + pc_reg_type); + ir.CreateStore(gsbase_val, gsbase_reg->AddressOf(new_state_ptr, ir)); + } + + if (fsbase_reg) { + const auto fsbase_val = llvm::ConstantExpr::getPtrToInt( + llvm::ConstantExpr::getAddrSpaceCast( + llvm::ConstantExpr::getNullValue( + llvm::PointerType::get(block->getContext(), 257)), + llvm::PointerType::get(block->getContext(), 0)), + pc_reg_type); + ir.CreateStore(fsbase_val, fsbase_reg->AddressOf(new_state_ptr, ir)); + } + + if (ssbase_reg) { + ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), + ssbase_reg->AddressOf(new_state_ptr, ir)); + } + + if (dsbase_reg) { + ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), + dsbase_reg->AddressOf(new_state_ptr, ir)); + } + + if (esbase_reg) { + ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), + esbase_reg->AddressOf(new_state_ptr, ir)); + } + + if (csbase_reg) { + ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), + csbase_reg->AddressOf(new_state_ptr, ir)); + } + } +} + + +// Initialize the state structure with default values, loaded from global +// variables. The purpose of these global variables is to show that there are +// some unmodelled external dependencies inside of a lifted function. +void CodeLifter::InitializeStateStructureFromGlobalRegisterVariables( + llvm::BasicBlock *block, llvm::Value *state_ptr) { + + // Get or create globals for all top-level registers. The idea here is that + // the spec could feasibly miss some dependencies, and so after optimization, + // we'll be able to observe uses of `__anvill_reg_*` globals, and handle + // them appropriately. 
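// To make this concrete for a single register (RAX on amd64; the register
// name and the "__anvill_reg_" spelling are illustrative, the actual prefix
// is whatever `kUnmodelledRegisterPrefix` expands to), the loop below
// effectively does the equivalent of:
//
//   auto rax = options.arch->RegisterByName("RAX");
//   auto reg_global =
//       semantics_module->getOrInsertGlobal("__anvill_reg_RAX", rax->type);
//   ir.CreateStore(ir.CreateLoad(rax->type, reg_global),
//                  rax->AddressOf(state_ptr, block));
//
// If the specification really did miss a dependency on RAX, the optimized
// bitcode keeps a load of `@__anvill_reg_RAX`, which downstream consumers
// can detect and report.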
+ + llvm::IRBuilder<> ir(block); + + options.arch->ForEachRegister([=, &ir](const remill::Register *reg_) { + if (auto reg = reg_->EnclosingRegister(); + reg_ == reg && reg != sp_reg && reg != pc_reg) { + + std::stringstream ss; + ss << kUnmodelledRegisterPrefix << reg->name; + const auto reg_name = ss.str(); + + auto reg_global = semantics_module->getGlobalVariable(reg_name); + if (!reg_global) { + reg_global = new llvm::GlobalVariable( + *semantics_module, reg->type, false, + llvm::GlobalValue::ExternalLinkage, nullptr, reg_name); + } + + const auto reg_ptr = reg->AddressOf(state_ptr, block); + ir.CreateStore(ir.CreateLoad(reg->type, reg_global), reg_ptr); + } + }); +} + +llvm::Function *CodeLifter::GetTypeHintFunction() { + const auto &func_name = kTypeHintFunctionPrefix; + + auto func = semantics_module->getFunction(func_name); + if (func != nullptr) { + return func; + } + + auto ptr = llvm::PointerType::get(this->semantics_module->getContext(), 0); + llvm::Type *func_parameters[] = {ptr}; + + auto func_type = llvm::FunctionType::get(ptr, func_parameters, false); + + func = llvm::Function::Create(func_type, llvm::GlobalValue::ExternalLinkage, + func_name, this->semantics_module); + + return func; +} + +llvm::MDNode *CodeLifter::GetAddrAnnotation(uint64_t addr, + llvm::LLVMContext &context) const { + auto pc_val = llvm::ConstantInt::get( + remill::RecontextualizeType(address_type, context), addr); + auto pc_md = llvm::ValueAsMetadata::get(pc_val); + return llvm::MDNode::get(context, pc_md); +} + +llvm::MDNode *CodeLifter::GetUidAnnotation(Uid uid, + llvm::LLVMContext &context) const { + auto uid_val = llvm::ConstantInt::get( + remill::RecontextualizeType(uid_type, context), uid.value); + auto uid_md = llvm::ValueAsMetadata::get(uid_val); + return llvm::MDNode::get(context, uid_md); +} + +// Allocate and initialize the state structure. 
+llvm::Value * +CodeLifter::AllocateAndInitializeStateStructure(llvm::BasicBlock *block, + const remill::Arch *arch) { + llvm::IRBuilder<> ir(block); + const auto state_type = arch->StateStructType(); + llvm::Value *new_state_ptr = nullptr; + + switch (options.state_struct_init_procedure) { + case StateStructureInitializationProcedure::kNone: + new_state_ptr = ir.CreateAlloca(state_type); + break; + case StateStructureInitializationProcedure::kZeroes: + new_state_ptr = ir.CreateAlloca(state_type); + ir.CreateStore(llvm::Constant::getNullValue(state_type), new_state_ptr); + break; + case StateStructureInitializationProcedure::kUndef: + new_state_ptr = ir.CreateAlloca(state_type); + ir.CreateStore(llvm::UndefValue::get(state_type), new_state_ptr); + break; + case StateStructureInitializationProcedure::kGlobalRegisterVariables: + new_state_ptr = ir.CreateAlloca(state_type); + InitializeStateStructureFromGlobalRegisterVariables(block, new_state_ptr); + break; + case StateStructureInitializationProcedure:: + kGlobalRegisterVariablesAndZeroes: + new_state_ptr = ir.CreateAlloca(state_type); + ir.CreateStore(llvm::Constant::getNullValue(state_type), new_state_ptr); + InitializeStateStructureFromGlobalRegisterVariables(block, new_state_ptr); + break; + case StateStructureInitializationProcedure:: + kGlobalRegisterVariablesAndUndef: + new_state_ptr = ir.CreateAlloca(state_type); + ir.CreateStore(llvm::UndefValue::get(state_type), new_state_ptr); + InitializeStateStructureFromGlobalRegisterVariables(block, new_state_ptr); + break; + } + + ArchSpecificStateStructureInitialization(block, new_state_ptr); + return new_state_ptr; +} + +void CodeLifter::RecursivelyInlineFunctionCallees(llvm::Function *inf) { + std::vector calls_to_inline; + + // Set of instructions that we should not annotate because we can't tie them + // to a particular instruction address. + std::unordered_set insts_without_provenance; + if (options.pc_metadata_name) { + for (auto &inst : llvm::instructions(*inf)) { + if (!inst.getMetadata(pc_annotation_id)) { + insts_without_provenance.insert(&inst); + } + } + } + + for (auto changed = true; changed; changed = !calls_to_inline.empty()) { + calls_to_inline.clear(); + + for (auto &inst : llvm::instructions(*inf)) { + if (auto call_inst = llvm::dyn_cast(&inst); call_inst) { + if (auto called_func = call_inst->getCalledFunction(); + called_func && !called_func->isDeclaration() && + !called_func->hasFnAttribute(llvm::Attribute::NoInline)) { + calls_to_inline.push_back(call_inst); + } + } + } + + for (llvm::CallInst *call_inst : calls_to_inline) { + llvm::MDNode *call_pc = nullptr; + if (options.pc_metadata_name) { + call_pc = call_inst->getMetadata(pc_annotation_id); + } + + llvm::InlineFunctionInfo info; + auto res = llvm::InlineFunction(*call_inst, info); + + CHECK(res.isSuccess()); + + // Propagate PC metadata from call sites into inlined call bodies. + if (options.pc_metadata_name) { + for (auto &inst : llvm::instructions(*inf)) { + if (!inst.getMetadata(pc_annotation_id)) { + if (insts_without_provenance.count(&inst)) { + continue; + + // This call site had no associated PC metadata, and so we want + // to exclude any inlined code from accidentally being associated + // with other PCs on future passes. + } else if (!call_pc) { + insts_without_provenance.insert(&inst); + + // We can propagate the annotation. 
+          } else {
+            inst.setMetadata(pc_annotation_id, call_pc);
+          }
+        }
+      }
+    }
+  }
+
+  // Set up and run a round of cleanup optimizations.
+
+
+  DCHECK(!llvm::verifyFunction(*inf, &llvm::errs()));
+
+  llvm::ModuleAnalysisManager mam;
+  llvm::FunctionAnalysisManager fam;
+  llvm::LoopAnalysisManager lam;
+  llvm::CGSCCAnalysisManager cam;
+
+  llvm::ModulePassManager mpm;
+  llvm::FunctionPassManager fpm;
+
+
+  llvm::PassBuilder pb;
+  pb.registerModuleAnalyses(mam);
+  pb.registerFunctionAnalyses(fam);
+  pb.registerLoopAnalyses(lam);
+  pb.registerCGSCCAnalyses(cam);
+  pb.crossRegisterProxies(lam, fam, cam, mam);
+
+  fpm.addPass(llvm::SimplifyCFGPass());
+  fpm.addPass(llvm::PromotePass());
+  fpm.addPass(llvm::ReassociatePass());
+  fpm.addPass(llvm::DSEPass());
+  fpm.addPass(llvm::DCEPass());
+  fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG));
+  fpm.addPass(llvm::DCEPass());
+  fpm.addPass(llvm::InstCombinePass());
+
+  fpm.run(*inf, fam);
+
+  mam.clear();
+  fam.clear();
+  lam.clear();
+  cam.clear();
+  ClearVariableNames(inf);
+}
+
+}  // namespace anvill
diff --git a/lib/Lifters/CodeLifter.h b/lib/Lifters/CodeLifter.h
new file mode 100644
index 000000000..2df69cf6f
--- /dev/null
+++ b/lib/Lifters/CodeLifter.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "anvill/Declarations.h"
+#include "anvill/Lifters.h"
+
+namespace anvill {
+/**
+ * @brief A class that lifts machine-level semantics to LLVM.
+ *
+ */
+class CodeLifter {
+ protected:
+  const LifterOptions &options;
+
+  // Remill intrinsics inside of `module`.
+
+
+  llvm::Module *semantics_module;
+
+  remill::IntrinsicTable intrinsics;
+
+  llvm::LLVMContext &llvm_context;
+
+  remill::OperandLifter::OpLifterPtr op_lifter;
+
+
+  // Are we lifting SPARC code? This affects whether or not we need to do
+  // double checking on function return addresses.
+  const bool is_sparc;
+
+  // Are we lifting x86(-64) code?
+  const bool is_x86_or_amd64;
+  // Program counter and stack pointer registers.
+  const remill::Register *const pc_reg;
+  const remill::Register *const sp_reg;
+
+
+  const MemoryProvider &memory_provider;
+  const TypeProvider &type_provider;
+  const TypeTranslator &type_specifier;
+  llvm::IntegerType *const address_type;
+  llvm::IntegerType *const uid_type;
+
+
+  // Convenient to keep around.
+  llvm::Type *const i8_type;
+  llvm::Constant *const i8_zero;
+  llvm::Type *const i32_type;
+  llvm::PointerType *const mem_ptr_type;
+  llvm::PointerType *const state_ptr_type;
+
+  llvm::Type *const pc_reg_type{nullptr};
+
+
+  void RecursivelyInlineFunctionCallees(llvm::Function *inf);
+
+  // Allocate and initialize the state structure.
+ llvm::Value *AllocateAndInitializeStateStructure(llvm::BasicBlock *block, + const remill::Arch *arch); + + + void + InitializeStateStructureFromGlobalRegisterVariables(llvm::BasicBlock *block, + llvm::Value *state_ptr); + + void ArchSpecificStateStructureInitialization(llvm::BasicBlock *block, + llvm::Value *new_state_ptr); + + unsigned pc_annotation_id; + + + llvm::Function *GetTypeHintFunction(); + + llvm::MDNode *GetAddrAnnotation(uint64_t addr, + llvm::LLVMContext &context) const; + llvm::MDNode *GetUidAnnotation(Uid uid, + llvm::LLVMContext &context) const; + + public: + CodeLifter(const LifterOptions &options, llvm::Module *semantics_module, + const TypeTranslator &type_specifier); + + + CodeLifter(CodeLifter &&) = default; +}; + +} // namespace anvill \ No newline at end of file diff --git a/lib/Lifters/DataLifter.cpp b/lib/Lifters/DataLifter.cpp index dd665aa64..202b4b0bd 100644 --- a/lib/Lifters/DataLifter.cpp +++ b/lib/Lifters/DataLifter.cpp @@ -180,24 +180,27 @@ llvm::Constant *DataLifter::LiftData(const VariableDecl &decl, << std::dec; break; } - bytes.push_back(byte); } } - if (bytes_accessable) { - value = lifter_context.value_lifter.Lift( - std::string_view(reinterpret_cast(bytes.data()), bytes.size()), - type, lifter_context, decl.address); - } - - auto is_constant = first_byte_perms == BytePermission::kReadable || first_byte_perms == BytePermission::kReadableExecutable; - return new llvm::GlobalVariable(*options.module, type, is_constant, - llvm::GlobalValue::ExternalLinkage, value, - var_name); + auto md = type_specifier.EncodeToMetadata(decl.spec_type); + auto gvar = new llvm::GlobalVariable(*options.module, type, is_constant, + llvm::GlobalValue::ExternalLinkage, + nullptr, var_name); + gvar->setMetadata("anvill.type", md); + lifter_context.AddEntity(gvar, decl.address); + + if (bytes_accessable) { + value = lifter_context.value_lifter.Lift(bytes, type, lifter_context, + decl.address); + } + gvar->setInitializer(value); + + return gvar; } // Declare a lifted a variable. 
Will return `nullptr` if the memory is diff --git a/lib/Lifters/EntityLifter.cpp b/lib/Lifters/EntityLifter.cpp index b88fea994..900b0ef82 100644 --- a/lib/Lifters/EntityLifter.cpp +++ b/lib/Lifters/EntityLifter.cpp @@ -8,19 +8,22 @@ #include "EntityLifter.h" -#include #include #include #include #include +#include #include #include +#include #include #include #include #include +#include "Lifters/FunctionLifter.h" + namespace anvill { EntityLifterImpl::~EntityLifterImpl(void) {} @@ -30,10 +33,18 @@ EntityLifterImpl::EntityLifterImpl(const LifterOptions &options_) memory_provider(&(options.memory_provider)), type_provider(&(options.type_provider)), value_lifter(options), - function_lifter(options), + function_lifter(FunctionLifter::CreateFunctionLifter(options_)), data_lifter(options) { CHECK_EQ(options.arch->context, &(options.module->getContext())); options.arch->PrepareModule(options.module); + + // Lift named types + for (auto sty : this->type_provider->NamedTypes()) { + auto gv = new llvm::GlobalVariable(*options.module, sty, false, + llvm::GlobalValue::ExternalLinkage, + nullptr, sty->getName() + "_var_repr"); + llvm::appendToUsed(*options.module, gv); + } } // Tells the entity lifter that `entity` is the lifted function/data at @@ -79,8 +90,7 @@ void EntityLifterImpl::ForEachEntityAtAddress( EntityLifter::~EntityLifter(void) {} -EntityLifter::EntityLifter( - const LifterOptions &options_) +EntityLifter::EntityLifter(const LifterOptions &options_) : impl(std::make_shared(options_)) {} // Assuming that `entity` is an entity that was lifted by this `EntityLifter`, diff --git a/lib/Lifters/FunctionLifter.cpp b/lib/Lifters/FunctionLifter.cpp index 26e5b4ae9..d601aebf1 100644 --- a/lib/Lifters/FunctionLifter.cpp +++ b/lib/Lifters/FunctionLifter.cpp @@ -9,12 +9,20 @@ #include "FunctionLifter.h" #include +#include #include #include #include #include #include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include @@ -23,43 +31,47 @@ #include #include #include +#include #include #include +#include #include #include +#include +#include #include #include #include +#include #include #include +#include #include +#include #include #include #include #include +#include +#include +#include +#include #include +#include #include #include #include +#include +#include "BasicBlockLifter.h" #include "EntityLifter.h" +#include "anvill/Declarations.h" +#include "anvill/Specification.h" namespace anvill { namespace { -// Clear out LLVM variable names. They're usually not helpful. -static void ClearVariableNames(llvm::Function *func) { - for (auto &block : *func) { - // block.setName(llvm::Twine::createNull()); - for (auto &inst : block) { - if (inst.hasName()) { - inst.setName(llvm::Twine::createNull()); - } - } - } -} - // A function that ensures that the memory pointer escapes, and thus none of // the memory writes at the end of a function are lost. @@ -81,44 +93,6 @@ GetMemoryEscapeFunc(const remill::IntrinsicTable &intrinsics) { kMemoryPointerEscapeFunction.data(), module); } -// We're calling a remill intrinsic and we want to "mute" the escape of the -// `State` pointer by replacing it with an `undef` value. This permits -// optimizations while allowing us to still observe what reaches the `pc` -// argument of the intrinsic. This is valuable for function return intrinsics, -// because it lets us verify that the value that we initialize into the return -// address location actually reaches the `pc` parameter of the -// `__remill_function_return`. 
-static void MuteStateEscape(llvm::CallInst *call) { - auto state_ptr_arg = call->getArgOperand(remill::kStatePointerArgNum); - auto undef_val = llvm::UndefValue::get(state_ptr_arg->getType()); - call->setArgOperand(remill::kStatePointerArgNum, undef_val); -} - -// This returns a special anvill built-in used to describe jump tables -// inside lifted code. It takes the address type to generate the function -// parameters of correct type. -static llvm::Function *GetAnvillSwitchFunc(llvm::Module &module, - llvm::Type *type) { - - const auto &func_name = kAnvillSwitchCompleteFunc; - - auto func = module.getFunction(func_name); - if (func != nullptr) { - return func; - } - - llvm::Type *func_parameters[] = {type}; - - auto func_type = llvm::FunctionType::get(type, func_parameters, true); - - func = llvm::Function::Create(func_type, llvm::GlobalValue::ExternalLinkage, - func_name, module); - - func->addFnAttr(llvm::Attribute::ReadNone); - - return func; -} - // Annotate an instruction with the `id` annotation if that instruction // is unannotated. static void AnnotateInstruction(llvm::Instruction *inst, unsigned id, @@ -128,14 +102,6 @@ static void AnnotateInstruction(llvm::Instruction *inst, unsigned id, } } -static void AnnotateInstruction(llvm::Value *val, unsigned id, - llvm::MDNode *annot) { - if (auto inst = llvm::dyn_cast<llvm::Instruction>(val)) { - if (!inst->getMetadata(id)) { - inst->setMetadata(id, annot); - } - } -} // Annotate an instruction with the `id` annotation if that instruction // is unannotated. @@ -152,151 +118,22 @@ static void AnnotateInstructions(llvm::BasicBlock *block, unsigned id, FunctionLifter::~FunctionLifter(void) {} -FunctionLifter::FunctionLifter(const LifterOptions &options_) - : options(options_), - memory_provider(options.memory_provider), - type_provider(options.type_provider), - type_specifier(options.TypeDictionary(), options.arch), - semantics_module(remill::LoadArchSemantics(options.arch)), - llvm_context(semantics_module->getContext()), - intrinsics(semantics_module.get()), - op_lifter(options.arch->DefaultLifter(intrinsics)), - pc_reg(options.arch - ->RegisterByName(options.arch->ProgramCounterRegisterName()) - ->EnclosingRegister()), - sp_reg( - options.arch->RegisterByName(options.arch->StackPointerRegisterName()) - ->EnclosingRegister()), - is_sparc(options.arch->IsSPARC32() || options.arch->IsSPARC64()), - is_x86_or_amd64(options.arch->IsX86() || options.arch->IsAMD64()), - i8_type(llvm::Type::getInt8Ty(llvm_context)), - i8_zero(llvm::Constant::getNullValue(i8_type)), - i32_type(llvm::Type::getInt32Ty(llvm_context)), - mem_ptr_type( - llvm::dyn_cast<llvm::PointerType>(remill::RecontextualizeType( - options.arch->MemoryPointerType(), llvm_context))), - state_ptr_type( - llvm::dyn_cast<llvm::PointerType>(remill::RecontextualizeType( - options.arch->StatePointerType(), llvm_context))), - address_type( - llvm::Type::getIntNTy(llvm_context, options.arch->address_size)), - pc_reg_type(pc_reg->type) { - if (options.pc_metadata_name) { - pc_annotation_id = llvm_context.getMDKindID(options.pc_metadata_name); - } +FunctionLifter +FunctionLifter::CreateFunctionLifter(const LifterOptions &options_) { + return FunctionLifter(options_, remill::LoadArchSemantics(options_.arch)); } -llvm::BranchInst * -FunctionLifter::BranchToInst(uint64_t from_addr, uint64_t to_addr, - const remill::DecodingContext &mapper, - llvm::BasicBlock *from_block) { - auto br = llvm::BranchInst::Create( - GetOrCreateBlock(from_addr, to_addr, mapper), from_block); - AnnotateInstruction(br, pc_annotation_id, pc_annotation); - return br; -} -
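A note on the `llvm::Attribute::ReadNone` function attribute used by the removed `GetAnvillSwitchFunc` above (and by the breakpoint instrumentation later in this file): on recent LLVM (16 and newer, which this branch builds against), side-effect freedom on a function is modeled with `llvm::MemoryEffects` rather than a `readnone` function attribute. A minimal sketch of declaring a comparable vararg marker function that way; the `__example_marker` name is illustrative and not an anvill symbol:

#include <llvm/IR/Function.h>
#include <llvm/IR/Module.h>
#include <llvm/Support/ModRef.h>

// Declare (or reuse) an externally-linked vararg marker function and mark
// it as having no memory effects, the modern spelling of `readnone`.
static llvm::Function *GetMarkerFunc(llvm::Module &module, llvm::Type *type) {
  if (auto func = module.getFunction("__example_marker")) {
    return func;
  }
  auto func_type = llvm::FunctionType::get(type, {type}, /*isVarArg=*/true);
  auto func = llvm::Function::Create(
      func_type, llvm::GlobalValue::ExternalLinkage, "__example_marker",
      module);
  func->setMemoryEffects(llvm::MemoryEffects::none());
  return func;
}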
-// Helper to get the basic block to contain the instruction at `addr`. This -// function drives a work list, where the first time we ask for the -// instruction at `addr`, we enqueue a bit of work to decode and lift that -// instruction. -llvm::BasicBlock * -FunctionLifter::GetOrCreateBlock(uint64_t from_addr, uint64_t to_addr, - const remill::DecodingContext &mapper) { - auto &block = edge_to_dest_block[{from_addr, to_addr}]; - if (block) { - return block; - } - - std::stringstream ss; - ss << "inst_" << std::hex << to_addr; - block = llvm::BasicBlock::Create(llvm_context, ss.str(), lifted_func); - - // NOTE(pag): We always add to the work list without consulting/updating - // `addr_to_block` so that we can observe self-tail-calls and - // lift them as such, rather than as jumps back into the first - // lifted block. - edge_work_list.emplace(to_addr, from_addr); - this->decoding_contexts.emplace(std::make_pair(to_addr, from_addr), mapper); - return block; -} - -llvm::BasicBlock * -FunctionLifter::GetOrCreateTargetBlock(const remill::Instruction &from_inst, - uint64_t to_addr, - const remill::DecodingContext &mapper) { - return GetOrCreateBlock(from_inst.pc, to_addr, mapper); -} - -// Try to decode an instruction at address `addr` into `*inst_out`. Returns -// the context map of the decoded instruction if successful and std::nullopt otherwise. `is_delayed` tells the decoder -// whether or not the instruction being decoded is being decoded inside of a -// delay slot of another instruction. -bool FunctionLifter::DecodeInstructionInto(const uint64_t addr, bool is_delayed, - remill::Instruction *inst_out, - remill::DecodingContext context) { - static const auto max_inst_size = options.arch->MaxInstructionSize(context); - inst_out->Reset(); - - // Read the maximum number of bytes possible for instructions on this - // architecture. For x86(-64), this is 15 bytes, whereas for fixed-width - // architectures like AArch32/AArch64 and SPARC32/SPARC64, this is 4 bytes. - inst_out->bytes.reserve(max_inst_size); - - auto accumulate_inst_byte = [=](auto byte, auto accessible, auto perms) { - switch (accessible) { - case ByteAvailability::kUnknown: - case ByteAvailability::kUnavailable: return false; - default: - switch (perms) { - case BytePermission::kUnknown: - case BytePermission::kReadableExecutable: - case BytePermission::kReadableWritableExecutable: - inst_out->bytes.push_back(static_cast(byte)); - return true; - case BytePermission::kReadable: - case BytePermission::kReadableWritable: return false; - } - } - }; - - for (auto i = 0u; i < max_inst_size; ++i) { - if (!std::apply(accumulate_inst_byte, memory_provider.Query(addr + i))) { - break; - } - } - - if (is_delayed) { - return options.arch->DecodeDelayedInstruction( - addr, inst_out->bytes, *inst_out, std::move(context)); - } else { - DLOG(INFO) << "Ops emplace: " << inst_out->operands.size(); - return options.arch->DecodeInstruction(addr, inst_out->bytes, *inst_out, - std::move(context)); - } -} +FunctionLifter::FunctionLifter(const LifterOptions &options_, + std::unique_ptr semantics_module) + : CodeLifter(options_, semantics_module.get(), this->type_specifier), + semantics_module(std::move(semantics_module)), + type_specifier(options_.TypeDictionary(), options_.arch) { -// Visit an invalid instruction. An invalid instruction is a sequence of -// bytes which cannot be decoded, or an empty byte sequence. 
-void FunctionLifter::VisitInvalid(const remill::Instruction &inst, - llvm::BasicBlock *block) { - MuteStateEscape( - remill::AddTerminatingTailCall(block, intrinsics.error, intrinsics)); -} -// Visit an error instruction. An error instruction is guaranteed to trap -// execution somehow, e.g. `ud2` on x86. Error instructions are treated -// similarly to invalid instructions, with the exception that they can have -// delay slots, and therefore the subsequent instruction may actually execute -// prior to the error. -void FunctionLifter::VisitError( - const remill::Instruction &inst, - std::optional &delayed_inst, llvm::BasicBlock *block) { - VisitDelayedInstruction(inst, delayed_inst, block, true); - MuteStateEscape( - remill::AddTerminatingTailCall(block, intrinsics.error, intrinsics)); + anvill::CloneIntrinsicsFromModule(*this->semantics_module, + *this->options.module); } void FunctionLifter::InsertError(llvm::BasicBlock *block) { @@ -307,791 +144,6 @@ void FunctionLifter::InsertError(llvm::BasicBlock *block) { AnnotateInstruction(tail, pc_annotation_id, pc_annotation); } -// Visit a normal instruction. Normal instructions have straight line control- -// flow semantics, i.e. after executing the instruction, execution proceeds -// to the next instruction (`inst.next_pc`). -void FunctionLifter::VisitNormal( - const remill::Instruction &inst, llvm::BasicBlock *block, - const remill::Instruction::NormalInsn &mapper) { - auto cf = options.control_flow_provider.GetControlFlowOverride(inst.pc); - bool stop = false; - - if (std::holds_alternative(cf)) { - auto spec = std::get(cf); - stop = spec.stop; - } else if (!std::holds_alternative(cf)) { - LOG(ERROR) - << "Found invalid control flow override for normal instruction at " - << std::hex << inst.pc; - } - - if (stop) { - InsertError(block); - } else { - llvm::BranchInst::Create( - GetOrCreateTargetBlock(inst, inst.next_pc, - mapper.fallthrough.fallthrough_context), - block); - } -} - -// Visit a no-op instruction. These behave identically to normal instructions -// from a control-flow perspective. -void FunctionLifter::VisitNoOp(const remill::Instruction &inst, - llvm::BasicBlock *block, - const remill::Instruction::NoOp &mapper) { - VisitNormal(inst, block, mapper.fallthrough); -} - -// Visit a direct jump control-flow instruction. The target of the jump is -// known at decode time, and the target address is available in -// `inst.branch_taken_pc`. Execution thus needs to transfer to the instruction -// (and thus `llvm::BasicBlock`) associated with `inst.branch_taken_pc`. 
-void FunctionLifter::VisitDirectJump( - const remill::Instruction &inst, - std::optional &delayed_inst, llvm::BasicBlock *block, - const remill::Instruction::DirectJump &mapper) { - auto cf = options.control_flow_provider.GetControlFlowOverride(inst.pc); - if (std::holds_alternative(cf)) { - auto jmp_spec = std::get(cf); - - if (jmp_spec.targets.size() != 1) { - LOG(FATAL) << "Invalid number of targets for direct jump at " << std::hex - << inst.pc; - } - - CHECK_EQ(mapper.taken_flow.known_target, jmp_spec.targets[0].address) - << "Spec and remill don't agree on jump target at " << std::hex - << inst.pc; - VisitDelayedInstruction(inst, delayed_inst, block, true); - llvm::BranchInst::Create( - GetOrCreateTargetBlock(inst, mapper.taken_flow.known_target, - mapper.taken_flow.static_context), - block); - } else if (std::holds_alternative(cf)) { - VisitDelayedInstruction(inst, delayed_inst, block, true); - CallFunction(inst, block, inst.branch_taken_pc); - InsertError(block); - } else { - LOG(FATAL) << "Invalid spec for direct jump at " << std::hex << inst.pc; - } -} - -remill::DecodingContext FunctionLifter::ApplyTargetList( - const std::unordered_map &assignments, - remill::DecodingContext prev_context) { - for (const auto &[k, v] : assignments) { - prev_context.UpdateContextReg(k, v); - } - return prev_context; -} - -// Visit an indirect jump that is a jump table. -void FunctionLifter::DoSwitchBasedIndirectJump( - const remill::Instruction &inst, llvm::BasicBlock *block, - const std::vector &target_list, - const remill::Instruction::IndirectJump &mapper, - const remill::DecodingContext &prev_context) { - - auto add_remill_jump{true}; - llvm::BasicBlock *current_bb = block; - - // This is a list of the possibilities we want to cover: - // - // 1. No target: AddTerminatingTailCall - // 2. Single target, complete: normal jump - // 3. Multiple targets, complete: switch with no default case - // 4. Single or multiple targets, not complete: switch with default case - // containing AddTerminatingTailCall - - - // If the target list is complete and has only one destination, then we - // can handle it as normal jump - if (target_list.size() == 1U) { - add_remill_jump = false; - - auto destination = target_list[0]; - - - llvm::BranchInst::Create( - GetOrCreateTargetBlock( - inst, destination.address, - this->ApplyTargetList(destination.context_assignments, - prev_context)), - block); - - // We have multiple destinations. Handle this with a switch. 
If the target - // list is not marked as complete, then we'll still add __remill_jump - // inside the default block - } else { - llvm::BasicBlock *default_case{nullptr}; - - // Create a default case that is not reachable - add_remill_jump = false; - default_case = llvm::BasicBlock::Create(llvm_context, "", lifted_func); - - llvm::IRBuilder<> builder(default_case); - builder.CreateUnreachable(); - - // Create the parameters for the special anvill switch - auto pc = this->op_lifter->LoadRegValue( - block, state_ptr, options.arch->ProgramCounterRegisterName()); - - std::vector<llvm::Value *> switch_parameters; - switch_parameters.push_back(pc); - - for (auto destination : target_list) { - switch_parameters.push_back( - llvm::ConstantInt::get(pc_reg->type, destination.address)); - } - - // Invoke the anvill switch - auto &module = *block->getModule(); - auto anvill_switch_func = GetAnvillSwitchFunc(module, address_type); - - llvm::IRBuilder<> ir(block); - auto next_pc = ir.CreateCall(anvill_switch_func, switch_parameters); - - // Now use the anvill switch output with a SwitchInst, mapping cases - // by index - auto dest_count = target_list.size(); - auto switch_inst = ir.CreateSwitch(next_pc, default_case, dest_count); - auto dest_id{0u}; - - for (auto dest : target_list) { - auto dest_block = GetOrCreateTargetBlock( - inst, dest.address, - this->ApplyTargetList(dest.context_assignments, prev_context)); - auto dest_case = llvm::ConstantInt::get(address_type, dest_id++); - switch_inst->addCase(dest_case, dest_block); - } - - AnnotateInstruction(next_pc, pc_annotation_id, pc_annotation); - AnnotateInstruction(switch_inst, pc_annotation_id, pc_annotation); - } - - if (add_remill_jump) { - - // Either we didn't find any target list from the control flow provider, or - // we did but it wasn't marked as `complete`. - auto jump = - remill::AddTerminatingTailCall(current_bb, intrinsics.jump, intrinsics); - AnnotateInstruction(jump, pc_annotation_id, pc_annotation); - } -} - -// Visit an indirect jump control-flow instruction. This may be register- or -// memory-indirect, e.g. `jmp rax` or `jmp [rax]` on x86. Thus, the target is -// not known a priori and our default mechanism for handling this is to perform -// a tail-call to the `__remill_jump` function, whose role is to be a stand-in -// for something that enacts the effect of "transfer to target." -void FunctionLifter::VisitIndirectJump( - const remill::Instruction &inst, - std::optional<remill::Instruction> &delayed_inst, llvm::BasicBlock *block, - const remill::Instruction::IndirectJump &mapper, - const remill::DecodingContext &prev_context) { - auto cf = options.control_flow_provider.GetControlFlowOverride(inst.pc); - if (std::holds_alternative(cf)) { - auto jmp_spec = std::get(cf); - - VisitDelayedInstruction(inst, delayed_inst, block, true); - - // Try to get the target type given the source. This is like a tail-call, - // e.g. `jmp [fseek]`. - if (auto maybe_decl = - type_provider.TryGetCalledFunctionType(func_address, inst)) { - llvm::IRBuilder<> ir(block); - llvm::Value *dest_addr = ir.CreateLoad(pc_reg_type, pc_reg_ref); - AnnotateInstruction(dest_addr, pc_annotation_id, pc_annotation); - auto new_mem_ptr = - CallCallableDecl(block, dest_addr, std::move(maybe_decl.value())); - ir.CreateRet(new_mem_ptr); - - // Attempt to get the target list for this control flow instruction - // so that we can handle this jump in a less generic way.
- } else if (jmp_spec.targets.size() > 0) { - - DoSwitchBasedIndirectJump(inst, block, jmp_spec.targets, mapper, - prev_context); - - // No good info; do an indirect jump. - } else { - auto jump = - remill::AddTerminatingTailCall(block, intrinsics.jump, intrinsics); - AnnotateInstruction(jump, pc_annotation_id, pc_annotation); - } - } else if (std::holds_alternative(cf)) { - VisitDelayedInstruction(inst, delayed_inst, block, true); - CallFunction(inst, block, std::nullopt); - InsertError(block); - } else if (std::holds_alternative(cf)) { - // TODO(Ian): It feels like we should be able to handle overrides/control flow much more uniformly; I think it would be good to do so in - // a separate PR. - this->VisitFunctionReturn(inst, delayed_inst, block); - } else { - LOG(FATAL) << "Invalid spec for indirect jump at " << std::hex << inst.pc; - } -} - -void FunctionLifter::VisitConditionalInstruction( - const remill::Instruction &inst, - std::optional<remill::Instruction> &delayed_inst, llvm::BasicBlock *block, - const remill::Instruction::ConditionalInstruction &conditional_insn, - const remill::DecodingContext &prev_context) { - - const auto lifted_func = block->getParent(); - const auto cond = remill::LoadBranchTaken(block); - const auto taken_block = - llvm::BasicBlock::Create(llvm_context, "", lifted_func); - const auto not_taken_block = - llvm::BasicBlock::Create(llvm_context, "", lifted_func); - - auto cond_jump_fallthrough_br = - llvm::BranchInst::Create(taken_block, not_taken_block, cond, block); - - FlowVisitor visitor = {*this, inst, taken_block, delayed_inst, prev_context}; - std::visit(visitor, conditional_insn.taken_branch); - - VisitDelayedInstruction(inst, delayed_inst, not_taken_block, false); - - - auto fallthrough_br = llvm::BranchInst::Create( - GetOrCreateTargetBlock(inst, inst.next_pc, - conditional_insn.fall_through.fallthrough_context), - not_taken_block); - - AnnotateInstruction(cond_jump_fallthrough_br, pc_annotation_id, - pc_annotation); - AnnotateInstruction(fallthrough_br, pc_annotation_id, pc_annotation); -} - -// Visit a function return control-flow instruction, which is a form of -// indirect control-flow, but with a certain semantic associated with -// returning from a function. This is treated similarly to indirect jumps, -// except the `__remill_function_return` function is tail-called. -void FunctionLifter::VisitFunctionReturn( - const remill::Instruction &inst, - std::optional<remill::Instruction> &delayed_inst, llvm::BasicBlock *block) { - VisitDelayedInstruction(inst, delayed_inst, block, true); - auto func_return = remill::AddTerminatingTailCall( - block, intrinsics.function_return, intrinsics); - AnnotateInstruction(func_return, pc_annotation_id, pc_annotation); - MuteStateEscape(func_return); -} - -std::optional<CallableDecl> -FunctionLifter::TryGetTargetFunctionType(const remill::Instruction &from_inst, - std::uint64_t address) { - std::optional<CallableDecl> opt_callable_decl = - type_provider.TryGetCalledFunctionTypeOrDefault(func_address, from_inst, - address); - - return opt_callable_decl; -} - -// Call `pc` in `block`, treating it as a callable declaration `decl`.
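The comment above introduces `CallCallableDecl`, whose removed definition follows. First, a note on the `std::variant` dispatch used throughout these control-flow hunks: the template arguments of `std::holds_alternative` and `std::get` were lost when this diff was captured, but the pattern is ordinary variant testing. A self-contained sketch with illustrative stand-in types (the real anvill override types live in `anvill/Declarations.h`):

#include <cstdint>
#include <variant>
#include <vector>

// Stand-ins for the spec's control-flow override alternatives.
struct JumpTarget { uint64_t address; };
struct Jump { std::vector<JumpTarget> targets; };
struct Call { bool stop; };
struct Return {};
using ControlFlowOverride = std::variant<std::monostate, Jump, Call, Return>;

// Test which alternative is active, then extract it, mirroring how the
// lifter branches on the override kind before lowering control flow.
static bool IsSingleTargetJump(const ControlFlowOverride &cf) {
  return std::holds_alternative<Jump>(cf) &&
         std::get<Jump>(cf).targets.size() == 1u;
}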
-llvm::Value *FunctionLifter::CallCallableDecl(llvm::BasicBlock *block, - llvm::Value *pc, - CallableDecl decl) { - llvm::IRBuilder<> ir(block); - CHECK_NOTNULL(decl.type); - CHECK_EQ(decl.arch, options.arch); - - auto &context = block->getContext(); - - auto dest_func = - ir.CreateBitOrPointerCast(pc, llvm::PointerType::get(context, 0)); - - auto mem_ptr = ir.CreateLoad(mem_ptr_type, mem_ptr_ref); - auto new_mem_ptr = - decl.CallFromLiftedBlock(dest_func, type_specifier.Dictionary(), - intrinsics, block, state_ptr, mem_ptr); - auto store = ir.CreateStore(new_mem_ptr, mem_ptr_ref); - - AnnotateInstruction(dest_func, pc_annotation_id, pc_annotation); - AnnotateInstruction(mem_ptr, pc_annotation_id, pc_annotation); - AnnotateInstruction(new_mem_ptr, pc_annotation_id, pc_annotation); - AnnotateInstruction(store, pc_annotation_id, pc_annotation); - - return new_mem_ptr; -} - -// Try to resolve `inst.branch_taken_pc` to a lifted function, and introduce -// a function call to that address in `block`. Failing this, add a call -// to `__remill_function_call`. -bool FunctionLifter::CallFunction(const remill::Instruction &inst, - llvm::BasicBlock *block, - std::optional<uint64_t> target_pc) { - auto cf = options.control_flow_provider.GetControlFlowOverride(inst.pc); - if (!std::holds_alternative(cf)) { - LOG(FATAL) << "Invalid spec for call at " << std::hex << inst.pc; - } - auto call_spec = std::get(cf); - std::optional<CallableDecl> maybe_decl; - - if (call_spec.target_address.has_value()) { - // First, try to see if it's actually related to another function. This is - // equivalent to a tail-call in the original code. - auto redirected_addr = *call_spec.target_address; - - // Now, get the type of the target given the source and destination. - maybe_decl = TryGetTargetFunctionType(inst, redirected_addr); - target_pc = redirected_addr; - } else if (target_pc.has_value()) { - - maybe_decl = - type_provider.TryGetCalledFunctionType(func_address, inst, *target_pc); - } else { - - // If we don't know a concrete target address, then just try to get the - // target given the source. - maybe_decl = type_provider.TryGetCalledFunctionType(func_address, inst); - } - - if (!maybe_decl) { - LOG(ERROR) << "Missing type information for function called at address " - << std::hex << inst.pc << " in function at address " - << func_address << std::dec; - - // If we do not have a function declaration, treat this as a call - // to an unknown address. - auto call = remill::AddCall(block, intrinsics.function_call, intrinsics); - AnnotateInstruction(call, pc_annotation_id, pc_annotation); - return true; - } - - - llvm::IRBuilder<> ir(block); - llvm::Value *dest_addr = nullptr; - - if (target_pc) { - dest_addr = - options.program_counter_init_procedure(ir, pc_reg, target_pc.value()); - } else { - dest_addr = ir.CreateLoad(pc_reg_type, pc_reg_ref); - } - - AnnotateInstruction(dest_addr, pc_annotation_id, pc_annotation); - auto is_noreturn = maybe_decl->is_noreturn; - (void) CallCallableDecl(block, dest_addr, std::move(maybe_decl.value())); - return !is_noreturn; -} - -// Visit a direct function call control-flow instruction. The target is known -// at decode time, and its realized address is stored in -// `inst.branch_taken_pc`. In practice, what we do in this situation is try -// to call the lifted function at the target address.
-void FunctionLifter::VisitDirectFunctionCall( - const remill::Instruction &inst, - std::optional &delayed_inst, llvm::BasicBlock *block, - const remill::Instruction::DirectFunctionCall &dcall, - const remill::DecodingContext &prev_context) { - VisitDelayedInstruction(inst, delayed_inst, block, true); - bool can_return = CallFunction(inst, block, inst.branch_taken_pc); - VisitAfterFunctionCall(inst, block, dcall, can_return, prev_context); -} - - -// Visit an indirect function call control-flow instruction. Similar to -// indirect jumps, we invoke an intrinsic function, `__remill_function_call`; -// however, unlike indirect jumps, we do not tail-call this intrinsic, and -// we continue lifting at the instruction where execution will resume after -// the callee returns. Thus, lifted bitcode maintains the call graph structure -// as it presents itself in the binary. -void FunctionLifter::VisitIndirectFunctionCall( - const remill::Instruction &inst, - std::optional &delayed_inst, llvm::BasicBlock *block, - const remill::Instruction::IndirectFunctionCall &icall, - const remill::DecodingContext &prev_context) { - - VisitDelayedInstruction(inst, delayed_inst, block, true); - bool can_return = CallFunction(inst, block, std::nullopt); - VisitAfterFunctionCall(inst, block, icall, can_return, prev_context); -} - - -// Helper to figure out the address where execution will resume after a -// function call. In practice this is the instruction following the function -// call, encoded in `inst.branch_not_taken_pc`. However, SPARC has a terrible -// ABI where they inject an invalid instruction following some calls as a way -// of communicating to the callee that they should return an object of a -// particular, hard-coded size. Thus, we want to actually identify then ignore -// that instruction, and present the following address for where execution -// should resume after a `call`. -std::pair -FunctionLifter::LoadFunctionReturnAddress(const remill::Instruction &inst, - llvm::BasicBlock *block) { - - const auto pc = inst.branch_not_taken_pc; - - // The semantics for handling a call save the expected return program counter - // into a local variable. 
- auto ret_pc = this->op_lifter->LoadRegValue(block, state_ptr, - remill::kReturnPCVariableName); - if (!is_sparc) { - return {pc, ret_pc}; - } - - uint8_t bytes[4] = {}; - - for (auto i = 0u; i < 4u; ++i) { - auto [byte, accessible, perms] = memory_provider.Query(pc + i); - switch (accessible) { - case ByteAvailability::kUnknown: - case ByteAvailability::kUnavailable: - LOG(ERROR) - << "Byte at address " << std::hex << (pc + i) - << " is not available for inspection to figure out return address " - << " of call instruction at address " << pc << std::dec; - return {pc, ret_pc}; - - default: bytes[i] = byte; break; - } - - switch (perms) { - case BytePermission::kUnknown: - case BytePermission::kReadableExecutable: - case BytePermission::kReadableWritableExecutable: break; - case BytePermission::kReadable: - case BytePermission::kReadableWritable: - LOG(ERROR) - << "Byte at address " << std::hex << (pc + i) << " being inspected " - << "to figure out return address of call instruction at address " - << pc << " is not executable" << std::dec; - return {pc, ret_pc}; - } - } - - union Format0a { - uint32_t flat; - struct { - uint32_t imm22 : 22; - uint32_t op2 : 3; - uint32_t rd : 5; - uint32_t op : 2; - } u __attribute__((packed)); - } __attribute__((packed)) enc = {}; - static_assert(sizeof(Format0a) == 4, " "); - - enc.flat |= bytes[0]; - enc.flat <<= 8; - enc.flat |= bytes[1]; - enc.flat <<= 8; - enc.flat |= bytes[2]; - enc.flat <<= 8; - enc.flat |= bytes[3]; - - // This looks like an `unimp <imm22>` instruction, where the `imm22` encodes - // the size of the value to return. See "Programming Note" in v8 manual, - // B.31, p 137. - // - // TODO(pag, kumarak): Does a zero value in `enc.u.imm22` imply a no-return - // function? Try this on Compiler Explorer! - if (!enc.u.op && !enc.u.op2) { - LOG(INFO) << "Found structure return of size " << enc.u.imm22 << " to " - << std::hex << pc << " at " << inst.pc << std::dec; - - llvm::IRBuilder<> ir(block); - return {pc + 4u, - ir.CreateAdd(ret_pc, llvm::ConstantInt::get(ret_pc->getType(), 4))}; - - } else { - return {pc, ret_pc}; - } -} - -// Enact relevant control-flow changes after a function call. This figures -// out the return address targeted by the callee and links it into the -// control-flow graph. -void FunctionLifter::VisitAfterFunctionCall( - const remill::Instruction &inst, llvm::BasicBlock *block, - const std::variant<remill::Instruction::DirectFunctionCall, remill::Instruction::IndirectFunctionCall> &call, - bool can_return, const remill::DecodingContext &prev_context) { - const auto [ret_pc, ret_pc_val] = LoadFunctionReturnAddress(inst, block); - - llvm::IRBuilder<> ir(block); - if (can_return) { - auto update_pc = ir.CreateStore(ret_pc_val, pc_reg_ref, false); - auto update_next_pc = ir.CreateStore(ret_pc_val, next_pc_reg_ref, false); - auto branch_to_next_pc = - ir.CreateBr(GetOrCreateTargetBlock(inst, ret_pc, prev_context)); - - AnnotateInstruction(update_pc, pc_annotation_id, pc_annotation); - AnnotateInstruction(update_next_pc, pc_annotation_id, pc_annotation); - AnnotateInstruction(branch_to_next_pc, pc_annotation_id, pc_annotation); - } else { - auto tail = remill::AddTerminatingTailCall( - ir.GetInsertBlock(), intrinsics.error, this->intrinsics); - AnnotateInstruction(tail, pc_annotation_id, pc_annotation); - AnnotateInstruction(tail, pc_annotation_id, pc_annotation); - } -} - - -// Visit an asynchronous hyper call control-flow instruction. These are non- -// local control-flow transfers, such as system calls. We treat them like -// indirect function calls.
-void FunctionLifter::VisitAsyncHyperCall( - const remill::Instruction &inst, - std::optional<remill::Instruction> &delayed_inst, llvm::BasicBlock *block) { - VisitDelayedInstruction(inst, delayed_inst, block, true); - remill::AddTerminatingTailCall(block, intrinsics.async_hyper_call, - intrinsics); -} - -// Visit (and thus lift) a delayed instruction. When lifting a delayed -// instruction, we need to know if we're on the taken path of a control-flow -// edge, or on the not-taken path. Delayed instructions appear physically -// after some instructions, but execute logically before them in the -// CPU pipeline. They are basically a way for hardware designers to push -// the effort of keeping the pipeline full to compiler developers. -void FunctionLifter::VisitDelayedInstruction( - const remill::Instruction &inst, - std::optional<remill::Instruction> &delayed_inst, llvm::BasicBlock *block, - bool on_taken_path) { - if (delayed_inst && options.arch->NextInstructionIsDelayed( - inst, *delayed_inst, on_taken_path)) { - const auto prev_pc_annotation = pc_annotation; - pc_annotation = GetPCAnnotation(delayed_inst->pc); - inst.GetLifter()->LiftIntoBlock(*delayed_inst, block, state_ptr, true); - AnnotateInstructions(block, pc_annotation_id, pc_annotation); - pc_annotation = prev_pc_annotation; - } -} - -// Instrument an instruction. This injects a `printf`-like function call just -// before a lifted instruction to aid in tracking the provenance of register -// values, and relating them back to original instructions. -// -// TODO(pag): In future, this mechanism should be used to provide a feedback -// loop, or to provide information to the `TypeProvider` for future -// re-lifting of code. -// -// TODO(pag): Right now, this feature is enabled by a command-line flag, and -// that flag is tested in `VisitInstruction`; we should move -// lifting configuration decisions out of here so that we can pass -// in a kind of `LiftingOptions` type that changes the lifter's -// behavior. -void FunctionLifter::InstrumentDataflowProvenance(llvm::BasicBlock *block) { - if (!data_provenance_function) { - data_provenance_function = - semantics_module->getFunction(kAnvillDataProvenanceFunc); - - if (!data_provenance_function) { - llvm::Type *args[] = {mem_ptr_type, pc_reg_type}; - auto fty = llvm::FunctionType::get(mem_ptr_type, args, true); - data_provenance_function = - llvm::Function::Create(fty, llvm::GlobalValue::ExternalLinkage, - kAnvillDataProvenanceFunc, *semantics_module); - } - } - - std::vector args; - llvm::IRBuilder<> ir(block); - args.push_back(ir.CreateLoad(mem_ptr_type, mem_ptr_ref)); - args.push_back(llvm::ConstantInt::get(pc_reg_type, curr_inst->pc)); - options.arch->ForEachRegister([&](const remill::Register *reg) { - if (reg != pc_reg && reg != sp_reg && reg->EnclosingRegister() == reg) { - args.push_back( - this->op_lifter->LoadRegValue(block, state_ptr, reg->name)); - } - }); - - ir.CreateStore(ir.CreateCall(data_provenance_function, args), mem_ptr_ref); -} - -// Adds a 'breakpoint' instrumentation, which calls functions that are named -// with an instruction's address just before that instruction executes. These -// are nifty for spot-checking bitcode. This function is used like: -// -// mem = breakpoint_<address>(mem, PC, NEXT_PC) -// -// That way, we can look at uses and compare the second argument to the -// hex address encoded in the function name, and also look at the third argument -// and see if it corresponds to the subsequent instruction address.
-void FunctionLifter::InstrumentCallBreakpointFunction(llvm::BasicBlock *block) { - std::stringstream ss; - ss << "breakpoint_" << std::hex << curr_inst->pc; - - const auto func_name = ss.str(); - auto module = block->getModule(); - auto func = module->getFunction(func_name); - if (!func) { - llvm::Type *const params[] = {mem_ptr_type, address_type, address_type}; - const auto fty = llvm::FunctionType::get(mem_ptr_type, params, false); - func = llvm::Function::Create(fty, llvm::GlobalValue::ExternalLinkage, - func_name, module); - - // Make sure to keep this function around (along with `ExternalLinkage`). - func->addFnAttr(llvm::Attribute::OptimizeNone); - func->removeFnAttr(llvm::Attribute::AlwaysInline); - func->removeFnAttr(llvm::Attribute::InlineHint); - func->addFnAttr(llvm::Attribute::NoInline); - func->addFnAttr(llvm::Attribute::ReadNone); - - llvm::IRBuilder<> ir(llvm::BasicBlock::Create(llvm_context, "", func)); - ir.CreateRet(remill::NthArgument(func, 0)); - } - - llvm::Value *args[] = { - new llvm::LoadInst(mem_ptr_type, mem_ptr_ref, llvm::Twine::createNull(), - block), - this->op_lifter->LoadRegValue(block, state_ptr, remill::kPCVariableName), - this->op_lifter->LoadRegValue(block, state_ptr, - remill::kNextPCVariableName)}; - llvm::IRBuilder<> ir(block); - ir.CreateCall(func, args); -} - -// Visit an instruction, and lift it into a basic block. Then, based off of -// the category of the instruction, invoke one of the category-specific -// lifters to enact a change in control-flow. -void FunctionLifter::VisitInstruction( - remill::Instruction &inst, llvm::BasicBlock *block, - remill::DecodingContext prev_insn_context) { - curr_inst = &inst; - - std::optional<remill::Instruction> delayed_inst; - - if (options.track_provenance) { - InstrumentDataflowProvenance(block); - } - - if (options.add_breakpoints) { - InstrumentCallBreakpointFunction(block); - } - - // Even when something isn't supported or is invalid, we still lift - // a call to a semantic, e.g. `INVALID_INSTRUCTION`, so we really want - // to treat instruction lifting as an operation that can't fail. - std::ignore = inst.GetLifter()->LiftIntoBlock(inst, block, state_ptr, - false /* is_delayed */); - - // Figure out if we have to decode the subsequent instruction as a delayed - // instruction. - if (options.arch->MayHaveDelaySlot(inst)) { - delayed_inst = remill::Instruction(); - - if (!DecodeInstructionInto(inst.delayed_pc, true /* is_delayed */, - &*delayed_inst, prev_insn_context)) { - LOG(ERROR) << "Unable to decode or use delayed instruction at " - << std::hex << inst.delayed_pc << std::dec << " of " - << inst.Serialize(); - } - } - - // Do an initial annotation of instructions injected by `LiftIntoBlock`, - // and prior to any lifting of a delayed instruction that might happen - // in any of the below `Visit*` calls. - pc_annotation = GetPCAnnotation(inst.pc); - AnnotateInstructions(block, pc_annotation_id, pc_annotation); - - FlowVisitor visitor = {*this, inst, block, delayed_inst, prev_insn_context}; - std::visit(visitor, inst.flows); - - - // Do a second pass of annotations to apply to the control-flow branching - // instructions added in by the above `Visit*` calls. - AnnotateInstructions(block, pc_annotation_id, pc_annotation); - - curr_inst = nullptr; -} - -// In the process of lifting code, we may want to call another native -// function, `native_func`, for which we have high-level type info.
The main -// lifter operates on a special three-argument function form, and -// operating on this form is actually to our benefit, as it means that as -// long as we can put data into the emulated `State` structure and pull it -// out, then calling one native function from another doesn't require /us/ -// to know how to adapt one native return type into another native return -// type, and instead we let LLVM's optimizations figure it out later during -// scalar replacement of aggregates (SROA). -llvm::Value *FunctionLifter::TryCallNativeFunction(FunctionDecl decl, - llvm::Function *native_func, - llvm::BasicBlock *block) { - llvm::IRBuilder<> irb(block); - - llvm::Value *mem_ptr = irb.CreateLoad(mem_ptr_type, mem_ptr_ref); - mem_ptr = decl.CallFromLiftedBlock(native_func, type_specifier.Dictionary(), - intrinsics, block, state_ptr, mem_ptr); - irb.SetInsertPoint(block); - irb.CreateStore(mem_ptr, mem_ptr_ref); - return mem_ptr; -} - -// Visit all instructions. This runs the work list and lifts instructions. -void FunctionLifter::VisitInstructions(uint64_t address) { - remill::Instruction inst; - - // Recursively decode and lift all instructions that we come across. - while (!edge_work_list.empty()) { - auto [inst_addr, from_addr] = *(edge_work_list.begin()); - auto insn_context = this->decoding_contexts[{inst_addr, from_addr}]; - - - edge_work_list.erase(edge_work_list.begin()); - - llvm::BasicBlock *const block = edge_to_dest_block[{from_addr, inst_addr}]; - CHECK_NOTNULL(block); - if (!block->empty()) { - continue; // Already handled. - } - - llvm::BasicBlock *&inst_block = addr_to_block[inst_addr]; - if (!inst_block) { - inst_block = block; - - // We've already lifted this instruction via another control-flow edge. - } else { - auto br = llvm::BranchInst::Create(inst_block, block); - AnnotateInstruction(br, pc_annotation_id, pc_annotation); - continue; - } - - // Decode. - auto next_context = DecodeInstructionInto(inst_addr, false /* is_delayed */, - &inst, insn_context); - if (!next_context) { - if (inst_addr == func_address) { - inst.pc = inst_addr; - inst.arch_name = options.arch->arch_name; - - // Failed to decode the first instruction of the function, but we can - // possibly recover via a tail-call to a redirection address! - if (inst_addr != func_address) { - // TODO(Ian): is this context right? - this->BranchToInst(func_address, inst_addr, insn_context, block); - continue; - } - } - - - // TODO(Ian): If we hit this in our new model then the low-level lift is a failure and we need to mark this somehow... - // otherwise we are inventing the absence of control flow... - LOG(ERROR) << "Could not decode instruction at " << std::hex << inst_addr - << " reachable from instruction " << from_addr - << " in function at " << func_address << std::dec; - - auto call = - remill::AddTerminatingTailCall(block, intrinsics.error, intrinsics); - AnnotateInstruction(call, pc_annotation_id, pc_annotation); - MuteStateEscape(call); - continue; - - // Didn't get a valid instruction. - } else if (!inst.IsValid() || inst.IsError()) { - auto call = - remill::AddTerminatingTailCall(block, intrinsics.error, intrinsics); - AnnotateInstruction(call, pc_annotation_id, pc_annotation); - MuteStateEscape(call); - continue; - } else { - VisitInstruction(inst, block, insn_context); - } - } -} - // Get the annotation for the program counter `pc`, or `nullptr` if we're // not doing annotations.
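That `GetPCAnnotation` helper (its definition follows) produces the metadata node consumed elsewhere; the reading side is not shown in this diff. A sketch of recovering the annotated program counter, assuming the node wraps a single `ConstantInt` the way the lifter produces it; the helper name `ReadPCAnnotation` is illustrative, not anvill API:

#include <cstdint>
#include <optional>

#include <llvm/IR/Constants.h>
#include <llvm/IR/Instruction.h>
#include <llvm/IR/Metadata.h>

// Recover the program counter recorded on a lifted instruction, or
// std::nullopt when the instruction carries no such annotation.
static std::optional<uint64_t> ReadPCAnnotation(const llvm::Instruction &inst,
                                                unsigned pc_annotation_id) {
  if (auto node = inst.getMetadata(pc_annotation_id)) {
    if (node->getNumOperands() == 1u) {
      if (auto pc = llvm::mdconst::dyn_extract<llvm::ConstantInt>(
              node->getOperand(0))) {
        return pc->getZExtValue();
      }
    }
  }
  return std::nullopt;
}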
llvm::MDNode *FunctionLifter::GetPCAnnotation(uint64_t pc) const { @@ -1104,6 +156,7 @@ llvm::MDNode *FunctionLifter::GetPCAnnotation(uint64_t pc) const { } } + // Declare the function decl `decl` and return an `llvm::Function *`. llvm::Function *FunctionLifter::GetOrDeclareFunction(const FunctionDecl &decl) { const auto func_type = llvm::dyn_cast( @@ -1146,136 +199,8 @@ llvm::Function *FunctionLifter::GetOrDeclareFunction(const FunctionDecl &decl) { if (decl.is_noreturn) { native_func->addFnAttr(llvm::Attribute::NoReturn); } - return native_func; -} - -// Allocate and initialize the state structure. -void FunctionLifter::AllocateAndInitializeStateStructure( - llvm::BasicBlock *block, const remill::Arch *arch) { - llvm::IRBuilder<> ir(block); - const auto state_type = arch->StateStructType(); - switch (options.state_struct_init_procedure) { - case StateStructureInitializationProcedure::kNone: - state_ptr = ir.CreateAlloca(state_type); - break; - case StateStructureInitializationProcedure::kZeroes: - state_ptr = ir.CreateAlloca(state_type); - ir.CreateStore(llvm::Constant::getNullValue(state_type), state_ptr); - break; - case StateStructureInitializationProcedure::kUndef: - state_ptr = ir.CreateAlloca(state_type); - ir.CreateStore(llvm::UndefValue::get(state_type), state_ptr); - break; - case StateStructureInitializationProcedure::kGlobalRegisterVariables: - state_ptr = ir.CreateAlloca(state_type); - InitializeStateStructureFromGlobalRegisterVariables(block); - break; - case StateStructureInitializationProcedure:: - kGlobalRegisterVariablesAndZeroes: - state_ptr = ir.CreateAlloca(state_type); - ir.CreateStore(llvm::Constant::getNullValue(state_type), state_ptr); - InitializeStateStructureFromGlobalRegisterVariables(block); - break; - case StateStructureInitializationProcedure:: - kGlobalRegisterVariablesAndUndef: - state_ptr = ir.CreateAlloca(state_type); - ir.CreateStore(llvm::UndefValue::get(state_type), state_ptr); - InitializeStateStructureFromGlobalRegisterVariables(block); - break; - } - - ArchSpecificStateStructureInitialization(block); -} -// Perform architecture-specific initialization of the state structure -// in `block`. 
-void FunctionLifter::ArchSpecificStateStructureInitialization( - llvm::BasicBlock *block) { - - if (is_x86_or_amd64) { - llvm::IRBuilder<> ir(block); - - const auto ssbase_reg = options.arch->RegisterByName("SSBASE"); - const auto fsbase_reg = options.arch->RegisterByName("FSBASE"); - const auto gsbase_reg = options.arch->RegisterByName("GSBASE"); - const auto dsbase_reg = options.arch->RegisterByName("DSBASE"); - const auto esbase_reg = options.arch->RegisterByName("ESBASE"); - const auto csbase_reg = options.arch->RegisterByName("CSBASE"); - - if (gsbase_reg) { - const auto gsbase_val = llvm::ConstantExpr::getPtrToInt( - llvm::ConstantExpr::getAddrSpaceCast( - llvm::ConstantExpr::getNullValue( - llvm::PointerType::get(block->getContext(), 256)), - llvm::PointerType::get(block->getContext(), 0)), - pc_reg_type); - ir.CreateStore(gsbase_val, gsbase_reg->AddressOf(state_ptr, ir)); - } - - if (fsbase_reg) { - const auto fsbase_val = llvm::ConstantExpr::getPtrToInt( - llvm::ConstantExpr::getAddrSpaceCast( - llvm::ConstantExpr::getNullValue( - llvm::PointerType::get(block->getContext(), 257)), - llvm::PointerType::get(block->getContext(), 0)), - pc_reg_type); - ir.CreateStore(fsbase_val, fsbase_reg->AddressOf(state_ptr, ir)); - } - - if (ssbase_reg) { - ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), - ssbase_reg->AddressOf(state_ptr, ir)); - } - - if (dsbase_reg) { - ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), - dsbase_reg->AddressOf(state_ptr, ir)); - } - - if (esbase_reg) { - ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), - esbase_reg->AddressOf(state_ptr, ir)); - } - - if (csbase_reg) { - ir.CreateStore(llvm::Constant::getNullValue(pc_reg_type), - csbase_reg->AddressOf(state_ptr, ir)); - } - } -} - -// Initialize the state structure with default values, loaded from global -// variables. The purpose of these global variables is to show that there are -// some unmodelled external dependencies inside of a lifted function. -void FunctionLifter::InitializeStateStructureFromGlobalRegisterVariables( - llvm::BasicBlock *block) { - - // Get or create globals for all top-level registers. The idea here is that - // the spec could feasibly miss some dependencies, and so after optimization, - // we'll be able to observe uses of `__anvill_reg_*` globals, and handle - // them appropriately. - - llvm::IRBuilder<> ir(block); - - options.arch->ForEachRegister([=, &ir](const remill::Register *reg_) { - if (auto reg = reg_->EnclosingRegister(); - reg_ == reg && reg != sp_reg && reg != pc_reg) { - - std::stringstream ss; - ss << kUnmodelledRegisterPrefix << reg->name; - const auto reg_name = ss.str(); - - auto reg_global = semantics_module->getGlobalVariable(reg_name); - if (!reg_global) { - reg_global = new llvm::GlobalVariable( - *semantics_module, reg->type, false, - llvm::GlobalValue::ExternalLinkage, nullptr, reg_name); - } - - const auto reg_ptr = reg->AddressOf(state_ptr, block); - ir.CreateStore(ir.CreateLoad(reg->type, reg_global), reg_ptr); - } - }); + return native_func; } // Set up `native_func` to be able to call `lifted_func`. This means @@ -1297,15 +222,16 @@ void FunctionLifter::CallLiftedFunctionFromNativeFunction( llvm::Value *mem_ptr = llvm::Constant::getNullValue(mem_ptr_type); // Stack-allocate and initialize the state pointer. 
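One useful property of the `__anvill_reg_*` globals created above: any of them that still has uses after optimization pinpoints a register dependency the specification failed to model. A sketch of that post-optimization audit; the literal prefix stands in for `kUnmodelledRegisterPrefix`, whose exact value is defined elsewhere in anvill, and `starts_with` assumes a recent LLVM `StringRef`:

#include <string>
#include <vector>

#include <llvm/IR/Module.h>

// Collect the names of unmodelled-register globals that survived
// optimization with at least one remaining use.
static std::vector<std::string>
FindUnmodelledRegisters(const llvm::Module &module) {
  std::vector<std::string> names;
  for (const llvm::GlobalVariable &gv : module.globals()) {
    if (gv.getName().starts_with("__anvill_reg_") && !gv.use_empty()) {
      names.push_back(gv.getName().str());
    }
  }
  return names;
}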
- AllocateAndInitializeStateStructure(block, decl.arch); + auto native_state_ptr = AllocateAndInitializeStateStructure(block, decl.arch); - auto pc_ptr = pc_reg->AddressOf(state_ptr, block); - auto sp_ptr = sp_reg->AddressOf(state_ptr, block); + auto pc_ptr = pc_reg->AddressOf(native_state_ptr, block); + auto sp_ptr = sp_reg->AddressOf(native_state_ptr, block); llvm::IRBuilder<> ir(block); // Initialize the program counter. - auto pc = options.program_counter_init_procedure(ir, pc_reg, func_address); + auto pc = + options.program_counter_init_procedure(ir, address_type, func_address); ir.CreateStore(pc, pc_ptr); // Initialize the stack pointer. @@ -1324,7 +250,7 @@ void FunctionLifter::CallLiftedFunctionFromNativeFunction( options.return_address_init_procedure(ir, address_type, func_address); mem_ptr = StoreNativeValue(ra, decl.return_address, types, intrinsics, - block, state_ptr, mem_ptr); + block, native_state_ptr, mem_ptr); } // Store the function parameters either into the state struct @@ -1333,11 +259,11 @@ void FunctionLifter::CallLiftedFunctionFromNativeFunction( for (auto &arg : native_func->args()) { const auto ¶m_decl = decl.params[arg_index++]; mem_ptr = StoreNativeValue(&arg, param_decl, types, intrinsics, block, - state_ptr, mem_ptr); + native_state_ptr, mem_ptr); } llvm::Value *lifted_func_args[remill::kNumBlockArgs] = {}; - lifted_func_args[remill::kStatePointerArgNum] = state_ptr; + lifted_func_args[remill::kStatePointerArgNum] = native_state_ptr; lifted_func_args[remill::kMemoryPointerArgNum] = mem_ptr; lifted_func_args[remill::kPCArgNum] = pc; auto call_to_lifted_func = ir.CreateCall(lifted_func->getFunctionType(), @@ -1357,24 +283,14 @@ void FunctionLifter::CallLiftedFunctionFromNativeFunction( AnnotateInstructions(block, pc_annotation_id, GetPCAnnotation(func_address)); llvm::Value *ret_val = nullptr; - - if (decl.returns.size() == 1) { - ret_val = LoadLiftedValue(decl.returns.front(), types, intrinsics, block, - state_ptr, mem_ptr); - ir.SetInsertPoint(block); - - } else if (1 < decl.returns.size()) { - ret_val = llvm::UndefValue::get(native_func->getReturnType()); - auto index = 0u; - for (auto &ret_decl : decl.returns) { - auto partial_ret_val = LoadLiftedValue(ret_decl, types, intrinsics, block, - state_ptr, mem_ptr); - ir.SetInsertPoint(block); - unsigned indexes[] = {index}; - ret_val = ir.CreateInsertValue(ret_val, partial_ret_val, indexes); - index += 1; - } + if (decl.returns.ordered_locs.size() != 0 && + !decl.returns.type->isVoidTy()) { + ret_val = + LoadLiftedValue(decl.returns, types, intrinsics, this->options.arch, + block, native_state_ptr, mem_ptr); } + ir.SetInsertPoint(block); + auto memory_escape = GetMemoryEscapeFunc(intrinsics); llvm::Value *escape_args[] = {mem_ptr}; @@ -1387,121 +303,98 @@ void FunctionLifter::CallLiftedFunctionFromNativeFunction( } } + // In practice, lifted functions are not workable as is; we need to emulate // `__attribute__((flatten))`, i.e. recursively inline as much as possible, so // that all semantics and helpers are completely inlined. void FunctionLifter::RecursivelyInlineLiftedFunctionIntoNativeFunction(void) { - std::vector calls_to_inline; + DCHECK(!llvm::verifyModule(*this->native_func->getParent(), &llvm::errs())); + this->RecursivelyInlineFunctionCallees(this->native_func); +} - CHECK(!llvm::verifyModule(*this->native_func->getParent(), &llvm::errs())); +// Lift a function. Will return `nullptr` if the memory is +// not accessible or executable. 
+llvm::Function *FunctionLifter::DeclareFunction(const FunctionDecl &decl) { - // Set of instructions that we should not annotate because we can't tie them - // to a particular instruction address. - std::unordered_set insts_without_provenance; - if (options.pc_metadata_name) { - for (auto &inst : llvm::instructions(*native_func)) { - if (!inst.getMetadata(pc_annotation_id)) { - insts_without_provenance.insert(&inst); - } - } - } + // This is our higher-level function, i.e. it presents itself more like + // a function compiled from C/C++, rather than being a three-argument Remill + // function. In this function, we will stack-allocate a `State` structure, + // then call a `lifted_func` below, which will embed the instruction + // semantics. + return GetOrDeclareFunction(decl); +} - for (auto changed = true; changed; changed = !calls_to_inline.empty()) { - calls_to_inline.clear(); +BasicBlockLifter &FunctionLifter::GetOrCreateBasicBlockLifter(Uid uid) { + std::pair key{curr_decl->address, uid.value}; + auto lifter = this->bb_lifters.find(key); + if (lifter != this->bb_lifters.end()) { + return lifter->second; + } + std::unique_ptr context = + std::make_unique( + this->curr_decl->GetBlockContext(uid)); - for (auto &inst : llvm::instructions(*native_func)) { - if (auto call_inst = llvm::dyn_cast(&inst); call_inst) { - if (auto called_func = call_inst->getCalledFunction(); - called_func && !called_func->isDeclaration() && - !called_func->hasFnAttribute(llvm::Attribute::NoInline)) { - calls_to_inline.push_back(call_inst); - } - } - } + auto &cfg = this->curr_decl->cfg; + CodeBlock defblk = cfg.find(uid)->second; - for (llvm::CallInst *call_inst : calls_to_inline) { - llvm::MDNode *call_pc = nullptr; - if (options.pc_metadata_name) { - call_pc = call_inst->getMetadata(pc_annotation_id); - } + auto inserted = this->bb_lifters.emplace( + key, + BasicBlockLifter(std::move(context), *this->curr_decl, std::move(defblk), + this->options, this->semantics_module.get(), + this->type_specifier, *this)); + return inserted.first->second; +} - llvm::InlineFunctionInfo info; - auto res = llvm::InlineFunction(*call_inst, info); +const BasicBlockLifter & +FunctionLifter::LiftBasicBlockFunction(const CodeBlock &blk) { + auto &lifter = this->GetOrCreateBasicBlockLifter(blk.uid); + lifter.LiftBasicBlockFunction(); + return lifter; +} - CHECK(res.isSuccess()); +void FunctionLifter::VisitBlock(CodeBlock blk, + llvm::Value *lifted_function_state, + llvm::Value *abstract_stack) { + LiftBasicBlockFunction(blk); +} - // Propagate PC metadata from call sites into inlined call bodies. - if (options.pc_metadata_name) { - for (auto &inst : llvm::instructions(*native_func)) { - if (!inst.getMetadata(pc_annotation_id)) { - if (insts_without_provenance.count(&inst)) { - continue; +void FunctionLifter::VisitBlocks(llvm::Value *lifted_function_state, + llvm::Value *abstract_stack) { + DLOG(INFO) << "Num blocks for func " << std::hex << this->curr_decl->address + << ": " << this->curr_decl->cfg.size(); - // This call site had no associated PC metadata, and so we want - // to exclude any inlined code from accidentally being associated - // with other PCs on future passes. - } else if (!call_pc) { - insts_without_provenance.insert(&inst); - // We can propagate the annotation. 
- } else { - inst.setMetadata(pc_annotation_id, call_pc); - } - } - } - } - } + for (const auto &[uid, blk] : this->curr_decl->cfg) { + DLOG(INFO) << "Visiting: " << std::hex << blk.addr << " " << std::dec << uid.value; + this->VisitBlock(blk, lifted_function_state, abstract_stack); } +} - // Initialize cleanup optimizations +LiftedFunction FunctionLifter::CreateLiftedFunction(const std::string &name) { + auto new_func = + options.arch->DefineLiftedFunction(name, semantics_module.get()); + auto state_ptr = remill::NthArgument(new_func, remill::kStatePointerArgNum); + auto pc_arg = remill::NthArgument(new_func, remill::kPCArgNum); + auto mem_arg = remill::NthArgument(new_func, remill::kMemoryPointerArgNum); - if (llvm::verifyFunction(*native_func, &llvm::errs())) { - LOG(FATAL) << "Function verification failed: " - << native_func->getName().str() << " " - << remill::LLVMThingToString(native_func->getType()); - } + new_func->removeFnAttr(llvm::Attribute::NoInline); + new_func->addFnAttr(llvm::Attribute::InlineHint); + new_func->addFnAttr(llvm::Attribute::AlwaysInline); + new_func->setLinkage(llvm::GlobalValue::InternalLinkage); - llvm::legacy::FunctionPassManager fpm(semantics_module.get()); - fpm.add(llvm::createCFGSimplificationPass()); - fpm.add(llvm::createPromoteMemoryToRegisterPass()); - fpm.add(llvm::createReassociatePass()); - fpm.add(llvm::createDeadStoreEliminationPass()); - fpm.add(llvm::createDeadCodeEliminationPass()); - fpm.add(llvm::createSROAPass()); - fpm.add(llvm::createDeadCodeEliminationPass()); - fpm.add(llvm::createInstructionCombiningPass()); - fpm.doInitialization(); - fpm.run(*native_func); - fpm.doFinalization(); - - ClearVariableNames(native_func); -} - -// Lift a function. Will return `nullptr` if the memory is -// not accessible or executable. -llvm::Function *FunctionLifter::DeclareFunction(const FunctionDecl &decl) { - - // This is our higher-level function, i.e. it presents itself more like - // a function compiled from C/C++, rather than being a three-argument Remill - // function. In this function, we will stack-allocate a `State` structure, - // then call a `lifted_func` below, which will embed the instruction - // semantics. - return GetOrDeclareFunction(decl); + return {new_func, state_ptr, pc_arg, mem_arg}; } - // Lift a function. Will return `nullptr` if the memory is // not accessible or executable. llvm::Function *FunctionLifter::LiftFunction(const FunctionDecl &decl) { addr_to_func.clear(); edge_work_list.clear(); - edge_to_dest_block.clear(); addr_to_block.clear(); this->op_lifter->ClearCache(); curr_decl = &decl; curr_inst = nullptr; - state_ptr = nullptr; mem_ptr_ref = nullptr; func_address = decl.address; native_func = DeclareFunction(decl); @@ -1540,30 +433,34 @@ llvm::Function *FunctionLifter::LiftFunction(const FunctionDecl &decl) { return native_func; } + // Function has no valid instructions. + auto &cfg = decl.cfg; + if (cfg.find(decl.entry_uid) == cfg.end()) { + LOG(ERROR) << "Function missing entry block " << std::hex << decl.address; + return nullptr; + } + + // Every lifted function starts as a clone of __remill_basic_block. That // prototype has multiple arguments (memory pointer, state pointer, program // counter). This extracts the state pointer. 
   // Every lifted function starts as a clone of __remill_basic_block. That
   // prototype has multiple arguments (memory pointer, state pointer, program
   // counter). This extracts the state pointer.
-  lifted_func = options.arch->DefineLiftedFunction(
-      native_func->getName().str() + ".lifted", semantics_module.get());
+  auto lifted_func_st =
+      this->CreateLiftedFunction(native_func->getName().str() + ".lifted");
+  lifted_func = lifted_func_st.func;

-  state_ptr = remill::NthArgument(lifted_func, remill::kStatePointerArgNum);
-
-  lifted_func->removeFnAttr(llvm::Attribute::NoInline);
-  lifted_func->addFnAttr(llvm::Attribute::InlineHint);
-  lifted_func->addFnAttr(llvm::Attribute::AlwaysInline);
-  lifted_func->setLinkage(llvm::GlobalValue::InternalLinkage);
-
-  const auto pc = remill::NthArgument(lifted_func, remill::kPCArgNum);
+  const auto pc = lifted_func_st.pc_arg;
   const auto entry_block = &(lifted_func->getEntryBlock());
   pc_reg_ref =
-      this->op_lifter->LoadRegAddress(entry_block, state_ptr, pc_reg->name)
+      this->op_lifter
+          ->LoadRegAddress(entry_block, lifted_func_st.state_ptr, pc_reg->name)
           .first;
-  next_pc_reg_ref = this->op_lifter
-      ->LoadRegAddress(entry_block, state_ptr, remill::kNextPCVariableName)
-          .first;
+  next_pc_reg_ref = this->op_lifter
+                        ->LoadRegAddress(entry_block, lifted_func_st.state_ptr,
+                                         remill::kNextPCVariableName)
+                        .first;
   sp_reg_ref =
-      this->op_lifter->LoadRegAddress(entry_block, state_ptr, sp_reg->name)
+      this->op_lifter
+          ->LoadRegAddress(entry_block, lifted_func_st.state_ptr, sp_reg->name)
           .first;

   mem_ptr_ref = remill::LoadMemoryPointerRef(entry_block);
@@ -1577,6 +474,14 @@ llvm::Function *FunctionLifter::LiftFunction(const FunctionDecl &decl) {

   ir.CreateStore(pc, next_pc_reg_ref);
   ir.CreateStore(pc, pc_reg_ref);
+
+  auto abstract_stack = ir.CreateAlloca(
+      AbstractStack::StackTypeFromSize(llvm_context, decl.maximum_depth),
+      nullptr, "abstract_stack");
+
+  abstract_stack->setMetadata(kStackMetadata,
+                              llvm::MDNode::get(llvm_context, {}));
+
   // Add a branch between the first block of the lifted function, which sets
   // up some local variables, and the block that will contain the lifted
   // instruction.
@@ -1585,36 +490,44 @@ llvm::Function *FunctionLifter::LiftFunction(const FunctionDecl &decl) {
   //
   // TODO: This could be a thunk, that we are maybe lifting on purpose.
   //       How should control flow redirection behave in this case?
+  const auto &entry_lifter =
+      this->GetOrCreateBasicBlockLifter(this->curr_decl->entry_uid);
+
+  auto call_inst = entry_lifter.CallBasicBlockFunction(
+      ir, lifted_func_st.state_ptr, abstract_stack, this->mem_ptr_ref);

-  // TODO(Ian): for a thumb vs arm function we need to figure out how to set up
-  // the correct initial context; maybe the spec should have a section
-  // (context reg assignments or something) where we apply those assignments
-  // to a default initial context.
+  auto memptr = remill::LoadMemoryPointer(ir, this->intrinsics);

-  auto default_mapping = options.arch->CreateInitialContext();
-  for (const auto &[k, v] : decl.context_assignments) {
-    default_mapping.UpdateContextReg(k, v);
+  if (!call_inst->getType()->isVoidTy()) {
+    // TODO(Ian): this memptr is not right. The risk is that the subsequent
+    // return-value read might not be tied to this store.
+    memptr = StoreNativeValue(call_inst, curr_decl->returns,
+                              type_specifier.Dictionary(), intrinsics, ir,
+                              lifted_func_st.state_ptr, memptr);
   }

-  ir.CreateBr(GetOrCreateBlock(0u, func_address, default_mapping));
+  ir.CreateRet(memptr);

   AnnotateInstructions(entry_block, pc_annotation_id,
                        GetPCAnnotation(func_address));
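// StoreNativeValue (an anvill lifter utility) is what bridges native SSA
// values back into the emulated machine: it stores the entry block function's
// native return value into the locations named by `curr_decl->returns`, and
// yields an updated memory pointer, which is what `ir.CreateRet(memptr)`
// above returns. As a rough illustration only (the register is hypothetical),
// for a return value living in RAX this boils down to:
//
//   state->gpr.rax.qword = call_inst;  // via lifted_func_st.state_ptr
//   return memptr;                     // the threaded memory token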
+  DLOG(INFO) << "Visiting insns";

   // Go lift all instructions!
-  VisitInstructions(func_address);
+  VisitBlocks(lifted_func_st.state_ptr, abstract_stack);
+
+  DCHECK(!llvm::verifyFunction(*this->lifted_func, &llvm::errs()));

   // Fill up `native_func` with a basic block and make it call `lifted_func`.
   // This creates things like the stack-allocated `State` structure.
   CallLiftedFunctionFromNativeFunction(decl);

+
   // The last stage is that we need to recursively inline all calls to semantics
   // functions into `native_func`.
   RecursivelyInlineLiftedFunctionIntoNativeFunction();
-
   return native_func;
 }

-
 // Returns the address of a named function.
 std::optional<uint64_t>
 FunctionLifter::AddressOfNamedFunction(const std::string &func_name) const {
@@ -1680,7 +593,7 @@ llvm::Function *EntityLifter::LiftEntity(const FunctionDecl &decl) const {

   // Add the function to the entity lifter's target module.
   const auto func_in_target_module =
-      func_lifter.AddFunctionToContext(func, decl.address, *impl);
+      func_lifter.AddFunctionToContext(func, decl, *impl);

   // If we had a previous declaration/definition, then we want to make sure
   // that we replaced its body, and we also want to make sure that if our
@@ -1694,6 +607,7 @@ llvm::Function *EntityLifter::LiftEntity(const FunctionDecl &decl) const {
     }
   }

+
   return func_in_target_module;
 }

@@ -1741,7 +655,7 @@ llvm::Function *EntityLifter::DeclareEntity(const FunctionDecl &decl) const {

   if (const auto func = func_lifter.DeclareFunction(decl)) {
     DCHECK(!module->getFunction(func->getName()));
-    return func_lifter.AddFunctionToContext(func, decl.address, *impl);
+    return func_lifter.AddFunctionToContext(func, decl, *impl);
   } else {
     return nullptr;
   }
@@ -1770,11 +684,36 @@ static void EraseFunctionBody(llvm::Function *func) {

 // function, and copy the function into the context's module. Returns the
 // version of `func` inside the module of the lifter context.
 llvm::Function *
-FunctionLifter::AddFunctionToContext(llvm::Function *func, uint64_t address,
+FunctionLifter::AddFunctionToContext(llvm::Function *func,
+                                     const FunctionDecl &decl,
                                      EntityLifterImpl &lifter_context) const {
   const auto target_module = options.module;
   auto &module_context = target_module->getContext();

+  std::string prefix = "func" + std::to_string(decl.address);
+
+  if (!func->isDeclaration()) {
+    for (auto &[block_uid, block] : decl.cfg) {
+      CHECK(block_uid == block.uid);
+      std::string name = prefix + "basic_block" + std::to_string(block.addr) +
+                         "_" + std::to_string(block.uid.value);
+
+      auto new_version = target_module->getFunction(name);
+      auto old_version = semantics_module->getFunction(name);
+      if (!new_version) {
+        auto type =
+            llvm::dyn_cast<llvm::FunctionType>(remill::RecontextualizeType(
+                old_version->getFunctionType(), module_context));
+        new_version = llvm::Function::Create(
+            type, llvm::GlobalValue::ExternalLinkage, name, target_module);
+      }
+      remill::CloneFunctionInto(old_version, new_version);
+      new_version->setMetadata(
+          kBasicBlockUidMetadata,
+          this->GetUidAnnotation(block.uid, module_context));
+      CHECK(anvill::GetBasicBlockUid(new_version).has_value());
+    }
+  }
+
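// Naming scheme note: block functions are keyed by decimal-rendered
// addresses and uids (std::to_string), so a block at 0x401010 with uid 3
// inside the function at 0x401000 is cloned into the target module as
//
//   func4198400basic_block4198416_3
//
// (4198400 == 0x401000, 4198416 == 0x401010), and then tagged with
// kBasicBlockUidMetadata so that anvill::GetBasicBlockUid can recover uid 3
// from the cloned function alone.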
   const auto name = func->getName().str();
   const auto module_func_type = llvm::dyn_cast<llvm::FunctionType>(
       remill::RecontextualizeType(func->getFunctionType(), module_context));
@@ -1794,13 +733,14 @@ FunctionLifter::AddFunctionToContext(llvm::Function *func, uint64_t address,

   // It's possible that we've lifted this function before, but that it was
   // renamed by user code, and so the above check failed. Go check for that.
   } else {
-    lifter_context.ForEachEntityAtAddress(address, [&](llvm::Constant *gv) {
-      if (auto gv_func = llvm::dyn_cast<llvm::Function>(gv);
-          gv_func && gv_func->getFunctionType() == module_func_type) {
-        CHECK(!new_version);
-        new_version = gv_func;
-      }
-    });
+    lifter_context.ForEachEntityAtAddress(
+        decl.address, [&](llvm::Constant *gv) {
+          if (auto gv_func = llvm::dyn_cast<llvm::Function>(gv);
+              gv_func && gv_func->getFunctionType() == module_func_type) {
+            CHECK(!new_version);
+            new_version = gv_func;
+          }
+        });
   }

   // This is the first time we're lifting this function, or even the first time
@@ -1818,13 +758,13 @@ FunctionLifter::AddFunctionToContext(llvm::Function *func, uint64_t address,

   // just in case it will be needed in future lifts.
   EraseFunctionBody(func);

-  if (auto func_annotation = GetPCAnnotation(address)) {
+  if (auto func_annotation = GetPCAnnotation(decl.address)) {
     new_version->setMetadata(pc_annotation_id, func_annotation);
   }

   // Update the context to keep its internal concepts of what LLVM objects
   // correspond with which native binary addresses.
-  lifter_context.AddEntity(new_version, address);
+  lifter_context.AddEntity(new_version, decl.address);

   // The function we just lifted may call other functions, so we need to go
   // find those and also use them to update the context.
@@ -1843,59 +783,4 @@ FunctionLifter::AddFunctionToContext(llvm::Function *func, uint64_t address,

   return new_version;
 }

-
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::NormalInsn &normal) {
-  this->lifter.VisitNormal(this->inst, this->block, normal);
-}
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::InvalidInsn &) {
-  this->lifter.VisitInvalid(inst, block);
-}
-
-
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::ErrorInsn &) {
-  this->lifter.VisitError(inst, delayed_inst, block);
-}
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::DirectJump &djump) {
-  this->lifter.VisitDirectJump(inst, delayed_inst, block, djump);
-}
-
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::IndirectJump &ijump) {
-  this->lifter.VisitIndirectJump(inst, delayed_inst, block, ijump,
-                                 prev_context);
-}
-
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::IndirectFunctionCall &icall) {
-  this->lifter.VisitIndirectFunctionCall(inst, delayed_inst, block, icall,
-                                         prev_context);
-}
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::DirectFunctionCall &dcall) {
-  this->lifter.VisitDirectFunctionCall(inst, delayed_inst, block, dcall,
-                                       prev_context);
-}
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::FunctionReturn &) {
-  this->lifter.VisitFunctionReturn(inst, delayed_inst, block);
-}
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::AsyncHyperCall &async_hcall) {
-  this->lifter.VisitAsyncHyperCall(inst, delayed_inst, block);
-}
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::ConditionalInstruction &cond_insn) {
-  this->lifter.VisitConditionalInstruction(inst, delayed_inst, block, cond_insn,
-                                           this->prev_context);
-}
-
-void FunctionLifter::FlowVisitor::operator()(
-    const remill::Instruction::NoOp &noop) {
-  this->lifter.VisitNoOp(inst, block, noop);
-}
-
 }  // namespace anvill
diff --git a/lib/Lifters/FunctionLifter.h b/lib/Lifters/FunctionLifter.h
index bf0043f74..a10413868 100644
--- a/lib/Lifters/FunctionLifter.h
+++ b/lib/Lifters/FunctionLifter.h
@@ -9,9 +9,11 @@
 #pragma once

 #include
-#include
 #include
+#include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -27,6 +29,22 @@
 #include
 #include

+#include "CodeLifter.h"
+#include "Lifters/BasicBlockLifter.h"
+
+namespace std {
+template <typename T1, typename T2>
+struct hash<std::pair<T1, T2>> {
+  std::size_t operator()(std::pair<T1, T2> const &p) const {
+    // llvm::hash_combine returns the combined hash code rather than mutating
+    // an accumulator, so the result must be returned directly.
+    return llvm::hash_combine(p.first, p.second);
+  }
+};
+}  // namespace std
+
 namespace llvm {
 class Constant;
 class Function;
@@ -48,12 +66,24 @@ class MemoryProvider;
 class TypeProvider;
 struct ControlFlowTargetList;

+
+struct LiftedFunction {
+  llvm::Function *func;
+  llvm::Argument *state_ptr;
+  llvm::Argument *pc_arg;
+  llvm::Argument *mem_ptr;
+};
+
+
 // Orchestrates lifting of instructions and control-flow between instructions.
-class FunctionLifter {
+class FunctionLifter : public CodeLifter {
+  friend class BasicBlockLifter;
+
  public:
   ~FunctionLifter(void);

-  FunctionLifter(const LifterOptions &options_);
+
+  static FunctionLifter CreateFunctionLifter(const LifterOptions &options_);

   // Declare a lifted function. Will return `nullptr` if the memory is
   // not accessible or executable.
@@ -70,49 +100,27 @@ class FunctionLifter {
   // Update the associated entity lifter with information about this
   // function, and copy the function into the context's module. Returns the
   // version of `func` inside the module of the lifter context.
-  llvm::Function *AddFunctionToContext(llvm::Function *func, uint64_t address,
+  llvm::Function *AddFunctionToContext(llvm::Function *func,
+                                       const FunctionDecl &decl,
                                        EntityLifterImpl &lifter_context) const;

- private:
-  const LifterOptions &options;
-  const MemoryProvider &memory_provider;
-  const TypeProvider &type_provider;
-  const TypeTranslator type_specifier;
+  // Get or create a basic block lifter for the basic block with the specified
+  // uid. If a lifter for the uid does not exist, this function will create it.
+  BasicBlockLifter &GetOrCreateBasicBlockLifter(Uid uid);

-  // Semantics module containing all instruction semantics.
-  std::unique_ptr<llvm::Module> semantics_module;
+  const BasicBlockLifter &LiftBasicBlockFunction(const CodeBlock &);

-  // Context associated with `module`.
-  llvm::LLVMContext &llvm_context;
+  llvm::Function *GetBasicBlockFunction(uint64_t address) const;

-  // Remill intrinsics inside of `module`.
-  remill::IntrinsicTable intrinsics;
-
-  remill::OperandLifter::OpLifterPtr op_lifter;
-
-  // Specification counter and stack pointer registers.
-  const remill::Register *const pc_reg;
-  const remill::Register *const sp_reg;
-
-  // Are we lifting SPARC code? This affects whether or not we need to do
-  // double checking on function return addresses.
-  const bool is_sparc;
+ private:
+  FunctionLifter(const LifterOptions &options_,
+                 std::unique_ptr<llvm::Module> semantics_module);

-  // Are we lifting x86(-64) code?
-  const bool is_x86_or_amd64;
+  // Semantics module containing all instruction semantics.
+  std::unique_ptr<llvm::Module> semantics_module;

-  // Convenient to keep around.
-  llvm::Type *const i8_type;
-  llvm::Constant *const i8_zero;
-  llvm::Type *const i32_type;
-  llvm::PointerType *const mem_ptr_type;
-  llvm::PointerType *const state_ptr_type;
-  llvm::IntegerType *const address_type;
-  llvm::Type *const pc_reg_type{nullptr};
+  TypeTranslator type_specifier;
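// A self-contained sketch of what the std::hash<std::pair> specialization at
// the top of this header enables (hypothetical values): pair keys can be used
// directly with the standard unordered containers, as the bb_lifters member
// below relies on.
//
//   std::unordered_map<std::pair<uint64_t, uint64_t>, int> m;
//   m[{0x401000, 1}] = 42;  // (function address, block uid) -> payload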
-  // Metadata node to attach to lifted instructions to relate them to
-  // original instructions.
-  unsigned pc_annotation_id{0};

   llvm::MDNode *pc_annotation{nullptr};
@@ -128,8 +136,6 @@ class FunctionLifter {

   // Three-argument Remill function into which instructions are lifted.
   llvm::Function *lifted_func{nullptr};

-  // State pointer in `lifted_func`.
-  llvm::Value *state_ptr{nullptr};

   // Pointer to the `Memory *` in `lifted_func`.
   llvm::Value *mem_ptr_ref{nullptr};
@@ -169,11 +175,6 @@ class FunctionLifter {

   std::map<std::pair<uint64_t, uint64_t>, remill::DecodingContext>
       decoding_contexts;

-  // Maps control flow edges `(from_pc -> to_pc)` to the basic block associated
-  // with `to_pc`.
-  std::map<std::pair<uint64_t, uint64_t>, llvm::BasicBlock *>
-      edge_to_dest_block;
-
   // Maps an instruction address to a basic block that will hold the lifted code
   // for that instruction.
   std::unordered_map<uint64_t, llvm::BasicBlock *> addr_to_block;
@@ -181,6 +182,10 @@ class FunctionLifter {

   // Maps program counters to lifted functions.
   std::unordered_map<uint64_t, llvm::Function *> addr_to_func;

+  // Maps a uid to the lifter for that block.
+  std::unordered_map<std::pair<uint64_t, uint64_t>, BasicBlockLifter>
+      bb_lifters;
+
   // Get the annotation for the program counter `pc`, or `nullptr` if we're
   // not doing annotations.
   llvm::MDNode *GetPCAnnotation(uint64_t pc) const;
@@ -189,112 +194,8 @@ class FunctionLifter {
   // returned function is a "high-level" function.
   llvm::Function *GetOrDeclareFunction(const FunctionDecl &decl);

-
-  llvm::BranchInst *BranchToInst(uint64_t from_addr, uint64_t to_addr,
-                                 const remill::DecodingContext &mapper,
-                                 llvm::BasicBlock *from_block);
-
-  // Helper to get the basic block to contain the instruction at `addr`. This
-  // function drives a work list, where the first time we ask for the
-  // instruction at `addr`, we enqueue a bit of work to decode and lift that
-  // instruction.
-  llvm::BasicBlock *GetOrCreateBlock(uint64_t from_addr, uint64_t to_addr,
-                                     const remill::DecodingContext &mapper);
-
-  // Attempts to look up any redirection of the given address, and then
-  // calls GetOrCreateBlock.
-  llvm::BasicBlock *
-  GetOrCreateTargetBlock(const remill::Instruction &from_inst, uint64_t to_addr,
-                         const remill::DecodingContext &mapper);

   void InsertError(llvm::BasicBlock *block);

-  /* std::variant<NormalInsn, NoOp, InvalidInsn, ErrorInsn, DirectJump,
-     IndirectJump, IndirectFunctionCall, DirectFunctionCall,
-     FunctionReturn, AsyncHyperCall, ConditionalInstruction> */
-
-  struct FlowVisitor {
-    FunctionLifter &lifter;
-    const remill::Instruction &inst;
-    llvm::BasicBlock *block;
-    std::optional<remill::Instruction> &delayed_inst;
-    const remill::DecodingContext &prev_context;
-
-
-    void operator()(const remill::Instruction::NormalInsn &);
-    void operator()(const remill::Instruction::NoOp &);
-    void operator()(const remill::Instruction::InvalidInsn &);
-    void operator()(const remill::Instruction::ErrorInsn &);
-    void operator()(const remill::Instruction::DirectJump &);
-    void operator()(const remill::Instruction::IndirectJump &);
-    void operator()(const remill::Instruction::IndirectFunctionCall &);
-    void operator()(const remill::Instruction::DirectFunctionCall &);
-    void operator()(const remill::Instruction::FunctionReturn &);
-    void operator()(const remill::Instruction::AsyncHyperCall &);
-    void operator()(const remill::Instruction::ConditionalInstruction &);
-  };
-
-  // The following `Visit*` methods exist to orchestrate control flow. The way
-  // lifting works in Remill is that the mechanics of an instruction are
-  // simulated by a single-entry, single-exit function, called a semantics
-  // function. A `remill::Instruction` is basically a fancy package of
-  // information describing what to pass to that function.
However, many - // instructions affect control-flow, and so that means that in order to - // enact the control-flow changes that are implied by an instruction, we must - // "orchestrate" lifting of control flow at a higher level, introduction - // conditional branches and such between these called to semantics functions. - - // Visit an invalid instruction. An invalid instruction is a sequence of - // bytes which cannot be decoded, or an empty byte sequence. - void VisitInvalid(const remill::Instruction &inst, llvm::BasicBlock *block); - - // Visit an error instruction. An error instruction is guaranteed to trap - // execution somehow, e.g. `ud2` on x86. Error instructions are treated - // similarly to invalid instructions, with the exception that they can have - // delay slots, and therefore the subsequent instruction may actually execute - // prior to the error. - void VisitError(const remill::Instruction &inst, - std::optional &delayed_inst, - llvm::BasicBlock *block); - - // Visit a normal instruction. Normal instructions have straight line control- - // flow semantics, i.e. after executing the instruction, execution proceeds - // to the next instruction (`inst.next_pc`). - void VisitNormal(const remill::Instruction &inst, llvm::BasicBlock *block, - const remill::Instruction::NormalInsn &norm); - - // Visit a no-op instruction. These behave identically to normal instructions - // from a control-flow perspective. - void VisitNoOp(const remill::Instruction &inst, llvm::BasicBlock *block, - const remill::Instruction::NoOp &noop); - - // Visit a direct jump control-flow instruction. The target of the jump is - // known at decode time, and the target address is available in - // `inst.branch_taken_pc`. Execution thus needs to transfer to the instruction - // (and thus `llvm::BasicBlock`) associated with `inst.branch_taken_pc`. - void VisitDirectJump(const remill::Instruction &inst, - std::optional &delayed_inst, - llvm::BasicBlock *block, - const remill::Instruction::DirectJump &norm); - - // Visit an indirect jump control-flow instruction. This may be register- or - // memory-indirect, e.g. `jmp rax` or `jmp [rax]` on x86. Thus, the target is - // not know a priori and our default mechanism for handling this is to perform - // a tail-call to the `__remill_jump` function, whose role is to be a stand-in - // something that enacts the effect of "transfer to target." - void VisitIndirectJump(const remill::Instruction &inst, - std::optional &delayed_inst, - llvm::BasicBlock *block, - const remill::Instruction::IndirectJump &ijump, - const remill::DecodingContext &prev_context); - - // Visit an indirect jump that is a jump table. - void DoSwitchBasedIndirectJump(const remill::Instruction &inst, - llvm::BasicBlock *block, - const std::vector &target_list, - const remill::Instruction::IndirectJump &norm, - const remill::DecodingContext &prev_context); remill::DecodingContext ApplyTargetList(const std::unordered_map &assignments, @@ -315,11 +216,6 @@ NormalInsn, NoOp, InvalidInsn, ErrorInsn, DirectJump, std::optional &delayed_inst, llvm::BasicBlock *block); - // Call `pc` in `block`, treating it as a callable declaration `decl`. - // Returns the new value of the memory pointer (after it is stored to - // `MEMORY`). - llvm::Value *CallCallableDecl(llvm::BasicBlock *block, llvm::Value *pc, - CallableDecl decl); // Try to resolve `target_pc` to a lifted function, and introduce // a function call to that address in `block`. 
Failing this, add a call @@ -328,13 +224,6 @@ NormalInsn, NoOp, InvalidInsn, ErrorInsn, DirectJump, bool CallFunction(const remill::Instruction &inst, llvm::BasicBlock *block, std::optional target_pc); - // A wrapper around the type provider's TryGetFunctionType that makes use - // of the control flow provider to handle control flow redirections for - // thunks - std::optional - TryGetTargetFunctionType(const remill::Instruction &inst, - std::uint64_t address); - // Visit a direct function call control-flow instruction. The target is known // at decode time, and its realized address is stored in // `inst.branch_taken_pc`. In practice, what we do in this situation is try @@ -369,74 +258,18 @@ NormalInsn, NoOp, InvalidInsn, ErrorInsn, DirectJump, // should resume after a `call`. std::pair LoadFunctionReturnAddress(const remill::Instruction &inst, - llvm::BasicBlock *block); - - // Enact relevant control-flow changed after a function call. This figures - // out the return address targeted by the callee and links it into the - // control-flow graph. - void VisitAfterFunctionCall( - const remill::Instruction &inst, llvm::BasicBlock *block, - const std::variant &, - bool can_return, const remill::DecodingContext &prev_context); - - // Visit an asynchronous hyper call control-flow instruction. These are non- - // local control-flow transfers, such as system calls. We treat them like - // indirect function calls. - void VisitAsyncHyperCall(const remill::Instruction &inst, - std::optional &delayed_inst, - llvm::BasicBlock *block); + llvm::BasicBlock *block, llvm::Value *state_ptr); - // Visit (and thus lift) a delayed instruction. When lifting a delayed - // instruction, we need to know if we're one the taken path of a control-flow - // edge, or on the not-taken path. Delayed instructions appear physically - // after some instructions, but execute logically before them in the - // CPU pipeline. They are basically a way for hardware designers to push - // the effort of keeping the pipeline full to compiler developers. - void VisitDelayedInstruction(const remill::Instruction &inst, - std::optional &delayed_inst, - llvm::BasicBlock *block, bool on_taken_path); + void VisitBlock(CodeBlock entry_context, llvm::Value *lifted_function_state, + llvm::Value *abstract_stack); - // Instrument an instruction. This inject a `printf` call just before a - // lifted instruction to aid in debugging. - // - // TODO(pag): In future, this mechanism should be used to provide a feedback - // loop, or to provide information to the `TypeProvider` for future - // re-lifting of code. - // - // TODO(pag): Right now, this feature is enabled by a command-line flag, and - // that flag is tested in `VisitInstruction`; we should move - // lifting configuration decisions out of here so that we can pass - // in a kind of `LiftingOptions` type that changes the lifter's - // behavior. - void InstrumentDataflowProvenance(llvm::BasicBlock *block); - - // Adds a 'breakpoint' instrumentation, which calls functions that are named - // with an instruction's address just before that instruction executes. These - // are nifty to spot checking bitcode. - void InstrumentCallBreakpointFunction(llvm::BasicBlock *block); - - // Visit an instruction, and lift it into a basic block. Then, based off of - // the category of the instruction, invoke one of the category-specific - // lifters to enact a change in control-flow. 
-  void VisitInstruction(remill::Instruction &inst, llvm::BasicBlock *block,
-                        remill::DecodingContext prev_insn_context);
-
-  // In the process of lifting code, we may want to call another native
-  // function, `native_func`, for which we have high-level type info. The main
-  // lifter operates on a special three-argument form function style, and
-  // operating on this style is actually to our benefit, as it means that as
-  // long as we can put data into the emulated `State` structure and pull it
-  // out, then calling one native function from another doesn't require /us/
-  // to know how to adapt one native return type into another native return
-  // type, and instead we let LLVM's optimizations figure it out later during
-  // scalar replacement of aggregates (SROA).
-  llvm::Value *TryCallNativeFunction(FunctionDecl decl,
-                                     llvm::Function *native_func,
-                                     llvm::BasicBlock *block);
-
-  // Visit all instructions. This runs the work list and lifts instructions.
-  void VisitInstructions(uint64_t address);
+  LiftedFunction CreateLiftedFunction(const std::string &name);
+
+  remill::DecodingContext CreateDecodingContext(const CodeBlock &blk);
+
+
+  void VisitBlocks(llvm::Value *lifted_function_state,
+                   llvm::Value *abstract_stack);

   // Try to decode an instruction at address `addr` into `*inst_out`. Returns
   // a context map if successful and std::nullopt otherwise. `is_delayed` tells the decoder
@@ -457,19 +290,29 @@ NormalInsn, NoOp, InvalidInsn, ErrorInsn, DirectJump,

   // that all semantics and helpers are completely inlined.
   void RecursivelyInlineLiftedFunctionIntoNativeFunction(void);

-  // Allocate and initialize the state structure.
-  void AllocateAndInitializeStateStructure(llvm::BasicBlock *block,
-                                           const remill::Arch *arch);
+  // Manipulates the control flow to restore intra-procedural state when
+  // reaching an inter-procedural effect. Returns a boolean representing
+  // whether decoding should continue (true = non-terminal, false = terminal).
+  bool ApplyInterProceduralControlFlowOverride(const remill::Instruction &,
+                                               llvm::BasicBlock *&block,
+                                               llvm::Value *state_ptr);
+
+  bool DoInterProceduralControlFlow(const remill::Instruction &insn,
+                                    llvm::BasicBlock *block,
+                                    const anvill::ControlFlowOverride &override,
+                                    llvm::Value *state_ptr);

   // Perform architecture-specific initialization of the state structure
   // in `block`.
-  void ArchSpecificStateStructureInitialization(llvm::BasicBlock *block);
+  void ArchSpecificStateStructureInitialization(llvm::BasicBlock *block,
+                                                llvm::Value *state_ptr);

   // Initialize the state structure with default values, loaded from global
   // variables. The purpose of these global variables is to show that there are
   // some unmodelled external dependencies inside of a lifted function.
   void
-  InitializeStateStructureFromGlobalRegisterVariables(llvm::BasicBlock *block);
+  InitializeStateStructureFromGlobalRegisterVariables(llvm::BasicBlock *block,
+                                                      llvm::Value *state_ptr);
 };

}  // namespace anvill
diff --git a/lib/Lifters/Options.cpp b/lib/Lifters/Options.cpp
index 6b6a06d59..2d0bc2aab 100644
--- a/lib/Lifters/Options.cpp
+++ b/lib/Lifters/Options.cpp
@@ -6,15 +6,15 @@
 * the LICENSE file found in the root directory of this source tree.
*/ -#include - #include +#include #include #include #include -#include +#include #include #include +#include #include #include @@ -38,12 +38,9 @@ const ::anvill::TypeDictionary &LifterOptions::TypeDictionary(void) const { return type_provider.Dictionary(); } -// Initialize the stack frame with a constant expression of the form: -// -// (ptrtoint __anvill_sp) -llvm::Value *LifterOptions::SymbolicStackPointerInit( +llvm::Value *LifterOptions::SymbolicStackPointerInitWithOffset( llvm::IRBuilderBase &ir, const remill::Register *sp_reg, - uint64_t func_address) { + uint64_t func_address, std::int64_t offset) { auto &context = ir.getContext(); auto block = ir.GetInsertBlock(); @@ -58,20 +55,36 @@ llvm::Value *LifterOptions::SymbolicStackPointerInit( llvm::Constant::getNullValue(type), kSymbolicSPName); } - return llvm::ConstantExpr::getPtrToInt(base_sp, type); + auto sp = llvm::ConstantExpr::getPtrToInt(base_sp, type); + + if (offset != 0) { + return ir.CreateAdd(sp, llvm::ConstantInt::get(type, offset, true)); + } else { + return sp; + } +} + +// Initialize the stack frame with a constant expression of the form: +// +// (ptrtoint __anvill_sp) +llvm::Value * +LifterOptions::SymbolicStackPointerInit(llvm::IRBuilderBase &ir, + const remill::Register *sp_reg, + uint64_t func_address) { + return SymbolicStackPointerInitWithOffset(ir, sp_reg, func_address, 0); } // Initialize the program counter with a constant expression of the form: // // (ptrtoint __anvill_pc) -llvm::Value *LifterOptions::SymbolicProgramCounterInit( - llvm::IRBuilderBase &ir, const remill::Register *pc_reg, - uint64_t func_address) { +llvm::Value *LifterOptions::SymbolicProgramCounterInit(llvm::IRBuilderBase &ir, + llvm::Type *address_type, + uint64_t func_address) { auto &context = ir.getContext(); auto block = ir.GetInsertBlock(); auto module = block->getModule(); - auto type = remill::RecontextualizeType(pc_reg->type, context); + auto type = remill::RecontextualizeType(address_type, context); auto base_pc = module->getGlobalVariable(kSymbolicPCName); if (!base_pc) { @@ -88,8 +101,9 @@ llvm::Value *LifterOptions::SymbolicProgramCounterInit( // Initialize the return address with a constant expression of the form: // // (ptrtoint __anvill_ra) -llvm::Value *LifterOptions::SymbolicReturnAddressInit( - llvm::IRBuilderBase &ir, llvm::IntegerType *type, uint64_t func_address) { +llvm::Value *LifterOptions::SymbolicReturnAddressInit(llvm::IRBuilderBase &ir, + llvm::IntegerType *type, + uint64_t func_address) { auto &context = ir.getContext(); auto block = ir.GetInsertBlock(); auto module = block->getModule(); @@ -107,16 +121,17 @@ llvm::Value *LifterOptions::SymbolicReturnAddressInit( // Initialize the return address with the result of: // // call llvm.returnaddress() -llvm::Value *LifterOptions::ConcreteReturnAddressInit( - llvm::IRBuilderBase &ir, llvm::IntegerType *type, uint64_t) { +llvm::Value *LifterOptions::ConcreteReturnAddressInit(llvm::IRBuilderBase &ir, + llvm::IntegerType *type, + uint64_t) { auto &context = ir.getContext(); auto block = ir.GetInsertBlock(); auto module = block->getModule(); type = llvm::dyn_cast( remill::RecontextualizeType(type, context)); - auto ret_addr_func = llvm::Intrinsic::getDeclaration( - module, llvm::Intrinsic::returnaddress); + auto ret_addr_func = + llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::returnaddress); llvm::Value *args[] = { llvm::ConstantInt::get(llvm::Type::getInt32Ty(context), 0)}; diff --git a/lib/Lifters/ValueLifter.cpp b/lib/Lifters/ValueLifter.cpp index 
023b7ad84..f68d4c0bb 100644
--- a/lib/Lifters/ValueLifter.cpp
+++ b/lib/Lifters/ValueLifter.cpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -27,14 +28,14 @@ namespace anvill {

 // Consume `num_bytes` of bytes from `data`, interpreting them as an integer,
 // and update `data` in place, bumping out the first `num_bytes` of consumed
 // data.
-llvm::APInt ValueLifterImpl::ConsumeBytesAsInt(std::string_view &data,
+llvm::APInt ValueLifterImpl::ConsumeBytesAsInt(llvm::ArrayRef<uint8_t> &data,
                                                unsigned num_bytes) const {
   llvm::APInt result(num_bytes * 8u, 0u);
   for (auto i = 0u; i < num_bytes; ++i) {
     result <<= 8u;
-    result |= data[i];
+    result |= static_cast<uint8_t>(data[i]);
   }
-  data = data.substr(num_bytes);
+  data = data.drop_front(num_bytes);

   if (dl.isLittleEndian() && 1u < num_bytes) {
     return result.byteSwap();
@@ -49,7 +50,7 @@
   auto &func_lifter = ent_lifter.function_lifter;
   auto func = func_lifter.DeclareFunction(decl);
   auto func_in_context =
-      func_lifter.AddFunctionToContext(func, decl.address, ent_lifter);
+      func_lifter.AddFunctionToContext(func, decl, ent_lifter);
   return func_in_context;
 }
@@ -127,10 +128,8 @@ static llvm::Constant *UnwrapZeroIndices(llvm::Constant *ret,

 // entity or plausible entity.
 //
 // NOTE(pag): `hinted_type` can be `nullptr`.
-llvm::Constant *
-ValueLifterImpl::TryGetPointerForAddress(uint64_t ea,
-                                         EntityLifterImpl &ent_lifter,
-                                         llvm::Type *hinted_type) const {
+llvm::Constant *ValueLifterImpl::TryGetPointerForAddress(
+    uint64_t ea, EntityLifterImpl &ent_lifter, llvm::Type *hinted_type) const {

   // First, try to see if we already have an entity for this address. Give
   // preference to an entity with a matching type. Then to global variables and
@@ -163,8 +162,7 @@ ValueLifterImpl::TryGetPointerForAddress(uint64_t ea,

   // Try to create a `FunctionDecl` on-demand.
   if (hinted_type) {
-    if (auto func_type =
-            llvm::dyn_cast<llvm::FunctionType>(hinted_type)) {
+    if (auto func_type = llvm::dyn_cast<llvm::FunctionType>(hinted_type)) {
       const auto func =
           llvm::Function::Create(func_type, llvm::GlobalValue::PrivateLinkage,
                                  ".anvill.value_lifter.temp", options.module);
@@ -237,9 +235,9 @@ llvm::Constant *ValueLifterImpl::GetPointer(uint64_t ea, llvm::Type *value_type,

 // Interpret `data` as the backing bytes to initialize an `llvm::Constant`
 // of type `type_of_data`. This requires access to `ent_lifter` to be able
 // to lift pointer types that will reference declared data/functions.
-llvm::Constant *ValueLifterImpl::Lift(std::string_view data, llvm::Type *type,
-                                      EntityLifterImpl &ent_lifter,
-                                      uint64_t loc_ea) const {
+llvm::Constant *
+ValueLifterImpl::Lift(llvm::ArrayRef<uint8_t> data, llvm::Type *type,
+                      EntityLifterImpl &ent_lifter, uint64_t loc_ea) const {

   switch (type->getTypeID()) {
@@ -274,8 +272,8 @@ llvm::Constant *ValueLifterImpl::Lift(std::string_view data, llvm::Type *type,
     }
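// Worked example for ConsumeBytesAsInt above (hypothetical input): the loop
// accumulates bytes most-significant-first, so on a little-endian target the
// result is byte-swapped afterwards. Consuming the 4 bytes {0x78, 0x56, 0x34,
// 0x12} first builds 0x78563412, and byteSwap() then yields 0x12345678, i.e.
// the value a little-endian load of those bytes would produce.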
     // If we successfully lift it as a reference then we're in good shape.
-      if (auto val = GetPointer(address, nullptr,
-                                ent_lifter, loc_ea, addr_space)) {
+      if (auto val =
+              GetPointer(address, nullptr, ent_lifter, loc_ea, addr_space)) {
         return val;
       }
@@ -298,8 +296,8 @@ llvm::Constant *ValueLifterImpl::Lift(std::string_view data, llvm::Type *type,

         const auto elm_type = struct_type->getStructElementType(i);
         const auto offset = layout->getElementOffset(i);
         CHECK_LE(prev_offset, offset);
-        auto const_elm =
-            Lift(data.substr(offset), elm_type, ent_lifter, loc_ea + offset);
+        auto const_elm = Lift(data.drop_front(offset), elm_type, ent_lifter,
+                              loc_ea + offset);
         initializer_list.push_back(const_elm);
         prev_offset = offset;
       }
@@ -317,7 +315,7 @@

       for (auto i = 0u; i < num_elms; ++i) {
         const auto elm_offset = i * elm_size;
-        auto const_elm = Lift(data.substr(elm_offset), elm_type, ent_lifter,
+        auto const_elm = Lift(data.drop_front(elm_offset), elm_type, ent_lifter,
                               loc_ea + elm_offset);
         initializer_list.push_back(const_elm);
       }
@@ -335,7 +333,7 @@

       for (auto i = 0u; i < num_elms; ++i) {
         const auto elm_offset = i * elm_size;
-        auto const_elm = Lift(data.substr(elm_offset), elm_type, ent_lifter,
+        auto const_elm = Lift(data.drop_front(elm_offset), elm_type, ent_lifter,
                               loc_ea + elm_offset);
         initializer_list.push_back(const_elm);
       }
@@ -354,6 +352,13 @@

       return llvm::ConstantFP::get(type, val.bitsToDouble());
     }

+    case llvm::Type::X86_FP80TyID: {
+      const auto size = static_cast<unsigned>(dl.getTypeStoreSize(type));
+      auto val = ConsumeBytesAsInt(data, size);
+      const llvm::APFloat float_val(llvm::APFloat::x87DoubleExtended(), val);
+      return llvm::ConstantFP::get(type, float_val);
+    }
+
     default:
       LOG(FATAL) << "Cannot initialize constant of unhandled LLVM type "
                  << remill::LLVMThingToString(type) << " at " << std::hex
@@ -377,7 +382,7 @@ ValueLifter::ValueLifter(const EntityLifter &entity_lifter_)

 // Interpret `data` as the backing bytes to initialize an `llvm::Constant`
 // of type `type_of_data`. `loc_ea`, if non-null, is the address at which
 // `data` appears.
-llvm::Constant *ValueLifter::Lift(std::string_view data,
+llvm::Constant *ValueLifter::Lift(llvm::ArrayRef<uint8_t> data,
                                   llvm::Type *type_of_data) const {
   return impl->value_lifter.Lift(data, type_of_data, *impl, 0);
 }
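// Note on the X86_FP80 case added above: an x87 extended-precision value has
// a 10-byte store size (what dl.getTypeStoreSize reports), even though its
// alloc size is usually padded to 12 or 16 bytes, so exactly 10 bytes are
// consumed and reinterpreted via APFloat's x87DoubleExtended semantics.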
diff --git a/lib/Lifters/ValueLifter.h b/lib/Lifters/ValueLifter.h
index b4acbd850..472fcfcb6 100644
--- a/lib/Lifters/ValueLifter.h
+++ b/lib/Lifters/ValueLifter.h
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include

 namespace llvm {
@@ -36,11 +37,11 @@ class ValueLifterImpl {

   // Consume `num_bytes` of bytes from `data`, interpreting them as an integer,
   // and update `data` in place, bumping out the first `num_bytes` of consumed
   // data.
-  llvm::APInt ConsumeBytesAsInt(std::string_view &data,
+  llvm::APInt ConsumeBytesAsInt(llvm::ArrayRef<uint8_t> &data,
                                 unsigned num_bytes) const;

   // Consume `size` bytes of data from `data`, and update `data` in place.
-  inline llvm::APInt ConsumeBytesAsInt(std::string_view &data,
+  inline llvm::APInt ConsumeBytesAsInt(llvm::ArrayRef<uint8_t> &data,
                                        llvm::TypeSize size) const {
     return ConsumeBytesAsInt(
         data, static_cast<unsigned>(static_cast<uint64_t>(size)));
   }
@@ -49,7 +50,7 @@ class ValueLifterImpl {

   // Interpret `data` as the backing bytes to initialize an `llvm::Constant`
   // of type `type_of_data`. This requires access to `ent_lifter` to be able
   // to lift pointer types that will reference declared data/functions.
-  llvm::Constant *Lift(std::string_view data, llvm::Type *type_of_data,
+  llvm::Constant *Lift(llvm::ArrayRef<uint8_t> data, llvm::Type *type_of_data,
                        EntityLifterImpl &ent_lifter, uint64_t loc_ea) const;

   // Lift pointers at `ea`.
@@ -66,10 +67,9 @@ class ValueLifterImpl {
   //
   // Returns an `llvm::GlobalValue *` if the pointer is associated with a
   // known or plausible entity, and an `llvm::Constant *` otherwise.
-  llvm::Constant *GetPointer(
-      uint64_t ea, llvm::Type *value_type,
-      EntityLifterImpl &ent_lifter,
-      uint64_t loc_ea, unsigned address_space=0) const;
+  llvm::Constant *GetPointer(uint64_t ea, llvm::Type *value_type,
+                             EntityLifterImpl &ent_lifter, uint64_t loc_ea,
+                             unsigned address_space = 0) const;

  private:
   llvm::Constant *GetFunctionPointer(const FunctionDecl &decl,
diff --git a/lib/Optimize.cpp b/lib/Optimize.cpp
index df3586c95..c0f1c468f 100644
--- a/lib/Optimize.cpp
+++ b/lib/Optimize.cpp
@@ -11,6 +11,7 @@
 #include

 // clang-format off
+#include
 #include
 #include
 #include
@@ -26,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -44,6 +46,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -56,9 +59,14 @@
 #include
 #include
 #include
+#include
+#include
 #include
+#include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -68,6 +76,10 @@
 #include
 #include

+#include "anvill/Passes/RewriteVectorOps.h"
+#include "anvill/Passes/SplitStackFrameAtReturnAddress.h"
+#include "anvill/Specification.h"
+
 namespace anvill {

 //// TODO(pag): NewGVN passes in debug build of LLVM on challenge 5.
@@ -102,10 +114,13 @@ class OurVerifierPass : public llvm::PassInfoMixin<OurVerifierPass> {

 // code, etc.
 // When utilizing crossRegisterProxies, cleanup triggers ASan.
-void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) {
+void OptimizeModule(const EntityLifter &lifter, llvm::Module &module,
+                    const BasicBlockContexts &contexts,
+                    const anvill::Specification &spec) {
+
+  CHECK(!llvm::verifyModule(module, &llvm::errs()));

   const LifterOptions &options = lifter.Options();
-  const MemoryProvider &mp = lifter.MemoryProvider();

   EntityCrossReferenceResolver xr(lifter);

@@ -113,6 +128,7 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) {
     LOG(FATAL) << remill::GetErrorString(err);
   }

+  /*
   if (auto used = module.getGlobalVariable("llvm.used"); used) {
     used->setLinkage(llvm::GlobalValue::PrivateLinkage);
     used->eraseFromParent();
@@ -121,7 +137,7 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) {
   if (auto used = module.getGlobalVariable("llvm.compiler.used"); used) {
     used->setLinkage(llvm::GlobalValue::PrivateLinkage);
     used->eraseFromParent();
-  }
+  }*/

   LOG(INFO) << "Optimizing module.";

@@ -140,6 +156,10 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) {
     pc_metadata_id = context.getMDKindID(options.pc_metadata_name);
   }

+  ConvertPointerArithmeticToGEP::StructMap structs;
+  ConvertPointerArithmeticToGEP::TypeMap types;
+  ConvertPointerArithmeticToGEP::MDMap md;
+
   llvm::PassBuilder pb;
   llvm::ModulePassManager mpm;
   llvm::ModuleAnalysisManager mam;

   // llvm::InlineParams params;
   llvm::FunctionAnalysisManager fam;
+
+  llvm::Triple ModuleTriple(module.getTargetTriple());
+  llvm::TargetLibraryInfoImpl TLII(ModuleTriple);
+  TLII.disableAllFunctions();
+
   fam.registerPass([&] { return
llvm::TargetLibraryAnalysis(TLII); }); pb.registerFunctionAnalyses(fam); pb.registerModuleAnalyses(mam); pb.registerCGSCCAnalyses(cam); @@ -161,45 +185,63 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) { // mpm.addPass(std::move(inliner)); mpm.addPass(llvm::GlobalOptPass()); + mpm.addPass(llvm::GlobalDCEPass()); + mpm.addPass(llvm::StripDeadDebugInfoPass()); llvm::FunctionPassManager fpm; fpm.addPass(llvm::DCEPass()); + fpm.addPass(llvm::VerifierPass()); // NOTE(alex): This pass is extremely slow with LLVM 14. // fpm.addPass(llvm::SinkingPass()); // NewGVN has bugs with `____strtold_l_internal` from chal5, amd64. - // fpm.addPass(llvm::NewGVNPass()); + fpm.addPass(llvm::NewGVNPass()); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::SCCPPass()); - // NOTE(alex): This pass is extremely slow with LLVM 14. - // fpm.addPass(llvm::DSEPass()); - fpm.addPass(llvm::SROAPass()); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(llvm::DSEPass()); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG)); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::EarlyCSEPass(true)); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::BDCEPass()); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::SimplifyCFGPass()); - // NOTE(alex): This pass is extremely slow with LLVM 14. - // fpm.addPass(llvm::SinkingPass()); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(llvm::SinkingPass()); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::SimplifyCFGPass()); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::InstCombinePass()); - + fpm.addPass(llvm::VerifierPass()); AddSinkSelectionsIntoBranchTargets(fpm); + fpm.addPass(llvm::VerifierPass()); AddRemoveUnusedFPClassificationCalls(fpm); + fpm.addPass(llvm::VerifierPass()); AddRemoveDelaySlotIntrinsics(fpm); + fpm.addPass(llvm::VerifierPass()); AddRemoveErrorIntrinsics(fpm); + fpm.addPass(llvm::VerifierPass()); AddLowerRemillMemoryAccessIntrinsics(fpm); + fpm.addPass(llvm::VerifierPass()); AddRemoveCompilerBarriers(fpm); - AddLowerTypeHintIntrinsics(fpm); + fpm.addPass(llvm::VerifierPass()); // TODO(pag): This pass has an issue on the `SMIME_write_ASN1` function // of the ARM64 variant of Challenge 5. // AddHoistUsersOfSelectsAndPhis(fpm); fpm.addPass(llvm::InstCombinePass()); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::DCEPass()); - fpm.addPass(llvm::SROAPass()); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG)); + fpm.addPass(llvm::VerifierPass()); // Sometimes we observe patterns where PC- and SP-related offsets are // accidentally truncated, and thus displacement-based analyses make them @@ -208,29 +250,45 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) { // negative numbers. Thus, we want to fixup such cases prior to any kind of // stack analysis. 
AddConvertMasksToCasts(fpm); - + fpm.addPass(llvm::VerifierPass()); AddSinkSelectionsIntoBranchTargets(fpm); + fpm.addPass(llvm::VerifierPass()); AddRemoveTrivialPhisAndSelects(fpm); + fpm.addPass(llvm::VerifierPass()); fpm.addPass(llvm::DCEPass()); + fpm.addPass(llvm::VerifierPass()); AddRemoveStackPointerCExprs(fpm, options.stack_frame_recovery_options); - AddRecoverBasicStackFrame(fpm, options.stack_frame_recovery_options); - AddSplitStackFrameAtReturnAddress(fpm, options.stack_frame_recovery_options); - fpm.addPass(llvm::SROAPass()); - + fpm.addPass(llvm::VerifierPass()); + //AddRecoverBasicStackFrame(fpm, options.stack_frame_recovery_options); + //AddSplitStackFrameAtReturnAddress(fpm, options.stack_frame_recovery_options); + fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG)); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG)); + fpm.addPass(llvm::VerifierPass()); AddCombineAdjacentShifts(fpm); + fpm.addPass(llvm::VerifierPass()); // Sometimes we have a values in the form of (expr ^ 1) used as branch // conditions or other targets. Try to fix these to be CMPs, since it // makes code easier to read and analyze. This is a fairly narrow optimization // but it comes up often enough for lifted code. + + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(anvill::RemoveCallIntrinsics(xr, spec, lifter)); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG)); + fpm.addPass(RewriteVectorOps()); + fpm.addPass(llvm::VerifierPass()); AddConvertAddressesToEntityUses(fpm, xr, pc_metadata_id); - AddBranchRecovery(fpm); - AddLowerSwitchIntrinsics(fpm, mp); + AddBranchRecovery(fpm); + fpm.addPass(llvm::VerifierPass()); + fpm.addPass(ConvertPointerArithmeticToGEP(contexts, types, structs, md)); + fpm.addPass(llvm::VerifierPass()); pb.crossRegisterProxies(lam, fam, cam, mam); mpm.addPass(llvm::createModuleToFunctionPassAdaptor(std::move(fpm))); @@ -239,18 +297,34 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) { llvm::FunctionPassManager second_fpm; AddTransformRemillJumpIntrinsics(second_fpm, xr); - AddRemoveRemillFunctionReturns(second_fpm, xr); - AddConvertSymbolicReturnAddressToConcreteReturnAddress(second_fpm); + second_fpm.addPass(llvm::VerifierPass()); + second_fpm.addPass(anvill::ReplaceStackReferences(contexts, lifter)); + //AddRemoveRemillFunctionReturns(second_fpm, xr); + //AddConvertSymbolicReturnAddressToConcreteReturnAddress(second_fpm); AddLowerRemillUndefinedIntrinsics(second_fpm); + second_fpm.addPass(llvm::VerifierPass()); AddRemoveFailedBranchHints(second_fpm); + fpm.addPass(RewriteVectorOps()); + second_fpm.addPass(llvm::VerifierPass()); second_fpm.addPass(llvm::NewGVNPass()); + second_fpm.addPass(llvm::VerifierPass()); + second_fpm.addPass(llvm::InstCombinePass()); AddSpreadPCMetadata(second_fpm, options); - second_fpm.addPass(CodeQualityStatCollector()); + + + second_fpm.addPass(llvm::VerifierPass()); + AddConvertAddressesToEntityUses(fpm, xr, pc_metadata_id); + second_fpm.addPass(llvm::VerifierPass()); AddConvertXorsToCmps(second_fpm); + second_fpm.addPass(llvm::VerifierPass()); second_fpm.addPass(llvm::DCEPass()); + second_fpm.addPass(llvm::VerifierPass()); + second_fpm.addPass(llvm::DSEPass()); + second_fpm.addPass(llvm::VerifierPass()); mpm.addPass(llvm::createModuleToFunctionPassAdaptor(std::move(second_fpm))); + mpm.addPass(anvill::CodeQualityStatCollector()); mpm.run(module, mam); // Get rid of all final uses of `__anvill_pc`. 
@@ -262,6 +336,48 @@ void OptimizeModule(const EntityLifter &lifter, llvm::Module &module) { } } + mpm.run(module, mam); + + if (lifter.Options().should_inline_basic_blocks) { + llvm::FunctionPassManager inliner; + + inliner.addPass(InlineBasicBlockFunctions(contexts)); + + llvm::ModulePassManager mpminliner; + mpminliner.addPass( + llvm::createModuleToFunctionPassAdaptor(std::move(inliner))); + mpminliner.addPass( + llvm::createModuleToPostOrderCGSCCPassAdaptor(llvm::InlinerPass())); + + mpminliner.run(module, mam); + + // lets make sure we eliminate all the basic block functions because we dont care anymore + for (auto &f : module.getFunctionList()) { + if (anvill::GetBasicBlockUid(&f)) { + f.setLinkage(llvm::GlobalValue::InternalLinkage); + } + } + + auto intrinsics = module.getFunction("__remill_intrinsics"); + if (intrinsics) { + intrinsics->eraseFromParent(); + } + + + auto defaultmpm = + pb.buildPerModuleDefaultPipeline(llvm::OptimizationLevel::O3); + + defaultmpm.run(module, mam); + + llvm::createModuleToFunctionPassAdaptor( + SplitStackFrameAtReturnAddress(options.stack_frame_recovery_options)) + .run(module, mam); + + + pb.buildPerModuleDefaultPipeline(llvm::OptimizationLevel::O3) + .run(module, mam); + } + // Manually clear the analyses to prevent ASAN failures in the destructors. mam.clear(); diff --git a/lib/Passes/CodeQualityStatCollector.cpp b/lib/Passes/CodeQualityStatCollector.cpp index c1078404e..ed51004c7 100644 --- a/lib/Passes/CodeQualityStatCollector.cpp +++ b/lib/Passes/CodeQualityStatCollector.cpp @@ -11,13 +11,15 @@ #include namespace anvill { -STATISTIC( +ALWAYS_ENABLED_STATISTIC( ConditionalComplexity, "A factor that approximates the complexity of the condition in branch instructions"); -STATISTIC(NumberOfInstructions, "Total number of instructions"); -STATISTIC(AbruptControlFlow, "Indirect control flow instructions"); -STATISTIC(IntToPointerCasts, "Integer to pointer casts"); -STATISTIC(PointerToIntCasts, "Pointer to integer casts"); +ALWAYS_ENABLED_STATISTIC(NumberOfInstructions, "Total number of instructions"); +ALWAYS_ENABLED_STATISTIC(AbruptControlFlow, "Indirect control flow instructions"); +ALWAYS_ENABLED_STATISTIC(IntToPointerCasts, "Integer to pointer casts"); +ALWAYS_ENABLED_STATISTIC(PointerToIntCasts, "Pointer to integer casts"); +ALWAYS_ENABLED_STATISTIC(AnvillStackPointers, "Number of functions that expose an Anvill stack pointer"); +ALWAYS_ENABLED_STATISTIC(AnvillPCPointers, "Number of functions that expose an Anvill pc pointer"); namespace { @@ -35,7 +37,7 @@ class ConditionalComplexityVisitor void visitBinaryOperator(llvm::BinaryOperator &I) { if (auto *inttype = llvm::dyn_cast(I.getType())) { if (inttype->getBitWidth() == 1) { - ConditionalComplexity++; + ++ConditionalComplexity; this->tryVisit(I.getOperand(0)); this->tryVisit(I.getOperand(1)); } @@ -43,12 +45,12 @@ class ConditionalComplexityVisitor } void visitCmpInst(llvm::CmpInst &I) { - ConditionalComplexity++; + ++ConditionalComplexity; } void visitUnaryOperator(llvm::UnaryOperator &I) { if (auto *inttype = llvm::dyn_cast(I.getType())) { - ConditionalComplexity++; + ++ConditionalComplexity; this->tryVisit(I.getOperand(0)); } } @@ -57,32 +59,61 @@ class ConditionalComplexityVisitor llvm::PreservedAnalyses -CodeQualityStatCollector::run(llvm::Function &function, - llvm::FunctionAnalysisManager &analysisManager) { +CodeQualityStatCollector::run(llvm::Module &module, + llvm::ModuleAnalysisManager &analysisManager) { ConditionalComplexityVisitor complexity_visitor; - for (auto &i : 
llvm::instructions(function)) {
-    if (auto *int_to_ptr = llvm::dyn_cast<llvm::IntToPtrInst>(&i)) {
-      IntToPointerCasts++;
-    }
+  llvm::GlobalVariable *anvill_sp = module.getGlobalVariable(kSymbolicSPName);
+  llvm::GlobalVariable *anvill_pc = module.getGlobalVariable(kSymbolicPCName);
+
+  llvm::DenseSet<const llvm::Function *> sp_funcs;
+  llvm::DenseSet<const llvm::Function *> pc_funcs;

-    if (auto *int_to_ptr = llvm::dyn_cast<llvm::PtrToIntInst>(&i)) {
-      PointerToIntCasts++;
+  if (anvill_sp != nullptr) {
+    for (const auto &U : anvill_sp->uses()) {
+      const auto &user = U.getUser();
+      if (const llvm::Instruction *I =
+              llvm::dyn_cast<llvm::Instruction>(user)) {
+        sp_funcs.insert(I->getFunction());
+      }
     }
+  }
+  AnvillStackPointers += sp_funcs.size();

-    NumberOfInstructions++;
-    if (auto *branch = llvm::dyn_cast<llvm::BranchInst>(&i)) {
-      if (branch->isConditional()) {
-        complexity_visitor.tryVisit(branch->getCondition());
+  if (anvill_pc != nullptr) {
+    for (const auto &U : anvill_pc->uses()) {
+      const auto &user = U.getUser();
+      if (const llvm::Instruction *I =
+              llvm::dyn_cast<llvm::Instruction>(user)) {
+        pc_funcs.insert(I->getFunction());
       }
     }
+  }
+
+  AnvillPCPointers += pc_funcs.size();
+
+  for (auto &function : module) {
+    for (auto &i : llvm::instructions(function)) {
+      if (auto *int_to_ptr = llvm::dyn_cast<llvm::IntToPtrInst>(&i)) {
+        ++IntToPointerCasts;
+      }
+
+      if (auto *ptr_to_int = llvm::dyn_cast<llvm::PtrToIntInst>(&i)) {
+        ++PointerToIntCasts;
+      }
+
+      ++NumberOfInstructions;
+      if (auto *branch = llvm::dyn_cast<llvm::BranchInst>(&i)) {
+        if (branch->isConditional()) {
+          complexity_visitor.tryVisit(branch->getCondition());
+        }
+      }

-    if (auto *cb = llvm::dyn_cast<llvm::CallBase>(&i)) {
-      auto target = cb->getCalledFunction();
-      if (target != nullptr) {
-        if (target->getName() == kAnvillSwitchCompleteFunc ||
-            target->getName() == kAnvillSwitchIncompleteFunc) {
-          AbruptControlFlow++;
+      if (auto *cb = llvm::dyn_cast<llvm::CallBase>(&i)) {
+        auto target = cb->getCalledFunction();
+        if (target != nullptr) {
+          if (target->getName() == kAnvillSwitchCompleteFunc ||
+              target->getName() == kAnvillSwitchIncompleteFunc) {
+            ++AbruptControlFlow;
+          }
         }
       }
     }
@@ -94,4 +125,4 @@ llvm::StringRef CodeQualityStatCollector::name(void) {
   return "CodeQualityStatCollector";
 }

-}  // namespace anvill
\ No newline at end of file
+}  // namespace anvill
diff --git a/lib/Passes/ConvertAddressesToEntityUses.cpp b/lib/Passes/ConvertAddressesToEntityUses.cpp
index 1fcbf5a20..b9c3d00ee 100644
--- a/lib/Passes/ConvertAddressesToEntityUses.cpp
+++ b/lib/Passes/ConvertAddressesToEntityUses.cpp
@@ -14,6 +14,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 #include
 #include
@@ -37,6 +41,16 @@ static llvm::MDNode *GetPCAnnotation(llvm::Module *module, uint64_t pc) {

 }  // namespace

+
+bool ConvertAddressesToEntityUses::IsPointerLike(llvm::Use &use) {
+  if (auto cst = llvm::dyn_cast<llvm::ConstantExpr>(use.get())) {
+    return llvm::Instruction::IntToPtr == cst->getOpcode();
+  }
+  // TODO(Ian): Add use of type annotations here.
+
+  return false;
+}
+
 llvm::PreservedAnalyses
 ConvertAddressesToEntityUses::run(llvm::Function &function,
                                   llvm::FunctionAnalysisManager &fam) {
@@ -84,14 +98,17 @@ ConvertAddressesToEntityUses::run(llvm::Function &function,

       auto ent_type = llvm::dyn_cast<llvm::PointerType>(entity->getType());
       CHECK_NOTNULL(ent_type);

+      auto adapted = AdaptToType(ir, entity, val_type);
+      if (!adapted) {
+        continue;
+      }
       if (auto phi = llvm::dyn_cast<llvm::PHINode>(user_inst)) {
         auto pred_block = phi->getIncomingBlock(*(xref_use.use));
         llvm::IRBuilder<> ir(pred_block->getTerminator());
-        xref_use.use->set(AdaptToType(ir, entity, val_type));
+        xref_use.use->set(adapted);
       } else {
-        llvm::IRBuilder<> ir(user_inst);
-        xref_use.use->set(AdaptToType(ir, entity, val_type));
+
xref_use.use->set(adapted); } if (auto val_inst = llvm::dyn_cast(val)) { @@ -147,9 +164,11 @@ EntityUsages ConvertAddressesToEntityUses::EnumeratePossibleEntityUsages( ra.is_valid && !ra.references_return_address && !ra.references_stack_pointer) { + if (ra.references_entity || // Related to an existing lifted entity. ra.references_global_value || // Related to a global var/func. - ra.references_program_counter) { // Related to `__anvill_pc`. + ra.references_program_counter || + IsPointerLike(use)) { // Related to `__anvill_pc`. output.emplace_back(&use, ra); } } diff --git a/lib/Passes/ConvertPointerArithmeticToGEP.cpp b/lib/Passes/ConvertPointerArithmeticToGEP.cpp new file mode 100644 index 000000000..961173f69 --- /dev/null +++ b/lib/Passes/ConvertPointerArithmeticToGEP.cpp @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2022-present, Trail of Bits, Inc. + * All rights reserved. + * + * This source code is licensed in accordance with the terms specified in + * the LICENSE file found in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace anvill { +struct ConvertPointerArithmeticToGEP::Impl { + const BasicBlockContexts &contexts; + TypeMap &types; + StructMap &structs; + MDMap &md; + + TypeSpec MDToTypeSpec(llvm::MDNode *md); + std::optional GetTypeInfo(llvm::Value *val); + + llvm::Type *TypeSpecToType(llvm::LLVMContext &context, BaseType t); + llvm::PointerType *TypeSpecToType(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::ArrayType *TypeSpecToType(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::FixedVectorType *TypeSpecToType(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::StructType *TypeSpecToType(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::FunctionType *TypeSpecToType(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::IntegerType *TypeSpecToType(llvm::LLVMContext &context, UnknownType t); + llvm::Type *TypeSpecToType(llvm::LLVMContext &context, TypeSpec type); + + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, BaseType t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, + std::shared_ptr t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, UnknownType t); + llvm::MDNode *TypeSpecToMD(llvm::LLVMContext &context, TypeSpec type); + + + bool ConvertTypeHints(llvm::Function &f); + bool ConvertLoadInt(llvm::Function &f); + bool FoldPtrAdd(llvm::Function &f); + bool FoldScaledIndex(llvm::Function &f); + + Impl(const BasicBlockContexts &contexts, TypeMap &types, StructMap &structs, + MDMap &md) + : contexts(contexts), + types(types), + structs(structs), + md(md) {} +}; + + +llvm::Type * +ConvertPointerArithmeticToGEP::Impl::TypeSpecToType(llvm::LLVMContext &context, + BaseType t) { + switch (t) { + case BaseType::Bool: + case BaseType::Char: + case BaseType::SignedChar: + case BaseType::UnsignedChar: + case BaseType::Int8: + case BaseType::UInt8: + case BaseType::Padding: return llvm::Type::getInt8Ty(context); + + case BaseType::Int16: + 
case BaseType::UInt16: return llvm::Type::getInt16Ty(context); + + case BaseType::Int24: + case BaseType::UInt24: return llvm::Type::getIntNTy(context, 24); + + case BaseType::Int32: + case BaseType::UInt32: return llvm::Type::getInt32Ty(context); + + case BaseType::Int64: + case BaseType::UInt64: return llvm::Type::getInt64Ty(context); + + case BaseType::Int128: + case BaseType::UInt128: return llvm::Type::getInt128Ty(context); + + case BaseType::Float16: return llvm::Type::getHalfTy(context); + case BaseType::Float32: return llvm::Type::getFloatTy(context); + case BaseType::Float64: return llvm::Type::getDoubleTy(context); + case BaseType::Float80: return llvm::Type::getX86_FP80Ty(context); + case BaseType::Float128: return llvm::Type::getFP128Ty(context); + case BaseType::MMX64: return llvm::Type::getX86_MMXTy(context); + + case BaseType::Void: return llvm::Type::getVoidTy(context); + + default: return nullptr; + } +} + +llvm::PointerType *ConvertPointerArithmeticToGEP::Impl::TypeSpecToType( + llvm::LLVMContext &context, std::shared_ptr t) { + return llvm::PointerType::get(context, 0); +} + +llvm::ArrayType *ConvertPointerArithmeticToGEP::Impl::TypeSpecToType( + llvm::LLVMContext &context, std::shared_ptr t) { + return llvm::ArrayType::get(TypeSpecToType(context, t->base), t->size); +} + +llvm::FixedVectorType *ConvertPointerArithmeticToGEP::Impl::TypeSpecToType( + llvm::LLVMContext &context, std::shared_ptr t) { + return llvm::FixedVectorType::get(TypeSpecToType(context, t->base), t->size); +} + +llvm::StructType *ConvertPointerArithmeticToGEP::Impl::TypeSpecToType( + llvm::LLVMContext &context, std::shared_ptr t) { + auto &type = structs[t.get()]; + if (type) { + return type; + } + + std::vector members; + for (auto member : t->members) { + members.push_back(TypeSpecToType(context, member)); + } + type = llvm::StructType::get(context, members, /*isPacked=*/true); + return type; +} + +llvm::FunctionType *ConvertPointerArithmeticToGEP::Impl::TypeSpecToType( + llvm::LLVMContext &context, std::shared_ptr t) { + std::vector args; + for (auto arg : t->arguments) { + args.push_back(TypeSpecToType(context, arg)); + } + return llvm::FunctionType::get(TypeSpecToType(context, t->return_type), args, + t->is_variadic); +} + +llvm::IntegerType * +ConvertPointerArithmeticToGEP::Impl::TypeSpecToType(llvm::LLVMContext &context, + UnknownType t) { + return llvm::Type::getIntNTy(context, t.size * 8); +} + +llvm::Type * +ConvertPointerArithmeticToGEP::Impl::TypeSpecToType(llvm::LLVMContext &context, + TypeSpec type) { + return std::visit( + [this, &context](auto &&t) { + return static_cast(TypeSpecToType(context, t)); + }, + type); +} + +TypeSpec ConvertPointerArithmeticToGEP::Impl::MDToTypeSpec(llvm::MDNode *md) { + if (types.count(md)) { + return types[md]; + } + + auto &type = types[md]; + auto tag = llvm::cast(md->getOperand(0).get()); + auto tag_string = tag->getString(); + if (tag_string == "BaseType") { + auto kind_const = + llvm::cast(md->getOperand(1).get()); + auto kind_int = llvm::cast(kind_const->getValue()); + auto kind = static_cast(kind_int->getZExtValue()); + + type = kind; + } else if (tag_string == "PointerType") { + auto pointee = + MDToTypeSpec(llvm::cast(md->getOperand(1).get())); + type = std::make_shared(pointee, false); + } else if (tag_string == "VectorType") { + auto elem = MDToTypeSpec(llvm::cast(md->getOperand(1).get())); + auto size_const = + llvm::cast(md->getOperand(2).get()); + auto size_int = llvm::cast(size_const->getValue()); + type = std::make_shared(elem, 
size_int->getZExtValue()); + } else if (tag_string == "ArrayType") { + auto elem = MDToTypeSpec(llvm::cast(md->getOperand(1).get())); + auto size_const = + llvm::cast(md->getOperand(2).get()); + auto size_int = llvm::cast(size_const->getValue()); + type = std::make_shared(elem, size_int->getZExtValue()); + } else if (tag_string == "StructType") { + auto struct_ = std::make_shared(); + for (unsigned i = 1; i < md->getNumOperands(); ++i) { + struct_->members.push_back( + MDToTypeSpec(llvm::cast(md->getOperand(i).get()))); + } + type = struct_; + } else if (tag_string == "FunctionType") { + // TODO(frabert) + } else if (tag_string == "UnknownType") { + auto size_const = + llvm::cast(md->getOperand(1).get()); + auto size_int = llvm::cast(size_const->getValue()); + type = UnknownType{static_cast(size_int->getZExtValue())}; + } + return type; +} + +std::optional +ConvertPointerArithmeticToGEP::Impl::GetTypeInfo(llvm::Value *val) { + llvm::MDNode *md = nullptr; + if (auto gvar = llvm::dyn_cast(val)) { + md = gvar->getMetadata("anvill.type"); + } else if (auto ptr_insn = llvm::dyn_cast(val)) { + md = ptr_insn->getMetadata("anvill.type"); + } + + if (!md) { + return {}; + } + + return MDToTypeSpec(md); +} + +llvm::MDNode * +ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD(llvm::LLVMContext &context, + BaseType t) { + auto str = llvm::MDString::get(context, "BaseType"); + auto value = llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), + static_cast(t)); + return llvm::MDNode::get(context, + {str, llvm::ConstantAsMetadata::get(value)}); +} + +llvm::MDNode *ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD( + llvm::LLVMContext &context, std::shared_ptr t) { + auto str = llvm::MDString::get(context, "PointerType"); + return llvm::MDNode::get(context, {str, TypeSpecToMD(context, t->pointee)}); +} + +llvm::MDNode *ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD( + llvm::LLVMContext &context, std::shared_ptr t) { + auto str = llvm::MDString::get(context, "ArrayType"); + auto size = + llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), t->size); + return llvm::MDNode::get(context, {str, TypeSpecToMD(context, t->base), + llvm::ConstantAsMetadata::get(size)}); +} + +llvm::MDNode *ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD( + llvm::LLVMContext &context, std::shared_ptr t) { + auto str = llvm::MDString::get(context, "VectorType"); + auto size = + llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), t->size); + return llvm::MDNode::get(context, {str, TypeSpecToMD(context, t->base), + llvm::ConstantAsMetadata::get(size)}); +} + +llvm::MDNode *ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD( + llvm::LLVMContext &context, std::shared_ptr t) { + auto str = llvm::MDString::get(context, "StructType"); + std::vector members; + members.push_back(str); + for (auto member : t->members) { + members.push_back(TypeSpecToMD(context, member)); + } + return llvm::MDNode::get(context, members); +} + +llvm::MDNode *ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD( + llvm::LLVMContext &context, std::shared_ptr t) { + return nullptr; +} + +llvm::MDNode * +ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD(llvm::LLVMContext &context, + UnknownType t) { + auto str = llvm::MDString::get(context, "UnknownType"); + auto size = llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), + static_cast(t.size)); + return llvm::MDNode::get(context, {str, llvm::ConstantAsMetadata::get(size)}); +} + +llvm::MDNode * +ConvertPointerArithmeticToGEP::Impl::TypeSpecToMD(llvm::LLVMContext &context, + 
TypeSpec type) { + return std::visit( + [this, &context](auto &&t) { return TypeSpecToMD(context, t); }, type); +} + +ConvertPointerArithmeticToGEP::ConvertPointerArithmeticToGEP( + const BasicBlockContexts &contexts, TypeMap &types, StructMap &structs, + MDMap &md) + : BasicBlockPass(contexts), + impl(std::make_unique(contexts, types, structs, md)) {} + +ConvertPointerArithmeticToGEP::ConvertPointerArithmeticToGEP( + const ConvertPointerArithmeticToGEP &pass) + : BasicBlockPass(pass.impl->contexts), + impl(std::make_unique(pass.impl->contexts, pass.impl->types, + pass.impl->structs, pass.impl->md)) {} + + +ConvertPointerArithmeticToGEP::~ConvertPointerArithmeticToGEP() = default; + +llvm::StringRef ConvertPointerArithmeticToGEP::name() { + return "ConvertPointerArithmeticToGEP"; +} + +bool ConvertPointerArithmeticToGEP::Impl::ConvertTypeHints(llvm::Function &f) { + std::vector calls; + for (auto &insn : llvm::instructions(f)) { + if (auto *call = llvm::dyn_cast(&insn)) { + if (call->getCalledFunction() && + call->getCalledFunction()->getName() == kTypeHintFunctionPrefix) { + calls.push_back(call); + } + } + } + + for (auto call : calls) { + auto arg = call->getArgOperand(0); + call->replaceAllUsesWith(arg); + call->eraseFromParent(); + } + + return !calls.empty(); +} + +// Finds `(load i64, P)` and converts it to `(ptrtoint (load ptr, P))` +bool ConvertPointerArithmeticToGEP::Impl::ConvertLoadInt(llvm::Function &f) { + using namespace llvm::PatternMatch; + llvm::Value *ptr; + auto &context = f.getContext(); + auto &dl = f.getParent()->getDataLayout(); + auto pat = m_Load(m_Value(ptr)); + for (auto &insn : llvm::instructions(f)) { + if (!match(&insn, pat)) { + continue; + } + + auto old_load = llvm::cast(&insn); + auto load_ty = old_load->getType(); + if (load_ty != llvm::Type::getIntNTy(context, dl.getPointerSizeInBits())) { + continue; + } + + auto maybe_type_info = GetTypeInfo(ptr); + if (!maybe_type_info) { + continue; + } + auto type_info = *maybe_type_info; + + if (auto gvar = llvm::dyn_cast(ptr)) { + if (!std::holds_alternative>(type_info)) { + continue; + } + + auto ptr_type = std::get>(type_info); + auto new_load = new llvm::LoadInst(llvm::PointerType::get(context, 0), + ptr, "", &insn); + new_load->setMetadata("anvill.type", TypeSpecToMD(context, type_info)); + auto ptrtoint = new llvm::PtrToIntInst(new_load, load_ty, "", &insn); + insn.replaceAllUsesWith(ptrtoint); + + return true; + } + + if (auto ptr_insn = llvm::dyn_cast(ptr)) { + if (!std::holds_alternative>(type_info)) { + continue; + } + + auto ptr_type = std::get>(type_info); + if (!std::holds_alternative>( + ptr_type->pointee)) { + continue; + } + + auto new_load = new llvm::LoadInst(llvm::PointerType::get(context, 0), + ptr, "", &insn); + new_load->setMetadata("anvill.type", + TypeSpecToMD(context, ptr_type->pointee)); + auto ptrtoint = new llvm::PtrToIntInst(new_load, load_ty, "", &insn); + insn.replaceAllUsesWith(ptrtoint); + + return true; + } + } + + return false; +} + +namespace { +void BuildIndices(uint64_t &offset, TypeSpec &cur_spec, llvm::Type *&cur_type, + std::vector &indices, const llvm::DataLayout &dl) { + while (offset != 0) { + if (std::holds_alternative>(cur_spec)) { + auto struct_spec = std::get>(cur_spec); + llvm::StructType *struct_type = llvm::cast(cur_type); + + auto layout = dl.getStructLayout(struct_type); + if (offset >= layout->getSizeInBytes()) { + return; + } + + auto index = layout->getElementContainingOffset(offset); + indices.push_back(index); + + cur_spec = struct_spec->members[index]; + 
cur_type = struct_type->getElementType(index); + offset -= layout->getElementOffset(index); + } else if (std::holds_alternative>(cur_spec)) { + auto arr_spec = std::get>(cur_spec); + auto arr_type = llvm::cast(cur_type); + + auto elem_size = + dl.getTypeSizeInBits(arr_type->getArrayElementType()) / 8; + auto index = offset / elem_size; + + if (index >= arr_type->getNumElements()) { + return; + } + + indices.push_back(index); + + cur_spec = arr_spec->base; + cur_type = arr_type->getArrayElementType(); + offset -= index * elem_size; + } else if (std::holds_alternative>(cur_spec)) { + auto vec_spec = std::get>(cur_spec); + auto vec_type = llvm::cast(cur_type); + + auto elem_size = dl.getTypeSizeInBits(vec_type->getElementType()) / 8; + auto index = offset / elem_size; + if (index >= vec_type->getElementCount().getKnownMinValue()) { + return; + } + indices.push_back(index); + + cur_spec = vec_spec->base; + cur_type = vec_type->getElementType(); + offset -= index * elem_size; + } else { + return; + } + } +} +} // namespace + +// Finds `(add (ptrtoint P), A)` and tries to convert to `(ptrtoint (gep ...))` +bool ConvertPointerArithmeticToGEP::Impl::FoldPtrAdd(llvm::Function &f) { + using namespace llvm::PatternMatch; + llvm::Value *ptr; + llvm::ConstantInt *offset_const; + auto &context = f.getContext(); + auto &dl = f.getParent()->getDataLayout(); + auto pat = m_Add(m_PtrToInt(m_Value(ptr)), m_ConstantInt(offset_const)); + for (auto &insn : llvm::instructions(f)) { + if (!match(&insn, pat)) { + continue; + } + + auto maybe_ptr_type = GetTypeInfo(ptr); + if (!maybe_ptr_type.has_value()) { + continue; + } + + if (!std::holds_alternative>( + *maybe_ptr_type)) { + continue; + } + + auto pointee_spec = + std::get>(*maybe_ptr_type)->pointee; + auto pointee_type = TypeSpecToType(context, pointee_spec); + + auto offset = offset_const->getZExtValue(); + std::vector indices; + + auto cur_spec = pointee_spec; + auto cur_type = pointee_type; + if (!cur_type->isSized()) { + continue; + } + + { + auto cur_size = dl.getTypeSizeInBits(cur_type) / 8; + auto index = offset / cur_size; + indices.push_back(index); + offset = offset % cur_size; + } + + BuildIndices(offset, cur_spec, cur_type, indices, dl); + if (offset != 0) { + continue; + } + + std::vector indices_values; + auto i32 = llvm::Type::getInt32Ty(context); + for (auto i : indices) { + indices_values.push_back(llvm::ConstantInt::get(i32, i)); + } + auto next_insn = insn.getNextNonDebugInstruction(); + auto gep = llvm::GetElementPtrInst::Create(pointee_type, ptr, + indices_values, "", next_insn); + gep->setMetadata("anvill.type", TypeSpecToMD(context, cur_spec)); + auto ptrtoint = new llvm::PtrToIntInst( + gep, llvm::Type::getIntNTy(context, dl.getPointerSizeInBits()), "", + next_insn); + insn.replaceAllUsesWith(ptrtoint); + + return true; + } + + return false; +} + +// Convert `(add (ptrtoint P), (shl I, S))` to `(ptrtoint (gep P, I))` +bool ConvertPointerArithmeticToGEP::Impl::FoldScaledIndex(llvm::Function &f) { + using namespace llvm::PatternMatch; + llvm::Value *ptr; + llvm::Value *base; + llvm::ConstantInt *shift_const; + auto &context = f.getContext(); + auto &dl = f.getParent()->getDataLayout(); + auto patL = m_Add(m_PtrToInt(m_Value(ptr)), + m_Shl(m_Value(base), m_ConstantInt(shift_const))); + auto patR = m_Add(m_Shl(m_Value(base), m_ConstantInt(shift_const)), + m_PtrToInt(m_Value(ptr))); + auto ptrint_ty = llvm::Type::getIntNTy(context, dl.getPointerSizeInBits()); + for (auto &insn : llvm::instructions(f)) { + if (!match(&insn, patL) && 
!match(&insn, patR)) { + continue; + } + + auto maybe_type_info = GetTypeInfo(ptr); + if (!maybe_type_info.has_value()) { + continue; + } + + auto scale = 1ull << shift_const->getZExtValue(); + auto type_info = *maybe_type_info; + + auto next_insn = insn.getNextNonDebugInstruction(); + + if (std::holds_alternative>(type_info)) { + auto array_spec = std::get>(type_info); + auto array_type = TypeSpecToType(context, array_spec); + auto elem_size = + dl.getTypeSizeInBits(array_type->getArrayElementType()) / 8; + if (scale != elem_size) { + continue; + } + + auto gep = llvm::GetElementPtrInst::Create( + array_type, ptr, {llvm::ConstantInt::get(ptrint_ty, 0), base}, "", + next_insn); + gep->setMetadata("anvill.type", TypeSpecToMD(context, array_spec->base)); + auto ptrtoint = new llvm::PtrToIntInst(gep, ptrint_ty, "", next_insn); + insn.replaceAllUsesWith(ptrtoint); + return true; + } + + if (std::holds_alternative>(type_info)) { + auto vector_spec = std::get>(type_info); + auto vector_type = TypeSpecToType(context, vector_spec); + auto elem_size = dl.getTypeSizeInBits(vector_type->getElementType()) / 8; + if (scale != elem_size) { + continue; + } + + auto gep = llvm::GetElementPtrInst::Create( + vector_type, ptr, {llvm::ConstantInt::get(ptrint_ty, 0), base}, "", + next_insn); + gep->setMetadata("anvill.type", TypeSpecToMD(context, vector_spec->base)); + auto ptrtoint = new llvm::PtrToIntInst(gep, ptrint_ty, "", next_insn); + insn.replaceAllUsesWith(ptrtoint); + return true; + } + } + return false; +} + +llvm::PreservedAnalyses ConvertPointerArithmeticToGEP::runOnBasicBlockFunction( + llvm::Function &function, llvm::FunctionAnalysisManager &fam, + const anvill::BasicBlockContext &, const FunctionDecl &) { + bool changed = impl->ConvertLoadInt(function); + changed |= impl->FoldPtrAdd(function); + changed |= impl->FoldScaledIndex(function); + changed |= impl->ConvertTypeHints(function); + return changed ? llvm::PreservedAnalyses::none() + : llvm::PreservedAnalyses::all(); +} +} // namespace anvill \ No newline at end of file diff --git a/lib/Passes/InlineBasicBlockFunctions.cpp b/lib/Passes/InlineBasicBlockFunctions.cpp new file mode 100644 index 000000000..8b2674194 --- /dev/null +++ b/lib/Passes/InlineBasicBlockFunctions.cpp @@ -0,0 +1,38 @@ +#include "anvill/Passes/InlineBasicBlockFunctions.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "Utils.h" + +namespace anvill { + +llvm::StringRef InlineBasicBlockFunctions::name(void) { + return "Inline the basic block functions"; +} + +llvm::PreservedAnalyses InlineBasicBlockFunctions::runOnBasicBlockFunction( + llvm::Function &F, llvm::FunctionAnalysisManager &AM, + const anvill::BasicBlockContext &cont, const anvill::FunctionDecl &) { + F.removeFnAttr(llvm::Attribute::NoInline); + F.addFnAttr(llvm::Attribute::AlwaysInline); + return llvm::PreservedAnalyses::all(); +} + +} // namespace anvill \ No newline at end of file diff --git a/lib/Passes/LowerSwitchIntrinsics.cpp b/lib/Passes/LowerSwitchIntrinsics.cpp deleted file mode 100644 index b941904e4..000000000 --- a/lib/Passes/LowerSwitchIntrinsics.cpp +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace anvill { - -class PcBinding { - private: - llvm::DenseMap mapping; - - PcBinding(llvm::DenseMap mapping) - : mapping(std::move(mapping)) {} - - - public: - std::optional Lookup(llvm::APInt targetPc) const { - if (this->mapping.find(targetPc) != this->mapping.end()) { - return {this->mapping.find(targetPc)->second}; - } - - return std::nullopt; - } - - static PcBinding Build(const llvm::CallInst *complete_switch, - llvm::SwitchInst *follower) { - assert(complete_switch->arg_size() - 1 == follower->getNumCases()); - - llvm::DenseMap mapping; - for (auto case_handler : follower->cases()) { - auto pc_arg = complete_switch->getArgOperand( - case_handler.getCaseValue()->getValue().getLimitedValue() + - 1); // is the switch has more than 2^64 cases we have bigger problems - mapping.insert( - {llvm::cast(pc_arg)->getValue(), - case_handler - .getCaseSuccessor()}); // the argument to a complete switch should always be a constant int - } - - - return PcBinding(std::move(mapping)); - } -}; - -class SwitchBuilder { - private: - llvm::LLVMContext &context; - const MemoryProvider &mem_prov; - const llvm::DataLayout &dl; - - std::optional ReadIntFrom(llvm::IntegerType *ty, - llvm::APInt addr) { - auto uaddr = addr.getLimitedValue(); - std::vector memory; - assert(ty->getBitWidth() % 8 == 0); - auto target_bytes = ty->getBitWidth() / 8; - - for (uint64_t i = 0; i < target_bytes; i++) { - auto res = this->mem_prov.Query(uaddr + i); - ByteAvailability avail = std::get<1>(res); - if (avail != ByteAvailability::kAvailable) { - return std::nullopt; - } - - memory.push_back(std::get<0>(res)); - } - - - llvm::APInt res(ty->getBitWidth(), 0); - - // Endianess? may have to flip around memory as needed, yeah looks like - // LoadIntMemory loads at system memory so need to use flip_memory in - // llvm::endianess - llvm::LoadIntFromMemory(res, memory.data(), target_bytes); - - if (this->dl.isLittleEndian() == llvm::sys::IsLittleEndianHost) { - return res; - } else { - return res.byteSwap(); - } - } - - public: - SwitchBuilder(llvm::LLVMContext &context, const MemoryProvider &memProv, - const llvm::DataLayout &dl) - : context(context), - mem_prov(memProv), - dl(dl) {} - - // A native switch utilizes llvms switch construct in the intended manner to - // dispatch control flow on integer values. This pass converts jump table- - // based compiler implementations of this construct back into simple switch - // cases over an integer index that directly jumps to known labels. 
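Since this file is being deleted, a short hedged sketch of the recovery described in the comment above may help future readers: given a resolved jump table, a native `llvm::SwitchInst` is rebuilt one case at a time. The helper name and its inputs below are illustrative stand-ins, not part of this diff or of the deleted pass's API; the declaration of the deleted `CreateNativeSwitch` follows.

#include <llvm/IR/Instructions.h>
#include <cstdint>
#include <map>

// Hypothetical: `targets` maps recovered index values to successor blocks.
llvm::SwitchInst *BuildRecoveredSwitch(
    llvm::Value *index, llvm::BasicBlock *default_block,
    const std::map<uint64_t, llvm::BasicBlock *> &targets,
    llvm::LLVMContext &context) {
  // Create the switch detached, as the deleted code does; the caller
  // splices it in with llvm::ReplaceInstWithInst.
  auto *sw = llvm::SwitchInst::Create(index, default_block, targets.size());
  for (const auto &[case_value, block] : targets) {
    if (block != default_block) {  // default targets need no explicit case
      sw->addCase(
          llvm::ConstantInt::get(llvm::Type::getInt64Ty(context), case_value),
          block);
    }
  }
  return sw;
}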
- std::optional - CreateNativeSwitch(const JumpTableResult &jt, const PcBinding &binding, - llvm::LLVMContext &context) { - auto min_index = jt.bounds.lower; - auto number_of_cases = (jt.bounds.upper - min_index) + 1; - auto interp = jt.interp.getInterp(); - llvm::SwitchInst *new_switch = - llvm::SwitchInst::Create(jt.indexRel.getIndex(), jt.defaultOut, - number_of_cases.getLimitedValue()); - for (llvm::APInt curr_ind_value = min_index; - jt.bounds.lessThanOrEqual(curr_ind_value, jt.bounds.upper); - curr_ind_value += 1) { - auto read_address = jt.indexRel.apply(interp, curr_ind_value); - std::optional jmp_off = - this->ReadIntFrom(jt.pcRel.getExpectedType(jt.interp), read_address); - if (!jmp_off.has_value()) { - delete new_switch; - return std::nullopt; - } - - auto new_pc = jt.pcRel.apply(interp, *jmp_off); - auto out_block = binding.Lookup(new_pc); - if (!out_block.has_value()) { - delete new_switch; - return std::nullopt; - } - - - if (*out_block != jt.defaultOut) { - llvm::ConstantInt *index_val = - llvm::ConstantInt::get(this->context, curr_ind_value); - new_switch->addCase(index_val, *out_block); - } - } - return new_switch; - } -}; - - -llvm::PreservedAnalyses -LowerSwitchIntrinsics::runOnIndirectJump(llvm::CallInst *targetCall, - llvm::FunctionAnalysisManager &am, - llvm::PreservedAnalyses agg) { - - const auto &jt_analysis = - am.getResult(*targetCall->getFunction()); - auto jresult = jt_analysis.find(targetCall); - - - if (jresult == jt_analysis.end()) { - return agg; - } - - llvm::Function &f = *targetCall->getFunction(); - auto dl = f.getParent()->getDataLayout(); - llvm::LLVMContext &context = f.getParent()->getContext(); - - SwitchBuilder sbuilder(context, this->memProv, dl); - auto following_switch = targetCall->getParent()->getTerminator(); - - if (auto *follower = llvm::dyn_cast(following_switch)) { - // Check that the switch uses the complete switch - if (follower->getCondition() == targetCall) { - auto binding = PcBinding::Build(targetCall, follower); - std::optional new_switch = - sbuilder.CreateNativeSwitch(jresult->second, binding, context); - - if (new_switch) { - llvm::ReplaceInstWithInst(follower, *new_switch); - if (targetCall->uses().empty()) { - targetCall->eraseFromParent(); - } - agg.intersect(llvm::PreservedAnalyses::none()); - return agg; - } - } - } - - return agg; -} - -llvm::StringRef LowerSwitchIntrinsics::name() { - return "LowerSwitchIntrinsics"; -} - -llvm::PreservedAnalyses LowerSwitchIntrinsics::BuildInitialResult() { - return llvm::PreservedAnalyses::all(); -} - - -void AddLowerSwitchIntrinsics(llvm::FunctionPassManager &fpm, - const MemoryProvider &memprov) { - fpm.addPass(LowerSwitchIntrinsics(memprov)); -} - -} // namespace anvill diff --git a/lib/Passes/LowerTypeHintIntrinsics.cpp b/lib/Passes/LowerTypeHintIntrinsics.cpp deleted file mode 100644 index 4e3f2c8a8..000000000 --- a/lib/Passes/LowerTypeHintIntrinsics.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. 
- */ - -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include "Utils.h" - -namespace anvill { - -llvm::StringRef LowerTypeHintIntrinsics::name(void) { - return "LowerTypeHintIntrinsics"; -} - -llvm::PreservedAnalyses -LowerTypeHintIntrinsics::run(llvm::Function &func, - llvm::FunctionAnalysisManager &AM) { - std::vector calls; - - for (auto &inst : llvm::instructions(func)) { - if (auto call = llvm::dyn_cast(&inst)) { - if (auto callee = call->getCalledFunction(); - callee && callee->getName().startswith(kTypeHintFunctionPrefix)) { - calls.push_back(call); - } - } - } - - auto changed = false; - for (auto call : calls) { - auto val = call->getArgOperand(0)->stripPointerCasts(); - llvm::IRBuilder<> ir(call); - auto *cast_val = ir.CreateBitOrPointerCast(val, call->getType()); - CopyMetadataTo(call, cast_val); - call->replaceAllUsesWith(cast_val); - changed = true; - } - - for (auto call : calls) { - if (call->use_empty()) { - call->eraseFromParent(); - changed = true; - } - } - - return ConvertBoolToPreserved(changed); -} - -void AddLowerTypeHintIntrinsics(llvm::FunctionPassManager &fpm) { - fpm.addPass(LowerTypeHintIntrinsics()); -} - -} // namespace anvill diff --git a/lib/Passes/RecoverBasicStackFrame.cpp b/lib/Passes/RecoverBasicStackFrame.cpp deleted file mode 100644 index f8cced3b4..000000000 --- a/lib/Passes/RecoverBasicStackFrame.cpp +++ /dev/null @@ -1,550 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. - */ - -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "Utils.h" - -namespace anvill { -namespace { - -// Describes an instruction that accesses the stack pointer through the -// `__anvill_sp` symbol. -struct StackPointerUse final { - inline explicit StackPointerUse(llvm::Use *use_, std::uint64_t type_size_, - std::int64_t stack_offset_) - : use(use_), - type_size(type_size_), - stack_offset(stack_offset_) {} - - // An operand inside of a particular instruction, where `use->getUser()` - // is an `llvm::Instruction`, and `use->get()` is a value related to the - // stack pointer. 
- llvm::Use *const use; - - // Operand size - const std::uint64_t type_size; - - // Stack offset referenced - const std::int64_t stack_offset; -}; - -// Contains a list of `load` and `store` instructions that reference -// the stack pointer -using StackPointerRegisterUsages = std::vector; - -// This structure contains the stack size, along with the lower and -// higher bounds of the offsets, and all the instructions that have -// been analyzed -struct StackFrameAnalysis final { - - // A list of uses that reference the stack pointer - std::vector instruction_uses; - - // Lowest SP-relative offset - std::int64_t lowest_offset{}; - - // Highest SP-relative offset - std::int64_t highest_offset{}; - - // Stack frame size - std::size_t size{}; -}; - -// Enumerates all the store and load instructions that reference -// the stack -static StackPointerRegisterUsages EnumerateStackPointerUsages( - llvm::Function &function) { - StackPointerRegisterUsages output; - StackPointerResolver sp_resolver(function.getParent()); - - for (auto &basic_block : function) { - for (auto &instr : basic_block) { - for (auto i = 0u, num_ops = instr.getNumOperands(); i < num_ops; ++i) { - auto &use = instr.getOperandUse(i); - if (auto val = use.get(); llvm::isa(val) && - sp_resolver.IsRelatedToStackPointer(val)) { - output.emplace_back(&use); - } - } - } - } - - return output; -} - -static constexpr uint64_t kMax16 = std::numeric_limits::max(); -static constexpr uint64_t kMax32 = std::numeric_limits::max(); - -// Analyzes the stack frame, determining the relative boundaries and -// collecting the instructions that operate on the stack pointer -static StackFrameAnalysis AnalyzeStackFrame( - llvm::Function &function, const StackFrameRecoveryOptions &options) { - - // The CrossReferenceResolver can accumulate all the offsets - // applied to the stack pointer symbol for us - auto module = function.getParent(); - auto &data_layout = module->getDataLayout(); - - NullCrossReferenceResolver resolver; - CrossReferenceFolder folder(resolver, data_layout); - - // Pre-initialize the stack limits - StackFrameAnalysis output; - output.highest_offset = std::numeric_limits::min(); - output.lowest_offset = std::numeric_limits::max(); - - // Go through each one of the instructions we have found - for (const auto use : EnumerateStackPointerUsages(function)) { - - // Skip any operand that is not related to the stack pointer. - const auto val = use->get(); - - // Attempt to resolve the constant expression into an offset. If we can't - // resolve it, then it probably means that there was a comparison or - // something, and we should unfold it. - const auto reference = folder.TryResolveReferenceWithCaching(val); - if (!reference.is_valid || !reference.references_stack_pointer) { - continue; - } - - // The offset from the stack pointer. Force to a 32-bit, then sign-extend. - int64_t stack_offset = reference.Displacement(data_layout); - if (options.max_stack_frame_size <= kMax16) { - stack_offset = static_cast(stack_offset); - } else if (options.max_stack_frame_size <= kMax32) { - stack_offset = static_cast(stack_offset); - } - - // Update the boundaries, based on the offset we have found - std::uint64_t type_size = - data_layout.getTypeAllocSize(val->getType()).getFixedSize(); - - // In the case of `store` instructions, we want to record the size of the - // stored value as the type size or updating the stack offset. 
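To make the size special-casing below concrete, a hedged illustration (the IR line is invented for this note, not taken from the diff):

// Given:  store i32 %v, ptr %slot   (the tracked use is the ptr operand)
// The alloc size of the pointer operand's type is pointer-sized, but the
// slot actually occupies 4 bytes: the size of the stored i32. The store
// branch below therefore re-reads the size from
// store->getValueOperand()->getType(), and the load branch likewise uses
// the loaded type.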
- if (auto store = llvm::dyn_cast(use->getUser())) { - if (use->getOperandNo() == 1) { - const auto stored_type = store->getValueOperand()->getType(); - type_size = data_layout.getTypeAllocSize(stored_type).getFixedSize(); - } - - // In the case of `load` instructions, we want to redord the size of the - // loaded value. - } else if (auto load = llvm::dyn_cast(use->getUser())) { - type_size = data_layout.getTypeAllocSize(load->getType()).getFixedSize(); - } - - output.highest_offset = - std::max(output.highest_offset, - stack_offset + static_cast(type_size)); - - output.lowest_offset = std::min(output.lowest_offset, stack_offset); - - // Save the operand use. - output.instruction_uses.emplace_back(use, type_size, stack_offset); - } - - output.size = - static_cast(output.highest_offset - output.lowest_offset); - - return output; -} - -// Generates a simple, byte-array based, stack frame for the given -// function -static llvm::StructType *GenerateStackFrameType( - const llvm::Function &function, const StackFrameRecoveryOptions &options, - const StackFrameAnalysis &stack_frame_analysis, std::size_t padding_bytes, - llvm::IntegerType *el_type) { - - const auto element_size = static_cast( - el_type->getPrimitiveSizeInBits().getFixedSize() / 8u); - - // Generate a stack frame type with a name that matches the anvill ABI - auto function_name = function.getName().str(); - auto stack_frame_type_name = function_name + kStackFrameTypeNameSuffix; - - // Make sure this type is not defined already - auto module = function.getParent(); - const auto &dl = module->getDataLayout(); - auto &context = module->getContext(); - - auto stack_frame_type = llvm::StructType::getTypeByName( - context, stack_frame_type_name); - - // Determine how many bytes we should allocate. We may have been - // asked to add some additional padding. We don't care how it is - // accessed right now, we just add to the total size of the final - // stack frame - auto stack_frame_size = std::max( - 1u, - std::min(options.max_stack_frame_size, - padding_bytes + stack_frame_analysis.size)); - - // Round the stack frame to a multiple of the address size. - auto address_size = dl.getPointerSize(0); - const unsigned slot_size = std::lcm(address_size, element_size); - const auto num_slots = (stack_frame_size + (slot_size - 1u)) / - slot_size; - stack_frame_size = num_slots * slot_size; - - if (stack_frame_type != nullptr) { - assert(dl.getTypeAllocSize(stack_frame_type).getKnownMinSize() <= - stack_frame_size); - return stack_frame_type; - } - - // Generate the stack frame using an array of address-sized elements. - auto arr_type = llvm::ArrayType::get(el_type, num_slots); - - llvm::Type *stack_frame_types[] = {arr_type}; - return llvm::StructType::create(stack_frame_types, stack_frame_type_name); -} - -// Generates a new symbolic stack value. 
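For readers scanning this deletion, the naming scheme the helper below produced can be summarized with a small sketch. The concrete prefix string is an assumption here, though the resulting `__anvill_stack_*` names also appear in the frame diagram further down in this file:

// Assuming kSymbolicStackFrameValuePrefix == "__anvill_stack_" and a
// downward-growing stack:
//   offset -8  ->  __anvill_stack_minus_8
//   offset  0  ->  __anvill_stack_0
//   offset +4  ->  __anvill_stack_plus_4
// With an upward-growing stack the plus/minus suffixes are swapped,
// matching the two branches on options.stack_grows_down below.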
-static llvm::GlobalVariable *GetStackSymbolicByteValue( - llvm::Module &module, const StackFrameRecoveryOptions &options, - std::int32_t offset, llvm::IntegerType *type) { - - // Create a new name - auto value_name = kSymbolicStackFrameValuePrefix; - if (options.stack_grows_down) { - if (offset < 0) { - value_name += "minus_"; - } else if (offset > 0) { - value_name += "plus_"; - } - } else { - if (offset < 0) { - value_name += "plus_"; - } else if (offset > 0) { - value_name += "minus_"; - } - } - - value_name += std::to_string(abs(offset)); - - auto gv = module.getGlobalVariable(value_name); - if (gv) { - CHECK_EQ(gv->getValueType(), type); - return gv; - } else { - return new llvm::GlobalVariable( - module, type, false, llvm::GlobalValue::ExternalLinkage, nullptr, - value_name); - } -} - -// Patches the function, replacing the load/store instructions so that -// they operate on the new stack frame type we generated. -static void UpdateFunction( - llvm::Function &function, const StackFrameRecoveryOptions &options, - const StackFrameAnalysis &stack_frame_analysis) { - - StackFrameStructureInitializationProcedure init_strategy = - options.stack_frame_struct_init_procedure; - - std::size_t stack_frame_lower_padding = - options.stack_frame_lower_padding; - - std::size_t stack_frame_higher_padding = - options.stack_frame_higher_padding; - - auto &context = function.getContext(); - auto module = function.getParent(); - const auto &dl = module->getDataLayout(); - auto address_size = dl.getPointerSize(0); - auto addr_type = llvm::Type::getIntNTy(context, address_size * 8u); - - // Generate a new stack frame type, using a byte array inside a - // StructType - auto padding_bytes = stack_frame_lower_padding + stack_frame_higher_padding; - unsigned stack_frame_word_size = options.stack_frame_word_size; - if (!stack_frame_word_size) { - stack_frame_word_size = address_size; - } - llvm::IntegerType * const stack_frame_word_type = llvm::IntegerType::get( - context, stack_frame_word_size * 8u); - auto stack_frame_type = GenerateStackFrameType( - function, options, stack_frame_analysis, padding_bytes, - stack_frame_word_type); - - int64_t base_stack_offset; - if (options.stack_grows_down) { - base_stack_offset = stack_frame_analysis.lowest_offset - - static_cast(stack_frame_lower_padding); - } else { - base_stack_offset = stack_frame_analysis.lowest_offset - - static_cast(stack_frame_lower_padding); - } - - // Take the first instruction as an insert pointer for the - // IRBuilder, and then create an `alloca` instruction to - // generate our new stack frame - auto &entry_block = function.getEntryBlock(); - auto &insert_point = *entry_block.getFirstInsertionPt(); - - llvm::IRBuilder<> builder(&insert_point); - auto stack_frame_alloca = builder.CreateAlloca(stack_frame_type); - - // Annotate the stack. - if (options.stack_offset_metadata_name) { - - // TODO(pag): Account for the stack size having actually been clamped down - // to a smaller range. - - int64_t base = 0; - - // If the stack grows down, the higher offsets represent accesses to - // the callee's stack frame. These will be positive. - if (options.stack_grows_down) { - base = stack_frame_analysis.highest_offset; - base += static_cast(options.stack_frame_higher_padding); - base -= static_cast(stack_frame_word_size); - - // If the stack grows up, the lower offsets represent accesses to the - // callee's stack frame. These will be negative. 
- } else { - base = stack_frame_analysis.lowest_offset; - base -= static_cast(options.stack_frame_lower_padding); - base += static_cast(stack_frame_word_size); - } - - // NOTE(pag): Base points to the highest or lowest address-sized integer - // that can be stored on the stack. - - auto md_id = context.getMDKindID(kAnvillStackZero); - auto adjust_val = llvm::ConstantInt::get( - addr_type, static_cast(base), true); - auto adjust_md = llvm::ValueAsMetadata::get(adjust_val); - stack_frame_alloca->setMetadata( - md_id, llvm::MDNode::get(context, adjust_md)); - } - - // When we have padding enabled in the configuration, we must - // make sure that accesses are still correctly centered around the - // stack pointer we were given (i.e.: we don't alter where the - // `__anvill_stack_0` is supposed to land). - // - // This is true regardless of which initialization method we use, but - // the following example assumes kSymbolic since it makes the - // explanation easier to follow. - // - // [higher addresses] - // - // [__anvill_stack_plus_3 <- optional higher padding <- alloca - // - // __anvill_stack_plus_2 - // __anvill_stack_plus_1 - // __anvill_stack_0 <- __anvill_sp - // __anvill_stack_minus_1 - // __anvill_stack_minus_2 - // - // [__anvill_stack_minus_3 <- optional lower padding - // - // [lower addresses] - - auto total_stack_frame_size = padding_bytes + stack_frame_analysis.size; - - // Pre-initialize the stack frame if we have been requested to do so. This - // covers the frame padding bytes as well. - // - // Look at the definition for the `StackFrameStructureInitializationProcedure` - // enum class to get more details on each initialization strategy. - switch (init_strategy) { - case StackFrameStructureInitializationProcedure::kZeroes: { - - // Initialize to zero - auto null_value = llvm::Constant::getNullValue(stack_frame_type); - builder.CreateStore(null_value, stack_frame_alloca); - break; - } - - case StackFrameStructureInitializationProcedure::kUndef: { - - // Mark the stack values as explicitly undefined - auto undef_value = llvm::UndefValue::get(stack_frame_type); - builder.CreateStore(undef_value, stack_frame_alloca); - break; - } - - case StackFrameStructureInitializationProcedure::kSymbolic: { - - // Generate symbolic values for each byte in the stack frame - auto &module = *function.getParent(); - - auto current_offset = base_stack_offset; - - llvm::Value *gep_indexes[] = {builder.getInt32(0), builder.getInt32(0), - nullptr}; - - for (auto i = 0U; i < total_stack_frame_size; - i += stack_frame_word_size) { - - gep_indexes[2] = builder.getInt32(i / stack_frame_word_size); - DCHECK_EQ(stack_frame_word_type, - llvm::GetElementPtrInst::getIndexedType(stack_frame_type, - gep_indexes)); - auto stack_frame_byte = - builder.CreateGEP(stack_frame_type, stack_frame_alloca, - gep_indexes); - - auto symbolic_value_ptr = GetStackSymbolicByteValue( - module, options, current_offset, stack_frame_word_type); - - current_offset += static_cast(stack_frame_word_size); - - auto symbolic_value = builder.CreateLoad(stack_frame_word_type, - symbolic_value_ptr); - builder.CreateStore(symbolic_value, stack_frame_byte); - } - - break; - } - - case StackFrameStructureInitializationProcedure::kNone: { - - // Skip initialization - break; - } - } - - // The stack analysis we have performed earlier contains all the - // operand uses we have to update. 
- for (auto &sp_use : stack_frame_analysis.instruction_uses) { - - const auto obj = sp_use.use->get(); - - // Convert the `__anvill_sp`-relative offset to a 0-based index - // into our stack frame type - auto zero_based_offset = - sp_use.stack_offset - stack_frame_analysis.lowest_offset; - - // If we added padding, adjust the displacement value. We just have - // to add the amount of bytes we have inserted before the stack pointer - zero_based_offset += stack_frame_lower_padding; - - // Create a GEP instruction that accesses the new stack frame we - // created based on the relative offset - // - // GEP indices for the stack_frame_ptr are constants. It can safely - // inserted after the alloca instead of before the instruction using - // it. - // - // As a reminder, the stack frame type is a StructType that contains - // an ArrayType with int8 elements - llvm::Value *stack_frame_ptr = builder.CreateGEP( - stack_frame_type, stack_frame_alloca, - {builder.getInt32(0), builder.getInt32(0), - builder.getInt32(zero_based_offset / stack_frame_word_size)}); - - auto from_val = sp_use.use->get(); - CopyMetadataTo(from_val, stack_frame_ptr); - - llvm::IntegerType *el_type = nullptr; - unsigned scale = 0; - auto missing = static_cast(zero_based_offset) % - stack_frame_word_size; - switch (missing) { - case 7: - case 5: - case 3: - case 1: - el_type = llvm::Type::getInt8Ty(context); - scale = 1; - break; - - case 4: - el_type = llvm::Type::getInt32Ty(context); - scale = 4; - break; - - case 6: - case 2: - el_type = llvm::Type::getInt16Ty(context); - scale = 2; - break; - case 0: - break; - default: - LOG(FATAL) - << "Unsupported address size: " << missing; - break; - } - - if (el_type) { - llvm::PointerType *ptr_type = llvm::PointerType::get(context, 0); - stack_frame_ptr = builder.CreateBitOrPointerCast( - stack_frame_ptr, ptr_type); - CopyMetadataTo(from_val, stack_frame_ptr); - stack_frame_ptr = builder.CreateGEP( - el_type, stack_frame_ptr, builder.getInt32(missing / scale)); - CopyMetadataTo(from_val, stack_frame_ptr); - } - - stack_frame_ptr = - builder.CreateBitOrPointerCast(stack_frame_ptr, obj->getType()); - CopyMetadataTo(from_val, stack_frame_ptr); - - // We now have to replace the operand; it is not correct to use - // `replaceAllUsesWith` on the operand, because the scope of a constant - // could be bigger than just the function we are using. - sp_use.use->set(stack_frame_ptr); - } -} - -} // namespace - -llvm::PreservedAnalyses RecoverBasicStackFrame::run( - llvm::Function &function, llvm::FunctionAnalysisManager &fam) { - - if (function.isDeclaration()) { - return llvm::PreservedAnalyses::all(); - } - - // Analyze the stack frame first, enumerating the instructions referencing - // the __anvill_sp symbol and determining the boundaries of the stack memory - StackFrameAnalysis stack_frame_analysis = AnalyzeStackFrame(function, options); - if (stack_frame_analysis.instruction_uses.empty()) { - return llvm::PreservedAnalyses::all(); - } - - // It is now time to patch the function. 
This method will take the stack - // analysis and use it to generate a stack frame type and update all the - // instructions - UpdateFunction(function, options, stack_frame_analysis); - - // Analyze the __anvill_sp usage again; this time, the resulting - // instruction list should be empty - assert(EnumerateStackPointerUsages(function).empty()); - - return llvm::PreservedAnalyses::none(); -} - -llvm::StringRef RecoverBasicStackFrame::name(void) { - return llvm::StringRef("RecoverBasicStackFrame"); -} - -void AddRecoverBasicStackFrame(llvm::FunctionPassManager &fpm, - const StackFrameRecoveryOptions &options) { - fpm.addPass(RecoverBasicStackFrame(options)); -} -} // namespace anvill diff --git a/lib/Passes/RemoveCallIntrinsics.cpp b/lib/Passes/RemoveCallIntrinsics.cpp new file mode 100644 index 000000000..45af36724 --- /dev/null +++ b/lib/Passes/RemoveCallIntrinsics.cpp @@ -0,0 +1,95 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "anvill/Utils.h" + +namespace anvill { +llvm::StringRef RemoveCallIntrinsics::name(void) { + return "Remove call intrinsics."; +} + + +namespace {} + +llvm::PreservedAnalyses +RemoveCallIntrinsics::runOnIntrinsic(llvm::CallInst *remillFunctionCall, + llvm::FunctionAnalysisManager &am, + llvm::PreservedAnalyses prev) { + CHECK(remillFunctionCall->getNumOperands() == 4); + auto target_func = remillFunctionCall->getArgOperand(1); + auto state_ptr = remillFunctionCall->getArgOperand(0); + auto mem_ptr = remillFunctionCall->getArgOperand(2); + + CrossReferenceFolder xref_folder( + this->xref_resolver, + remillFunctionCall->getFunction()->getParent()->getDataLayout()); + auto ra = xref_folder.TryResolveReferenceWithClearedCache(target_func); + auto f = remillFunctionCall->getFunction(); + DCHECK(!llvm::verifyFunction(*f, &llvm::errs())); + + if (ra.references_entity || // Related to an existing lifted entity. + ra.references_global_value || // Related to a global var/func. + ra.references_program_counter) { // Related to `__anvill_pc`. 
+
+    std::shared_ptr<const CallableDecl> callable_decl =
+        spec.FunctionAt(ra.u.address);
+
+    if (auto pc_val =
+            GetMetadata(lifter.Options().pc_metadata_name, *remillFunctionCall);
+        pc_val.has_value()) {
+      if (auto bb_uid = GetBasicBlockUid(f); bb_uid.has_value()) {
+        auto block_contexts = spec.GetBlockContexts();
+        const auto &bb_ctx =
+            block_contexts.GetBasicBlockContextForUid(*bb_uid)->get();
+        auto func = bb_ctx.GetParentFunctionAddress();
+        if (auto override_decl = spec.CallSiteAt({func, *pc_val})) {
+          DLOG(INFO) << "Overriding call site at " << std::hex << *pc_val
+                     << " in " << std::hex << func;
+          callable_decl = std::move(override_decl);
+        }
+      }
+    }
+
+    auto *entity = this->xref_resolver.EntityAtAddress(ra.u.address);
+    if (callable_decl && entity) {
+      llvm::IRBuilder<> ir(remillFunctionCall->getParent());
+      ir.SetInsertPoint(remillFunctionCall);
+
+
+      const remill::IntrinsicTable table(f->getParent());
+      DLOG(INFO) << "Replacing call from: "
+                 << remill::LLVMThingToString(remillFunctionCall)
+                 << " with call to " << std::hex << ra.u.address
+                 << " d has: " << std::string(entity->getName());
+      auto *new_mem = callable_decl->CallFromLiftedBlock(
+          entity, lifter.Options().TypeDictionary(), table, ir, state_ptr,
+          mem_ptr);
+
+      remillFunctionCall->replaceAllUsesWith(new_mem);
+      remillFunctionCall->eraseFromParent();
+      prev.intersect(llvm::PreservedAnalyses::none());
+    }
+  }
+
+  DCHECK(!llvm::verifyFunction(*f, &llvm::errs()));
+
+  return prev;
+}
+
+
+llvm::PreservedAnalyses RemoveCallIntrinsics::INIT_RES =
+    llvm::PreservedAnalyses::all();
+
+
+bool RemoveCallIntrinsics::isTargetInstrinsic(const llvm::CallInst *callinsn) {
+  return callinsn->getCalledFunction() != nullptr &&
+         callinsn->getCalledFunction()->getName().startswith(
+             "__remill_function_call");
+}
+}  // namespace anvill
diff --git a/lib/Passes/RemoveRemillFunctionReturns.cpp b/lib/Passes/RemoveRemillFunctionReturns.cpp
deleted file mode 100644
index 154031998..000000000
--- a/lib/Passes/RemoveRemillFunctionReturns.cpp
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2019-present, Trail of Bits, Inc.
- * All rights reserved.
- *
- * This source code is licensed in accordance with the terms specified in
- * the LICENSE file found in the root directory of this source tree.
- */
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include "Utils.h"
-
-namespace anvill {
-namespace {
-
-// Remove a single case of a call to `__remill_function_return` where the
-// return address reaches the `pc` argument of the call.
-static void FoldReturnAddressMatch(llvm::CallBase *call) {
-  auto module = call->getModule();
-  auto ret_addr =
-      llvm::dyn_cast<llvm::Instruction>(call->getArgOperand(remill::kPCArgNum));
-  auto mem_ptr = call->getArgOperand(remill::kMemoryPointerArgNum);
-  CopyMetadataTo(call, mem_ptr);
-  call->replaceAllUsesWith(mem_ptr);
-  call->eraseFromParent();
-
-  // Work up the use list of casts back to the source of this return
-  // address, eliminating as many of those values as possible.
-  while (ret_addr && ret_addr->use_empty()) {
-
-    // Cast of `llvm.returnaddress`.
-    if (auto cast_inst = llvm::dyn_cast<llvm::CastInst>(ret_addr)) {
-      auto next_ret_addr =
-          llvm::dyn_cast<llvm::Instruction>(cast_inst->getOperand(0));
-      ret_addr->eraseFromParent();
-      ret_addr = next_ret_addr;
-
-    // Call to `llvm.returnaddress`.
-    } else if (IsReturnAddress(module, ret_addr)) {
-      ret_addr->eraseFromParent();
-      break;
-
-    // Who knows?!
- } else { - LOG(ERROR) - << "Encountered unexpected instruction when removing return address: " - << remill::LLVMThingToString(ret_addr); - break; - } - } -} - -// Override the return address in the function `func` with values from -// `fixups`. -static void OverwriteReturnAddress( - llvm::Function &func, llvm::Function *addr_of_ret_addr_func, - std::vector> &fixups) { - - // Get the address of our return address. - const auto addr_of_ret_addr = llvm::CallInst::Create( - addr_of_ret_addr_func, {}, llvm::None, llvm::Twine::createNull(), - &(func.getEntryBlock().front())); - - for (auto &[call, ret_addr] : fixups) { - // Store the return address. - llvm::IRBuilder<> ir(call); - auto *bit_cast = ir.CreateBitCast(addr_of_ret_addr, - llvm::PointerType::get(ir.getContext(), 0)); - CopyMetadataTo(call, bit_cast); - auto *store = ir.CreateStore(ret_addr, bit_cast); - CopyMetadataTo(call, store); - - // Get rid of the `__remill_function_return`. - auto *mem_ptr = call->getArgOperand(remill::kMemoryPointerArgNum); - CopyMetadataTo(call, mem_ptr); - call->replaceAllUsesWith(mem_ptr); - call->eraseFromParent(); - } -} - -} // namespace - -llvm::StringRef RemoveRemillFunctionReturns::name(void) { - return "RemoveRemillFunctionReturns"; -} - -// Try to identify the patterns of `__remill_function_call` that we can -// remove. -llvm::PreservedAnalyses -RemoveRemillFunctionReturns::run(llvm::Function &func, - llvm::FunctionAnalysisManager &AM) { - const auto module = func.getParent(); - CrossReferenceFolder xref_folder(xref_resolver, module->getDataLayout()); - StackPointerResolver sp_resolver(module); - - std::vector matches_pattern; - std::vector> fixups; - - for (auto &inst : llvm::instructions(func)) { - if (auto call = llvm::dyn_cast(&inst)) { - if (auto func = call->getCalledFunction(); - func && func->getName() == "__remill_function_return") { - auto ret_addr = call->getArgOperand(remill::kPCArgNum) - ->stripPointerCastsAndAliases(); - switch (QueryReturnAddress(xref_folder, sp_resolver, module, ret_addr)) { - case kFoundReturnAddress: matches_pattern.push_back(call); break; - - // Do nothing if it's a symbolic stack pointer load; we're probably - // running this pass too early. - case kFoundSymbolicStackPointerLoad: break; - - // Here we'll do an arch-specific fixup. - case kUnclassifiableReturnAddress: - fixups.emplace_back(call, ret_addr); - break; - } - } - } - } - - auto ret = false; - - // Go remove all the matches that we can. - for (auto call : matches_pattern) { - FoldReturnAddressMatch(call); - ret = true; - } - - // Go use the `llvm.addressofreturnaddress` to store replace the return - // address. - if (!fixups.empty()) { - if (auto addr_of_ret_addr_func = AddressOfReturnAddressFunction(module)) { - OverwriteReturnAddress(func, addr_of_ret_addr_func, fixups); - ret = true; - } - } - - return ConvertBoolToPreserved(ret); -} - -// Returns `true` if `val` is a return address. 
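In summary, the deleted classifier below produced a three-way result, roughly (conditions paraphrased from the code that follows):

//   llvm.returnaddress / __anvill_ra expression  -> kFoundReturnAddress
//   load/read through an __anvill_sp expression  -> kFoundSymbolicStackPointerLoad
//   anything else                                -> kUnclassifiableReturnAddress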
-ReturnAddressResult -RemoveRemillFunctionReturns::QueryReturnAddress( - const CrossReferenceFolder &xref_folder, - const StackPointerResolver &sp_resolver, - llvm::Module *module, - llvm::Value *val) const { - - if (IsReturnAddress(module, val)) { - return kFoundReturnAddress; - } - - if (auto call = llvm::dyn_cast(val)) { - if (auto func = call->getCalledFunction()) { - if (func->getName().startswith("__remill_read_memory_")) { - auto addr = call->getArgOperand(1); // Address - if (IsRelatedToStackPointer(module, addr)) { - return kFoundSymbolicStackPointerLoad; - } else { - return kUnclassifiableReturnAddress; - } - } - } - return kUnclassifiableReturnAddress; - - } else if (auto li = llvm::dyn_cast(val)) { - if (IsRelatedToStackPointer(module, li->getPointerOperand())) { - return kFoundSymbolicStackPointerLoad; - } else { - return kUnclassifiableReturnAddress; - } - - } else if (auto pti = llvm::dyn_cast(val)) { - return QueryReturnAddress(xref_folder, sp_resolver, module, - pti->getOperand(0)); - - } else if (auto cast = llvm::dyn_cast(val)) { - return QueryReturnAddress(xref_folder, sp_resolver, module, - cast->getOperand(0)); - - } else if (IsRelatedToStackPointer(module, val)) { - return kFoundSymbolicStackPointerLoad; - - // Sometimes optimizations result in really crazy looking constant expressions - // related to `__anvill_ra`, full of shifts, zexts, etc. We try to detect - // this situation by initializing a "magic" address associated with - // `__anvill_ra`, and then if we find this magic value on something that - // references `__anvill_ra`, then we conclude that all those manipulations - // in the constant expression are actually not important. - } else if (auto xr = xref_folder.TryResolveReferenceWithClearedCache(val); - xr.is_valid && xr.references_return_address && - xr.u.address == xref_folder.MagicReturnAddressValue()) { - return kFoundReturnAddress; - - } else { - return kUnclassifiableReturnAddress; - } -} - -// Transforms the bitcode to eliminate calls to `__remill_function_return`, -// where appropriate. This will not succeed for all architectures, but is -// likely to always succeed for x86(-64) and aarch64, due to their support -// for the `llvm.addressofreturnaddress` intrinsic. -// -// When we lift bitcode, we represent the control-flow transfer semantics of -// function returns with calls to `__remill_function_return`. This is another -// three-argument Remill function, where the second argument is the program -// counter. We're particularly interested in observing this program counter -// value, as it can tell us if this function respects normal return conventions -// (i.e. returns to its return address) or not. The way we try to observe this -// is by inspecting the program counter argument, and seeing if it is -// `__anvill_ra` or the (casted) value returned from the `llvm.returnaddress` -// intrinsic. -// -// When we match the expected pattern, we can eliminate calls to -// `__remill_function_return`. If we don't match the pattern, then it suggests -// that it is possible that the function alters its return address, or that -// something is preventing our analysis from deducing that the return address -// reaches the `__remill_function_return` call's program counter argument. -// -// On x86(-64) and AArch64, we can use the `llvm.addressofreturnaddress` to -// update the return address in place when we fail to match the pattern, -// thereby letting us eliminate the call to `__remill_function_return`. 
-// -// NOTE(pag): This pass should be applied as late as possible, as the call to -// `__remill_function_return` depends upon the memory pointer. -void AddRemoveRemillFunctionReturns( - llvm::FunctionPassManager &fpm, - const CrossReferenceResolver &xref_resolver) { - fpm.addPass(RemoveRemillFunctionReturns(xref_resolver)); -} - -} // namespace anvill diff --git a/lib/Passes/RemoveStackPointerCExprs.cpp b/lib/Passes/RemoveStackPointerCExprs.cpp index 416c45a60..35c50f511 100644 --- a/lib/Passes/RemoveStackPointerCExprs.cpp +++ b/lib/Passes/RemoveStackPointerCExprs.cpp @@ -6,11 +6,10 @@ * the LICENSE file found in the root directory of this source tree. */ -#include - #include #include #include +#include #include #include #include @@ -26,19 +25,19 @@ namespace { class ConcreteStackPointerResolver final : public NullCrossReferenceResolver { private: - llvm::Module * const module; + llvm::Module *const module; const StackFrameRecoveryOptions &options; public: virtual ~ConcreteStackPointerResolver(void) = default; inline explicit ConcreteStackPointerResolver( - llvm::Module *module_, - const StackFrameRecoveryOptions &options_) - : module(module_), options(options_) {} + llvm::Module *module_, const StackFrameRecoveryOptions &options_) + : module(module_), + options(options_) {} - std::optional AddressOfEntity( - llvm::Constant *ent) const final { + std::optional + AddressOfEntity(llvm::Constant *ent) const final { if (!IsStackPointer(module, ent)) { return std::nullopt; } @@ -88,13 +87,13 @@ RemoveStackPointerCExprs::run(llvm::Function &func, return llvm::PreservedAnalyses::all(); } - llvm::Module * const module = func.getParent(); + llvm::Module *const module = func.getParent(); const llvm::DataLayout &dl = module->getDataLayout(); const auto addr_size = dl.getPointerSizeInBits(0); ConcreteStackPointerResolver resolver(module, options); CrossReferenceFolder folder(resolver, dl); - StackPointerResolver stack_resolver(module); + StackPointerResolver stack_resolver(module, {}); std::vector worklist; diff --git a/lib/Passes/ReplaceStackReferences.cpp b/lib/Passes/ReplaceStackReferences.cpp new file mode 100644 index 000000000..958cd5021 --- /dev/null +++ b/lib/Passes/ReplaceStackReferences.cpp @@ -0,0 +1,380 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "anvill/Declarations.h" +#include "anvill/Utils.h" + +namespace anvill { + +namespace { + +class StackCrossReferenceResolver : public CrossReferenceFolder { + private: + const llvm::DataLayout &dl; + const AbstractStack &abs_stack; + + ResolvedCrossReference StackPtrToXref(std::int64_t off) const { + ResolvedCrossReference rxref; + rxref.is_valid = true; + rxref.references_stack_pointer = true; + rxref.size = dl.getPointerSizeInBits(0); + rxref.u.displacement = off; + return rxref; + } + + public: + StackCrossReferenceResolver(const CrossReferenceResolver &resolver, + const llvm::DataLayout &dl, + const AbstractStack &abs_stack) + : CrossReferenceFolder(resolver, dl), + dl(dl), + abs_stack(abs_stack) {} + + protected: + virtual std::optional + ResolveValueCallback(llvm::Value *v) const override { + DLOG(INFO) << "Looking at: " << remill::LLVMThingToString(v); + auto stack_ref = abs_stack.StackPointerFromStackCompreference(v); + if (stack_ref) { + return this->StackPtrToXref(*stack_ref); + 
}
+
+    return std::nullopt;
+  }
+};
+
+
+std::optional<llvm::Value *>
+GetPtrToOffsetInto(llvm::IRBuilder<> &ir, const llvm::DataLayout &dl,
+                   llvm::Type *deref_type, llvm::Value *ptr,
+                   size_t offset_into_type) {
+  if (offset_into_type == 0) {
+    return ptr;
+  }
+
+
+  llvm::APInt ap_off(64, offset_into_type, false);
+  auto elem_type = deref_type;
+  auto index = dl.getGEPIndexForOffset(elem_type, ap_off);
+
+  if (!index) {
+    return std::nullopt;
+  }
+  auto i32 = llvm::IntegerType::getInt32Ty(deref_type->getContext());
+  return ir.CreateGEP(
+      deref_type, ptr,
+      {llvm::ConstantInt::get(i32, 0),
+       llvm::ConstantInt::get(llvm::IntegerType::get(deref_type->getContext(),
+                                                     index->getBitWidth()),
+                              *index)});
+}
+} // namespace
+
+llvm::StringRef ReplaceStackReferences::name(void) {
+  return "Replace stack references";
+}
+
+
+// Contains a list of `load` and `store` instructions that reference
+// the stack pointer.
+using StackPointerRegisterUsages = std::vector<llvm::Use *>;
+
+// Enumerates all the store and load instructions that reference
+// the stack.
+static StackPointerRegisterUsages
+EnumerateStackPointerUsages(llvm::Function &function,
+                            llvm::ArrayRef additional_sps) {
+  StackPointerRegisterUsages output;
+  StackPointerResolver sp_resolver(function.getParent(), additional_sps);
+
+  for (auto &basic_block : function) {
+    for (auto &instr : basic_block) {
+      for (auto i = 0u, num_ops = instr.getNumOperands(); i < num_ops; ++i) {
+        auto &use = instr.getOperandUse(i);
+        if (auto val = use.get(); llvm::isa(val) &&
+                                  sp_resolver.IsRelatedToStackPointer(val)) {
+          output.emplace_back(&use);
+        }
+      }
+    }
+  }
+
+  return output;
+}
+
+struct BasicBlockVar {
+  size_t index;
+  ParameterDecl decl;
+};
+
+
+struct StackVariable {
+  // Offset into this variable.
+  std::int64_t offset;
+  BasicBlockVar decl;
+};
+
+class StackModel {
+ private:
+  std::map frame;
+  const remill::Arch *arch;
+
+ public:
+  uint64_t GetParamDeclSize(const ParameterDecl &decl) {
+    CHECK(arch->DataLayout().getTypeSizeInBits(decl.type) != 0);
+    return arch->DataLayout().getTypeSizeInBits(decl.type) / 8;
+  }
+
+  StackModel(const BasicBlockContext &cont, const remill::Arch *arch,
+             const AbstractStack &abs_stack) {
+    this->arch = arch;
+    size_t index = 0;
+    // This feels weird; maybe it should cover all stack variables, but then
+    // if a variable isn't live, we would have discovered something that
+    // should have been live.
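    // For exposition (editorial sketch, not part of this patch; HasMemLoc and
    // the declaration layout come from the surrounding code, everything else
    // is illustrative): the admission test applied by the loop below could be
    // read as the standalone predicate
    //
    //   static bool IsFrameSlot(const ParameterDecl &p,
    //                           const remill::Arch *arch) {
    //     return HasMemLoc(p) && p.ordered_locs.size() == 1 &&
    //            p.ordered_locs[0].mem_reg->name ==
    //                arch->StackPointerRegisterName();
    //   }
    //
    // That is, only single-location, stack-pointer-based parameters become
    // frame slots; register-resident parameters never occupy frame memory.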
+    for (const auto &v : cont.LiveParamsAtEntryAndExit()) {
+      if (HasMemLoc(v.param) && v.param.ordered_locs.size() == 1 &&
+          v.param.ordered_locs[0].mem_reg->name ==
+              arch->StackPointerRegisterName()) {
+        this->InsertFrameVar(index, v.param);
+      }
+      index += 1;
+    }
+  }
+
+
+  std::optional<BasicBlockVar> GetParamLte(std::int64_t off) {
+    auto prec = this->frame.lower_bound(off);
+    if (prec == this->frame.end()) {
+      if (this->frame.begin() != this->frame.end() &&
+          this->frame.begin()->first <= off) {
+        return this->frame.begin()->second;
+      }
+      return std::nullopt;
+    }
+
+    if (prec->first == off) {
+      return {prec->second};
+    }
+
+    if (prec == this->frame.begin()) {
+      return std::nullopt;
+    }
+
+
+    auto prev_decl = (--prec)->second;
+    CHECK(prev_decl.decl.ordered_locs[0].mem_offset <= off);
+    return {prev_decl};
+  }
+
+  std::optional<StackVariable> GetOverlappingParam(std::int64_t off) {
+
+    auto vlte = GetParamLte(off);
+
+    if (!vlte.has_value()) {
+      return std::nullopt;
+    }
+
+    DLOG(INFO) << "value found lte offset: "
+               << vlte->decl.ordered_locs[0].mem_offset << " " << off;
+
+    auto offset_into_var = off - vlte->decl.ordered_locs[0].mem_offset;
+    if (offset_into_var <
+        static_cast<std::int64_t>(GetParamDeclSize(vlte->decl))) {
+      return {{offset_into_var, *vlte}};
+    }
+    DLOG(INFO) << "Looking for off " << off << " but not fitting "
+               << offset_into_var << " got off "
+               << vlte->decl.ordered_locs[0].mem_offset;
+    return std::nullopt;
+  }
+
+
+  bool VarOverlaps(std::int64_t off) {
+
+
+    return GetOverlappingParam(off).has_value();
+  }
+
+
+  void InsertFrameVar(size_t index, ParameterDecl var) {
+    if (VarOverlaps(var.ordered_locs[0].mem_offset) ||
+        VarOverlaps(var.ordered_locs[0].mem_offset + GetParamDeclSize(var) -
+                    1)) {
+
+      auto oparam = GetOverlappingParam(var.ordered_locs[0].mem_offset);
+      if (!VarOverlaps(var.ordered_locs[0].mem_offset)) {
+        oparam = GetOverlappingParam(var.ordered_locs[0].mem_offset +
+                                     GetParamDeclSize(var) - 1);
+      }
+
+      LOG(ERROR) << "Inserting variable that overlaps with current frame "
+                 << var.ordered_locs[0].mem_offset
+                 << " with size: " << GetParamDeclSize(var) << " Overlaps with "
+                 << oparam->decl.decl.ordered_locs[0].mem_offset
+                 << " with size " << GetParamDeclSize(oparam->decl.decl);
+      return;
+    }
+
+    this->frame.insert({var.ordered_locs[0].mem_offset, {index, var}});
+  }
+};
+
+llvm::PreservedAnalyses ReplaceStackReferences::runOnBasicBlockFunction(
+    llvm::Function &F, llvm::FunctionAnalysisManager &AM,
+    const BasicBlockContext &cont, const FunctionDecl &fdecl) {
+  size_t overrunsz = cont.GetMaxStackSize() - cont.GetStackSize();
+  llvm::IRBuilder<> ent_insert(&F.getEntryBlock(), F.getEntryBlock().begin());
+  auto overrunptr = ent_insert.CreateAlloca(
+      AbstractStack::StackTypeFromSize(F.getContext(), overrunsz));
+
+  DLOG(INFO) << "Replacing stack vars in bb: " << std::hex
+             << fdecl.address << " " << std::dec
+             << (*anvill::GetBasicBlockUid(&F)).value;
+  DLOG(INFO) << "Stack size " << cont.GetStackSize();
+  DLOG(INFO) << "Max stack size " << cont.GetMaxStackSize();
+  AbstractStack stk(
+      F.getContext(),
+      {{cont.GetStackSize(), anvill::GetBasicBlockStackPtr(&F)},
+       {overrunsz, overrunptr}},
+      lifter.Options().stack_frame_recovery_options.stack_grows_down,
+      cont.GetPointerDisplacement());
+
+  StackModel smodel(cont, this->lifter.Options().arch, stk);
+
+  NullCrossReferenceResolver resolver;
+  StackCrossReferenceResolver folder(resolver, this->lifter.DataLayout(), stk);
+
+  // TODO(Ian): do a fixed size here.
+  std::vector<std::pair<llvm::Use *, std::variant<llvm::Value *, std::int64_t>>>
+      to_replace_vars;
+
+  auto collision = false;
+  // TODO(Ian): also
handle resolving from references where the base is inside a bb var.
+  for (auto use :
+       EnumerateStackPointerUsages(F, {anvill::GetBasicBlockStackPtr(&F)})) {
+    const auto reference = folder.TryResolveReferenceWithCaching(use->get());
+    if (!reference.is_valid || !reference.references_stack_pointer) {
+      continue;
+    }
+
+    // The offset from the stack pointer. Force it to 32 bits, then
+    // sign-extend.
+    int64_t stack_offset = reference.Displacement(this->lifter.DataLayout());
+
+    auto referenced_variable = smodel.GetOverlappingParam(stack_offset);
+
+    // TODO(Ian): handle a nonzero offset.
+    if (referenced_variable.has_value()) {
+
+      auto g = cont.ProvidePointerFromFunctionArgs(
+          &F, referenced_variable->decl.decl);
+      auto ptr = GetPtrToOffsetInto(ent_insert, this->lifter.DataLayout(),
+                                    referenced_variable->decl.decl.type, g,
+                                    referenced_variable->offset);
+      if (ptr) {
+        to_replace_vars.push_back({use, *ptr});
+        continue;
+      }
+      LOG(ERROR) << "Couldn't create a pointer for offset "
+                 << referenced_variable->offset << " into a "
+                 << remill::LLVMThingToString(
+                        referenced_variable->decl.decl.type);
+      collision = true;
+    }
+
+    DLOG(INFO) << "Escaping stack access " << stack_offset << " "
+               << remill::LLVMThingToString(use->get());
+
+    // Otherwise, we are going to escape the abstract stack.
+    to_replace_vars.push_back({use, stack_offset});
+  }
+
+  if (to_replace_vars.empty()) {
+    return llvm::PreservedAnalyses::all();
+  }
+
+  for (auto [use, v] : to_replace_vars) {
+    auto use_of_variable = use;
+    auto replace_use = [use_of_variable, overrunptr](llvm::Value *with_ptr) {
+      if (llvm::isa(use_of_variable->get()->getType())) {
+        use_of_variable->set(with_ptr);
+      } else if (llvm::isa(
+                     use_of_variable->get()->getType())) {
+
+        llvm::IRBuilder<> ir(overrunptr);
+
+        if (auto ptr = llvm::dyn_cast<llvm::Instruction>(with_ptr)) {
+          ir.SetInsertPoint(ptr->getNextNode());
+        }
+
+        use_of_variable->set(
+            ir.CreatePointerCast(with_ptr, use_of_variable->get()->getType()));
+      }
+    };
+    if (std::holds_alternative<llvm::Value *>(v)) {
+      replace_use(std::get<llvm::Value *>(v));
+    } else {
+      auto offset = std::get<std::int64_t>(v);
+      auto ptr = stk.PointerToStackMemberFromOffset(ent_insert, offset);
+      if (ptr) {
+        replace_use(*ptr);
+      } else {
+        LOG(ERROR) << "No pointer for offset " << offset;
+        auto off = stk.StackOffsetFromStackPointer(offset);
+        if (off) {
+          LOG(ERROR) << "Was supposed to use offset " << *off;
+        }
+      }
+    }
+  }
+
+  DCHECK(!llvm::verifyFunction(F, &llvm::errs()));
+
+
+  // This isn't a sound check at all: we could still derive a pointer to one
+  // variable from another variable. Essentially, we need to check that all
+  // derivations are in bounds...
+  if (EnumerateStackPointerUsages(F, {}).empty() && !collision) {
+    auto noalias =
+        llvm::Attribute::get(F.getContext(), llvm::Attribute::NoAlias);
+
+    // Note(Ian): the theory here is that if all stack references have been
+    // resolved, then any pointer use of the stack only derives from
+    // unresolved offsets.
+    // TODO(Ian): this isn't sound if the resolved stack pointer is then
+    // manipulated further, causing it to land inside a variable.
+    anvill::GetBasicBlockStackPtr(&F)->addAttr(noalias);
+
+    for (auto &param : cont.GetParams()) {
+      cont.ProvidePointerFromFunctionArgs(&F, param)->addAttr(noalias);
+    }
+  }
+
+  return to_replace_vars.empty() ?
llvm::PreservedAnalyses::all()
+                                : llvm::PreservedAnalyses::none();
+}
+} // namespace anvill
diff --git a/lib/Passes/RewriteVectorOps.cpp b/lib/Passes/RewriteVectorOps.cpp
new file mode 100644
index 000000000..ab71a0cd5
--- /dev/null
+++ b/lib/Passes/RewriteVectorOps.cpp
@@ -0,0 +1,230 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+// The goal here is to rewrite vector twiddling into integer ops.
+/*
+  %.sroa.23.24.vec.expand = shufflevector <4 x i8> %11, <4 x i8> poison, <8 x i32>
+  %.sroa.23.28.vec.expand = shufflevector <4 x i8> %12, <4 x i8> poison, <8 x i32>
+  %.sroa.23.28.vecblend = shufflevector <8 x i8> %.sroa.23.24.vec.expand, <8 x i8> %.sroa.23.28.vec.expand, <8 x i32>
+
+  so in this case we'll get something like (little-endian):
+  a = shl(zext(%12), 32)
+  b = zext(%11)
+  c = or(a, b)
+
+*/
+namespace anvill {
+
+llvm::StringRef RewriteVectorOps::name(void) {
+  return "RewriteVectorOps";
+}
+
+std::optional<llvm::IntegerType *> IntegerTypeForVector(llvm::VectorType *vec) {
+  if (!vec->isScalableTy()) {
+    return llvm::IntegerType::get(
+        vec->getContext(), vec->getPrimitiveSizeInBits().getFixedValue());
+  }
+  return std::nullopt;
+}
+
+struct RewrittenInteger {
+  llvm::Value *target;
+  llvm::IntegerType *to_int_ty;
+  std::pair bit_range;
+  uint32_t bitshift;
+  bool poison;
+};
+
+struct DecomposeState {
+  uint32_t curr_index;
+  const llvm::ShuffleVectorInst &sv;
+
+
+  bool ConsumedAll() {
+    return curr_index >= sv.getShuffleMask().size();
+  }
+
+  uint32_t GetOpLengths() {
+    auto v = llvm::cast<llvm::VectorType>(sv.getOperand(0)->getType());
+    return v->getElementCount().getKnownMinValue();
+  }
+
+  bool isInSameVec(uint32_t ind1, uint32_t ind2) {
+    return (ind1 < GetOpLengths() && ind2 < GetOpLengths()) ||
+           (ind1 >= GetOpLengths() && ind2 >= GetOpLengths());
+  }
+
+
+  std::optional ElementSize() {
+    auto ty = llvm::cast<llvm::VectorType>(this->sv.getOperand(0)->getType());
+    auto el_ty = ty->getElementType();
+    auto sz = el_ty->getPrimitiveSizeInBits();
+    if (sz) {
+      return sz;
+    }
+
+    return std::nullopt;
+  }
+
+
+  std::optional<RewrittenInteger> ConsumeNext() {
+    uint32_t start_index = this->curr_index;
+    int first_end = sv.getMaskValue(this->curr_index);
+    int prev_ind = first_end;
+    this->curr_index += 1;
+    DLOG(INFO) << "first: " << first_end;
+    // We are looking for the last mask index such that
+    // [start_index, curr_index) is a sequence of either poisons or
+    // contiguous accesses to a single op.
+    while (!this->ConsumedAll()) {
+      auto next = sv.getMaskValue(this->curr_index);
+      DLOG(INFO) << "next: " << next;
+      // We can group either poisons or sequences.
+      if (!(next == llvm::PoisonMaskElem && prev_ind == llvm::PoisonMaskElem) &&
+          (!isInSameVec(prev_ind, next) || prev_ind + 1 != next)) {
+        break;
+      }
+
+      prev_ind = next;
+      this->curr_index += 1;
+    }
+
+
+    bool is_first_op = first_end < static_cast<int>(GetOpLengths());
+
+    llvm::Value *target = is_first_op ?
sv.getOperand(0) : sv.getOperand(1);
+    std::pair element_range = std::make_pair(0, 0);
+    auto poison = first_end == llvm::PoisonMaskElem;
+    if (!poison) {
+      element_range = std::make_pair(first_end, prev_ind + 1);
+      if (!is_first_op) {
+        element_range.first = element_range.first - GetOpLengths();
+        element_range.second = element_range.second - GetOpLengths();
+      }
+    }  // prev_ind is the last inclusive index, so bump by one to make this a
+       // half-open [) range.
+    // Convert the element range into a bit range.
+    CHECK(element_range.second >= element_range.first);
+    auto sz = this->ElementSize();
+    if (!sz) {
+      return std::nullopt;
+    }
+
+    std::pair bit_range;
+    // The first member of the range is the lshr amount for cutting off the
+    // low bits; the second describes the mask.
+    if (sv.getModule()->getDataLayout().isLittleEndian()) {
+      bit_range =
+          std::make_pair(element_range.first * *sz, element_range.second * *sz);
+    } else {
+      bit_range = std::make_pair((GetOpLengths() - element_range.second) * *sz,
+                                 (GetOpLengths() - element_range.first) * *sz);
+    }
+
+    auto ity =
+        IntegerTypeForVector(llvm::cast<llvm::VectorType>(target->getType()));
+    if (!ity) {
+      return std::nullopt;
+    }
+    uint32_t bitshift;
+    if (sv.getModule()->getDataLayout().isLittleEndian()) {
+      bitshift = *sz * start_index;
+    } else {
+      auto op_distance = sv.getType()->getElementCount().getFixedValue() -
+                         (element_range.second - element_range.first);
+      DLOG(INFO) << remill::LLVMThingToString(target);
+      DLOG(INFO) << "odist: " << op_distance;
+      DLOG(INFO) << "start_ind: " << start_index;
+      DLOG(INFO) << "diff: " << (op_distance - start_index);
+      bitshift = *sz * (op_distance - start_index);
+    }
+    return RewrittenInteger{target, *ity, bit_range, bitshift, poison};
+  }
+};
+
+// This isn't super smart: we just check that each vector is extracted once.
+std::optional<std::vector<RewrittenInteger>>
+Rewrite(const llvm::ShuffleVectorInst &sv) {
+  std::vector<RewrittenInteger> rewrites;
+  DecomposeState st{0, sv};
+  while (!st.ConsumedAll()) {
+    auto nxt = st.ConsumeNext();
+    if (!nxt) {
+      return std::nullopt;
+    }
+    rewrites.push_back(*nxt);
+  }
+  return rewrites;
+}
+
+
+llvm::PreservedAnalyses
+RewriteVectorOps::run(llvm::Function &F, llvm::FunctionAnalysisManager &AM) {
+  std::vector<llvm::ShuffleVectorInst *> svs;
+  for (auto &insn : llvm::instructions(F)) {
+    if (llvm::ShuffleVectorInst *sv =
+            llvm::dyn_cast<llvm::ShuffleVectorInst>(&insn)) {
+      svs.push_back(sv);
+    }
+  }
+
+  auto pres = llvm::PreservedAnalyses::all();
+  for (auto sv : svs) {
+    auto vec_type = sv->getType();
+    if (vec_type->isScalableTy()) {
+      LOG(ERROR) << "Could not rewrite sv, unable to rewrite scalable type: "
+                 << remill::LLVMThingToString(sv);
+      continue;
+    }
+
+    auto maybe_rws = Rewrite(*sv);
+    if (!maybe_rws) {
+      LOG(ERROR) << "Could not rewrite sv, unable to split: "
+                 << remill::LLVMThingToString(sv);
+      continue;
+    }
+    auto rws = *maybe_rws;
+    auto base_int_ty = llvm::IntegerType::get(
+        F.getContext(), vec_type->getScalarSizeInBits() *
+                            vec_type->getElementCount().getFixedValue());
+    llvm::Value *base_value = llvm::Constant::getNullValue(base_int_ty);
+    llvm::IRBuilder<> ir(sv);
+    for (const auto &rw : rws) {
+      // It must be a vector, as it's a shufflevector operand.
+      if (!rw.poison) {
+        auto init_int = ir.CreateBitCast(rw.target, rw.to_int_ty);
+
+        auto casted = ir.CreateZExtOrTrunc(init_int, base_int_ty);
+        auto target_itype =
+            llvm::IntegerType::get(F.getContext(), rw.bit_range.second);
+        auto dropped_high_bits = ir.CreateAnd(
+            casted,
+            llvm::ConstantInt::get(base_int_ty, target_itype->getBitMask()));
+        auto extracted = ir.CreateLShr(dropped_high_bits, rw.bit_range.first);
+        auto placed =
ir.CreateShl(extracted, rw.bitshift); + base_value = ir.CreateOr(base_value, placed); + } + } + auto r = ir.CreateBitCast(base_value, vec_type); + sv->replaceAllUsesWith(r); + sv->eraseFromParent(); + pres = llvm::PreservedAnalyses::none(); + } + + return pres; +} + + +} // namespace anvill \ No newline at end of file diff --git a/lib/Passes/SliceManager.cpp b/lib/Passes/SliceManager.cpp index 46fdcc8bd..86f464c5f 100644 --- a/lib/Passes/SliceManager.cpp +++ b/lib/Passes/SliceManager.cpp @@ -6,17 +6,17 @@ * the LICENSE file found in the root directory of this source tree. */ +#include #include - #include #include #include #include #include #include + #include #include -#include namespace anvill { @@ -57,14 +57,13 @@ void SliceManager::insertClonedSliceIntoFunction( auto bb = llvm::BasicBlock::Create(this->mod.get()->getContext(), "slicebasicblock." + std::to_string(id.id), targetFunc); + llvm::IRBuilder<> builder(bb); - std::for_each(slice.begin(), slice.end(), [bb](llvm::Instruction *insn) { - bb->getInstList().push_back(insn); - }); + std::for_each(slice.begin(), slice.end(), + [&builder](llvm::Instruction *insn) { builder.Insert(insn); }); - llvm::ReturnInst::Create(this->mod.get()->getContext(), newReturn, - bb); + llvm::ReturnInst::Create(this->mod.get()->getContext(), newReturn, bb); return; } @@ -177,7 +176,6 @@ SliceManager::addSlice(llvm::ArrayRef slice, std::for_each(cloned.begin(), cloned.end(), [&mapper](llvm::Instruction *insn) { llvm::RemapInstruction(insn, mapper); - }); @@ -187,28 +185,30 @@ SliceManager::addSlice(llvm::ArrayRef slice, this->insertClonedSliceIntoFunction(id, slice_repr, new_ret, cloned); // Remove anvill pc to make interpretable - if (auto anvill_pc = this->mod.get()->getGlobalVariable(::anvill::kSymbolicPCName)) { + if (auto anvill_pc = + this->mod.get()->getGlobalVariable(::anvill::kSymbolicPCName)) { remill::ReplaceAllUsesOfConstant( - anvill_pc, llvm::Constant::getNullValue(anvill_pc->getType()), this->mod.get()); + anvill_pc, llvm::Constant::getNullValue(anvill_pc->getType()), + this->mod.get()); } if (!this->replaceAllGVConstantsWithInterpretableValue(cloned)) { slice_repr->eraseFromParent(); return std::nullopt; } - + assert(remill::VerifyModule(this->mod.get())); return {id}; } -InterpreterBuilder SliceManager::IntoInterpreterBuilder(SliceManager&& x) { +InterpreterBuilder SliceManager::IntoInterpreterBuilder(SliceManager &&x) { return InterpreterBuilder(std::move(x.mod)); } InterpreterBuilder::Slice InterpreterBuilder::getSlice(SliceID i) const { auto repr = this->mod->getFunction(SliceManager::getFunctionName(i)); - return InterpreterBuilder::Slice(repr,i); + return InterpreterBuilder::Slice(repr, i); } SliceInterpreter InterpreterBuilder::getInterp() const { diff --git a/lib/Passes/SplitStackFrameAtReturnAddress.cpp b/lib/Passes/SplitStackFrameAtReturnAddress.cpp index cfe8198ea..72b99642a 100644 --- a/lib/Passes/SplitStackFrameAtReturnAddress.cpp +++ b/lib/Passes/SplitStackFrameAtReturnAddress.cpp @@ -6,10 +6,9 @@ * the LICENSE file found in the root directory of this source tree. 
*/ -#include - #include #include +#include #include #include #include @@ -22,6 +21,7 @@ #include #include + #include "Utils.h" namespace anvill { @@ -31,21 +31,10 @@ namespace { static llvm::AllocaInst *FindStackFrameAlloca(llvm::Function &func) { for (auto &inst : func.getEntryBlock()) { auto alloca = llvm::dyn_cast(&inst); - if (!alloca) { + if (!alloca || !alloca->hasMetadata(kStackMetadata)) { continue; } - auto frame_type = llvm::dyn_cast( - alloca->getAllocatedType()); - if (!frame_type || frame_type->isLiteral()) { - continue; - } - - auto frame_name = frame_type->getName(); - if (!frame_name.startswith(func.getName()) || - !frame_name.endswith(kStackFrameTypeNameSuffix)) { - continue; - } return alloca; } @@ -59,8 +48,8 @@ struct FixedOffsetUse { }; // Find all (indirect) uses of the stack frame allocation. -static std::vector FindFixedOffsetUses( - llvm::AllocaInst *alloca) { +static std::vector +FindFixedOffsetUses(llvm::AllocaInst *alloca) { const llvm::DataLayout &dl = alloca->getModule()->getDataLayout(); const auto addr_size = dl.getIndexSizeInBits(0); @@ -70,8 +59,7 @@ static std::vector FindFixedOffsetUses( std::vector> work_list; work_list.emplace_back(alloca, llvm::APInt(addr_size, 0u, true)); - auto add_to_found = [&found] (llvm::Use &use, - llvm::APInt offset) { + auto add_to_found = [&found](llvm::Use &use, llvm::APInt offset) { FixedOffsetUse fou; fou.offset = std::move(offset); fou.use = &use; @@ -95,8 +83,7 @@ static std::vector FindFixedOffsetUses( } switch (user_inst->getOpcode()) { - default: - break; + default: break; case llvm::Instruction::BitCast: case llvm::Instruction::PtrToInt: case llvm::Instruction::IntToPtr: @@ -136,24 +123,24 @@ static void AnnotateStackUses(llvm::AllocaInst *frame_alloca, return; } - auto stack_offset_md_id = context.getMDKindID( - options.stack_offset_metadata_name); + auto stack_offset_md_id = + context.getMDKindID(options.stack_offset_metadata_name); auto zero_offset = zero_val->getSExtValue(); - auto create_metadata = - [=, &context] (llvm::Instruction *inst, int64_t offset) { - int64_t disp = 0; - if (options.stack_grows_down) { - disp = zero_offset - offset; - } else { - disp = offset - zero_offset; - } + auto create_metadata = [=, &context](llvm::Instruction *inst, + int64_t offset) { + int64_t disp = 0; + if (options.stack_grows_down) { + disp = zero_offset - offset; + } else { + disp = offset - zero_offset; + } - auto disp_val = llvm::ConstantInt::get( - zero_val->getType(), static_cast(disp), true); - auto disp_md = llvm::ValueAsMetadata::get(disp_val); - return llvm::MDNode::get(context, disp_md); - }; + auto disp_val = llvm::ConstantInt::get(zero_val->getType(), + static_cast(disp), true); + auto disp_md = llvm::ValueAsMetadata::get(disp_val); + return llvm::MDNode::get(context, disp_md); + }; // Annotate the used instructions. for (const auto &use : uses) { @@ -173,9 +160,9 @@ static void AnnotateStackUses(llvm::AllocaInst *frame_alloca, // Find a `StoreInst` that looks like it puts the return address into the // stack. Failure to find this means it likely stayed in registers. 
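// As a hedged illustration (this IR is invented for exposition, not taken
// from a real lift): the store being searched for typically has the shape
//
//   %slot = getelementptr %frame_ty, ptr %frame, i32 0, i32 N
//   store i64 %return_pc, ptr %slot
//
// where %frame is the stack frame alloca. The byte offset of %slot within
// the frame becomes the split point, so uses below and above the return
// address end up in separate allocas.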
-static const FixedOffsetUse *FindReturnAddressStore( - const std::vector &uses, - const StackFrameRecoveryOptions &options) { +static const FixedOffsetUse * +FindReturnAddressStore(const std::vector &uses, + const StackFrameRecoveryOptions &options) { const FixedOffsetUse *found = nullptr; for (const auto &use : uses) { if (auto store = llvm::dyn_cast(use.use->getUser())) { @@ -258,13 +245,8 @@ static llvm::Instruction *DemandedOffset( ptr_type = llvm::PointerType::get(context, 0); scale = 2; break; - case 0: - el_type = llvm::Type::getIntNTy(context, addr_size * 8u); - break; - default: - LOG(FATAL) - << "Unsupported address size: " << addr_size; - break; + case 0: el_type = llvm::Type::getIntNTy(context, addr_size * 8u); break; + default: LOG(FATAL) << "Unsupported address size: " << addr_size; break; } auto base = pointers[addr_size]; @@ -367,8 +349,8 @@ static void SubstituteUse( // If the user is a `load`, then replace its use of the pointer. case llvm::Instruction::Load: { auto li = llvm::dyn_cast(user_inst); - auto pty = llvm::PointerType::get( - ir.getContext(), li->getPointerAddressSpace()); + auto pty = + llvm::PointerType::get(ir.getContext(), li->getPointerAddressSpace()); auto bc = ir.CreateBitOrPointerCast(ret, pty); CopyMetadataTo(use_inst, bc); use->set(bc); @@ -392,9 +374,10 @@ static void SubstituteUse( use->set(bc); } - // Operating on the pointer. + // Operating on the pointer. } else { - auto pty = llvm::PointerType::get(ir.getContext(), si->getPointerAddressSpace()); + auto pty = llvm::PointerType::get(ir.getContext(), + si->getPointerAddressSpace()); auto bc = ir.CreateBitOrPointerCast(ret, pty); CopyMetadataTo(use_inst, bc); use->set(bc); @@ -414,7 +397,7 @@ static void SubstituteUse( to_replace.emplace(user_inst, bc); } - // This is trickier; we need to form a new GEP or something like it. + // This is trickier; we need to form a new GEP or something like it. } else { llvm::SmallVector const_indices_c; llvm::SmallVector const_indices; @@ -439,13 +422,12 @@ static void SubstituteUse( // This is the easy case, because we can replace the use with // something that was constant calculated. if (const_indices.empty()) { - auto pty = llvm::PointerType::get( - ir.getContext(), addr_space); + auto pty = llvm::PointerType::get(ir.getContext(), addr_space); auto bc = ir.CreateBitOrPointerCast(ret, pty); CopyMetadataTo(use_inst, bc); use->set(bc); - // This is the hard case, because we need to invent a new GEP. + // This is the hard case, because we need to invent a new GEP. 
} else if (!to_replace.count(user_inst)) { llvm::APInt sub_offset(addr_size * 8u, 0u); auto source_ty = gep->getSourceElementType(); @@ -454,19 +436,18 @@ static void SubstituteUse( source_ty, const_indices_c, dl, sub_offset)); auto effective_sub_offset = static_cast( - static_cast(offset) + - sub_offset.getSExtValue()); - llvm::Instruction *const sub_ret = DemandedOffset( - ir, use_inst, pointers, computed_offsets, - effective_sub_offset, addr_size); + static_cast(offset) + sub_offset.getSExtValue()); + llvm::Instruction *const sub_ret = + DemandedOffset(ir, use_inst, pointers, computed_offsets, + effective_sub_offset, addr_size); CHECK_NOTNULL(sub_ret); CopyMetadataTo(use_inst, sub_ret); - auto sub_ret_ty = llvm::GetElementPtrInst::getIndexedType( - source_ty, const_indices); - auto sub_ret_pty = llvm::PointerType::get( - ir.getContext(), addr_space); + auto sub_ret_ty = + llvm::GetElementPtrInst::getIndexedType(source_ty, const_indices); + auto sub_ret_pty = + llvm::PointerType::get(ir.getContext(), addr_space); auto bc = ir.CreateBitOrPointerCast(ret, sub_ret_pty); CopyMetadataTo(user_inst, bc); @@ -482,17 +463,17 @@ static void SubstituteUse( } } -static void SplitStackFrameAround( - llvm::AllocaInst *frame_alloca, std::vector uses, - const StackFrameRecoveryOptions &options) { +static void SplitStackFrameAround(llvm::AllocaInst *frame_alloca, + std::vector uses, + const StackFrameRecoveryOptions &options) { llvm::LLVMContext &context = frame_alloca->getContext(); - llvm::Module * const module = frame_alloca->getModule(); + llvm::Module *const module = frame_alloca->getModule(); const llvm::DataLayout &dl = module->getDataLayout(); const auto addr_size = dl.getPointerSize(0); const auto addr_size_bits = dl.getPointerSizeInBits(0); - llvm::IntegerType * const addr_type = llvm::Type::getIntNTy( - context, addr_size * 8u); + llvm::IntegerType *const addr_type = + llvm::Type::getIntNTy(context, addr_size * 8u); // If we don't find a return address store, then we'll still split at zero. // @@ -512,16 +493,14 @@ static void SplitStackFrameAround( end_of_ra = offset_of_ra + addr_size; // Log the above scenario out in case it comes up. 
- if (auto user_inst = llvm::dyn_cast( - store_use->use->getUser()); + if (auto user_inst = + llvm::dyn_cast(store_use->use->getUser()); user_inst && offset_of_ra != 0) { - LOG(INFO) - << "Offset of return address storage location in function " - << frame_alloca->getFunction()->getName().str() - << " is " << offset_of_ra << ": " - << remill::LLVMThingToString(user_inst) - << " in block " << user_inst->getParent()->getName().str(); + LOG(INFO) << "Offset of return address storage location in function " + << frame_alloca->getFunction()->getName().str() << " is " + << offset_of_ra << ": " << remill::LLVMThingToString(user_inst) + << " in block " << user_inst->getParent()->getName().str(); } } @@ -553,29 +532,29 @@ static void SplitStackFrameAround( std::unordered_map computed_offsets; std::unordered_map to_replace; - auto make_subframe = [&] ( - std::vector> use_offsets, - const char *down_name, const char *up_name, uint64_t num_slots) { - auto num_slots_val = ir.getIntN(addr_size_bits, num_slots); - if (options.stack_grows_down) { - sub_frame = ir.CreateAlloca(addr_type, 0u, num_slots_val, down_name); - } else { - sub_frame = ir.CreateAlloca(addr_type, 0u, num_slots_val, up_name); - } + auto make_subframe = + [&](std::vector> use_offsets, + const char *down_name, const char *up_name, uint64_t num_slots) { + auto num_slots_val = ir.getIntN(addr_size_bits, num_slots); + if (options.stack_grows_down) { + sub_frame = ir.CreateAlloca(addr_type, 0u, num_slots_val, down_name); + } else { + sub_frame = ir.CreateAlloca(addr_type, 0u, num_slots_val, up_name); + } - pointers.clear(); - computed_offsets.clear(); + pointers.clear(); + computed_offsets.clear(); - pointers.emplace(addr_size, sub_frame); - computed_offsets.emplace(0, sub_frame); + pointers.emplace(addr_size, sub_frame); + computed_offsets.emplace(0, sub_frame); - CopyMetadataTo(frame_alloca, sub_frame); + CopyMetadataTo(frame_alloca, sub_frame); - for (auto [use, offset] : use_offsets) { - SubstituteUse(ir, use, offset, addr_size, pointers, - computed_offsets, to_replace); - } - }; + for (auto [use, offset] : use_offsets) { + SubstituteUse(ir, use, offset, addr_size, pointers, computed_offsets, + to_replace); + } + }; if (!above.empty()) { auto num_slots = (offset_of_ra + (addr_size - 1u)) / addr_size; @@ -583,8 +562,8 @@ static void SplitStackFrameAround( } if (!below.empty()) { - auto frame_size = dl.getTypeAllocSize( - frame_alloca->getAllocatedType()).getKnownMinSize(); + auto frame_size = dl.getTypeAllocSize(frame_alloca->getAllocatedType()) + .getKnownMinValue(); auto num_slots = ((frame_size - end_of_ra) + (addr_size - 1u)) / addr_size; make_subframe(std::move(below), "locals", "parameters", num_slots); } diff --git a/lib/Passes/TransformRemillJumpIntrinsics.cpp b/lib/Passes/TransformRemillJumpIntrinsics.cpp index a444b9b73..0fca8eab4 100644 --- a/lib/Passes/TransformRemillJumpIntrinsics.cpp +++ b/lib/Passes/TransformRemillJumpIntrinsics.cpp @@ -6,14 +6,12 @@ * the LICENSE file found in the root directory of this source tree. 
*/ -#include - #include #include +#include #include #include #include -#include #include #include #include @@ -24,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -85,8 +84,7 @@ std::vector FindFunctionCalls(llvm::Function &func, T pred) { // Returns `true` if `val` is a possible return address -ReturnAddressResult -TransformRemillJumpIntrinsics::QueryReturnAddress( +ReturnAddressResult TransformRemillJumpIntrinsics::QueryReturnAddress( const CrossReferenceFolder &xref_folder, llvm::Module *module, llvm::Value *val) const { @@ -182,7 +180,7 @@ TransformRemillJumpIntrinsics::run(llvm::Function &func, llvm::FunctionPassManager fpm; fpm.addPass(llvm::DCEPass()); - fpm.addPass(llvm::SROAPass()); + fpm.addPass(llvm::SROAPass(llvm::SROAOptions::ModifyCFG)); fpm.addPass(llvm::SimplifyCFGPass()); fpm.addPass(llvm::InstCombinePass()); fpm.run(func, fam); diff --git a/lib/Passes/Utils.cpp b/lib/Passes/Utils.cpp index 5abbbb788..86c6281ec 100644 --- a/lib/Passes/Utils.cpp +++ b/lib/Passes/Utils.cpp @@ -8,8 +8,9 @@ #include "Utils.h" +#include #include -#include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include #include namespace anvill { @@ -46,7 +48,8 @@ llvm::Value *ConvertConstantToPointer(llvm::IRBuilder<> &ir, // Cast a pointer to a pointer type. if (auto ptr_ty = llvm::dyn_cast(type)) { if (ptr_ty->getAddressSpace() != dest_ptr_ty->getAddressSpace()) { - const auto new_ptr_ty = llvm::PointerType::get(ir.getContext(), dest_ptr_ty->getAddressSpace()); + const auto new_ptr_ty = llvm::PointerType::get( + ir.getContext(), dest_ptr_ty->getAddressSpace()); val_to_convert = llvm::ConstantExpr::getAddrSpaceCast(val_to_convert, new_ptr_ty); ptr_ty = new_ptr_ty; @@ -62,7 +65,7 @@ llvm::Value *ConvertConstantToPointer(llvm::IRBuilder<> &ir, // Cast an integer to a pointer type. } else if (auto int_ty = llvm::dyn_cast(type)) { const auto pointer_width = dl.getPointerTypeSizeInBits(dest_ptr_ty); - if (int_ty->getPrimitiveSizeInBits().getKnownMinSize() < pointer_width) { + if (int_ty->getPrimitiveSizeInBits().getKnownMinValue() < pointer_width) { int_ty = llvm::Type::getIntNTy(val_to_convert->getContext(), pointer_width); val_to_convert = llvm::ConstantExpr::getZExt(val_to_convert, int_ty); @@ -89,7 +92,8 @@ llvm::Value *ConvertValueToPointer(llvm::IRBuilder<> &ir, // Cast a pointer to a pointer type. if (auto ptr_ty = llvm::dyn_cast(type)) { if (ptr_ty->getAddressSpace() != dest_ptr_ty->getAddressSpace()) { - const auto new_ptr_ty = llvm::PointerType::get(ir.getContext(), dest_ptr_ty->getAddressSpace()); + const auto new_ptr_ty = llvm::PointerType::get( + ir.getContext(), dest_ptr_ty->getAddressSpace()); auto dest = ir.CreateAddrSpaceCast(val_to_convert, new_ptr_ty); CopyMetadataTo(val_to_convert, dest); val_to_convert = dest; @@ -100,8 +104,8 @@ llvm::Value *ConvertValueToPointer(llvm::IRBuilder<> &ir, return val_to_convert; } else { - auto dest = remill::BuildPointerToOffset( - ir, val_to_convert, 0, dest_ptr_ty); + auto dest = + remill::BuildPointerToOffset(ir, val_to_convert, 0, dest_ptr_ty); CopyMetadataTo(val_to_convert, dest); return dest; } @@ -109,7 +113,7 @@ llvm::Value *ConvertValueToPointer(llvm::IRBuilder<> &ir, // Cast an integer to a pointer type. 
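// For example (an assumed scenario, not from the source): an i32 value on a
// target with 64-bit pointers is first widened, as the CreateZExt below
// does, and then materialized as a pointer, roughly:
//
//   %wide = zext i32 %val to i64
//   %ptr = inttoptr i64 %wide to ptr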
} else if (auto int_ty = llvm::dyn_cast(type)) { const auto pointer_width = dl.getPointerTypeSizeInBits(dest_ptr_ty); - if (int_ty->getPrimitiveSizeInBits().getKnownMinSize() < pointer_width) { + if (int_ty->getPrimitiveSizeInBits().getKnownMinValue() < pointer_width) { int_ty = llvm::Type::getIntNTy(val_to_convert->getContext(), pointer_width); auto dest = ir.CreateZExt(val_to_convert, int_ty); @@ -180,6 +184,7 @@ bool BasicBlockIsSane(llvm::BasicBlock *block) { return true; } + llvm::PreservedAnalyses ConvertBoolToPreserved(bool modified) { return modified ? llvm::PreservedAnalyses::none() : llvm::PreservedAnalyses::all(); @@ -228,4 +233,31 @@ llvm::Function *AddressOfReturnAddressFunction(llvm::Module *module) { return func; } +llvm::Function *GetOrCreateAnvillReturnFunc(llvm::Module *mod) { + auto tgt_type = + llvm::FunctionType::get(llvm::Type::getVoidTy(mod->getContext()), true); + if (auto res = mod->getFunction(anvill::kAnvillBasicBlockReturn)) { + return res; + } + + + return llvm::Function::Create(tgt_type, llvm::GlobalValue::ExternalLinkage, + anvill::kAnvillBasicBlockReturn, mod); +} + +std::optional UniqueReturn(llvm::Function *func) { + std::optional r = std::nullopt; + for (auto &insn : llvm::instructions(func)) { + if (auto nret = llvm::dyn_cast(&insn)) { + if (r) { + return std::nullopt; + } else { + r = nret; + } + } + } + + return r; +} + } // namespace anvill diff --git a/lib/Passes/Utils.h b/lib/Passes/Utils.h index 38ee1eacc..23d7b949f 100644 --- a/lib/Passes/Utils.h +++ b/lib/Passes/Utils.h @@ -8,6 +8,7 @@ #pragma once +#include #include #include #include @@ -16,8 +17,6 @@ #include #include -#include - namespace llvm { class CallBase; class Function; @@ -30,8 +29,8 @@ namespace anvill { namespace { template -static std::vector SelectInstructions( - llvm::Function &function) { +static std::vector +SelectInstructions(llvm::Function &function) { std::vector output; for (auto &instruction : llvm::instructions(function)) { @@ -70,10 +69,15 @@ std::string GetFunctionIR(llvm::Function &func); // Returns the module's IR std::string GetModuleIR(llvm::Module &module); + llvm::PreservedAnalyses ConvertBoolToPreserved(bool); // Returns the pointer to the function that lets us overwrite the return // address. This is not available on all architectures / OSes. llvm::Function *AddressOfReturnAddressFunction(llvm::Module *module); +llvm::Function *GetOrCreateAnvillReturnFunc(llvm::Module *module); + +std::optional UniqueReturn(llvm::Function *func); + } // namespace anvill diff --git a/lib/Protobuf.cpp b/lib/Protobuf.cpp index 4d69f8766..1064876c6 100644 --- a/lib/Protobuf.cpp +++ b/lib/Protobuf.cpp @@ -11,16 +11,23 @@ #include #include #include +#include +#include #include #include #include #include #include +#include +#include #include #include #include +#include +#include #include +#include #include "anvill/Declarations.h" #include "specification.pb.h" @@ -39,6 +46,7 @@ Result ProtobufTranslator::ParseIntoCallableDecl( CallableDecl &decl) const { decl.arch = arch; decl.is_noreturn = function.is_noreturn(); + decl.is_variadic = function.is_variadic(); decl.calling_convention = static_cast(function.calling_convention()); @@ -154,8 +162,15 @@ Result ProtobufTranslator::ParseIntoCallableDecl( // Get the return address location. 
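// Hedged example (the reg/mem shapes are the ones DecodeLowLoc handles; the
// concrete registers and sizes are illustrative): a 32-bit x86 spec would
// describe the return address as the slot on top of the stack,
//
//   return_address { mem { base_reg: "ESP" offset: 0 size: 4 } }
//
// while an architecture that keeps it in a register would instead send
// something like return_address { reg { register_name: "LR" } }.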
if (function.has_return_address()) { auto ret_addr = function.return_address(); - auto maybe_ret = DecodeValue(ret_addr, SizeToType(arch->address_size), - "return address"); + auto maybe_low_loc_ret_addr = DecodeLowLoc(ret_addr, "return address"); + if (!maybe_low_loc_ret_addr.Succeeded()) { + return maybe_low_loc_ret_addr.TakeError(); + } + + std::vector low_loc_ret_addr = { + maybe_low_loc_ret_addr.TakeValue()}; + auto maybe_ret = ValueDeclFromOrderedLowLoc( + low_loc_ret_addr, SizeToType(arch->address_size), "return address"); if (!maybe_ret.Succeeded()) { auto err = maybe_ret.TakeError(); std::stringstream ss; @@ -212,41 +227,26 @@ Result ProtobufTranslator::ParseIntoCallableDecl( } i = 0u; - for (const ::specification::Value &ret : function.return_().values()) { - auto maybe_ret = DecodeValue(ret, maybe_ret_type.Value(), "return value"); - if (maybe_ret.Succeeded()) { - decl.returns.emplace_back(maybe_ret.Value()); - } else { - auto err = maybe_ret.TakeError(); - std::stringstream ss; - ss << "Could not decode " << i << "th return value in function at " - << address_str << ": " << err; - return {ss.str()}; - } - ++i; + + + auto maybe_ret = + DecodeValueDecl(function.return_().values(), maybe_ret_type.TakeValue(), + "return value"); + if (!maybe_ret.Succeeded()) { + auto err = maybe_ret.TakeError(); + std::stringstream ss; + ss << "Could not decode " << i << "th return value in function at " + << address_str << ": " << err; + return {ss.str()}; } + decl.returns = maybe_ret.TakeValue(); + // Figure out the return type of this function based off the return // values. - llvm::Type *ret_type = nullptr; - if (decl.returns.empty()) { + llvm::Type *ret_type = ret_type = decl.returns.type; + if (decl.returns.ordered_locs.empty()) { ret_type = llvm::Type::getVoidTy(context); - - } else if (decl.returns.size() == 1) { - ret_type = decl.returns[0].type; - - // The multiple return value case is most interesting, and somewhere - // where we see some divergence between C and what we will decompile. - // For example, on 32-bit x86, a 64-bit return value might be spread - // across EAX:EDX. Instead of representing this by a single value, we - // represent it as a structure if two 32-bit ints, and make sure to say - // that one part is in EAX, and the other is in EDX. - } else { - llvm::SmallVector ret_types; - for (auto &ret_val : decl.returns) { - ret_types.push_back(ret_val.type); - } - ret_type = llvm::StructType::get(context, ret_types, false); } llvm::SmallVector param_types; @@ -263,43 +263,48 @@ Result ProtobufTranslator::ParseIntoCallableDecl( ProtobufTranslator::ProtobufTranslator( const anvill::TypeTranslator &type_translator_, const remill::Arch *arch_, - std::unordered_map &type_map) + std::unordered_map &type_map, + std::unordered_map &type_names) : arch(arch_), type_translator(type_translator_), context(*(arch->context)), void_type(llvm::Type::getVoidTy(context)), dict_void_type(remill::RecontextualizeType( type_translator.Dictionary().u.named.void_, context)), - type_map(type_map) {} + type_map(type_map), + type_names(type_names) {} -// Decode the location of a value. This applies to both parameters and -// return values. 
-anvill::Result -ProtobufTranslator::DecodeValue(const ::specification::Value &value, - TypeSpec type, const char *desc) const { - ValueDecl decl; +anvill::Result +ProtobufTranslator::DecodeLowLoc(const ::specification::Value &value, + const char *desc) const { + LowLoc loc; if (value.has_reg()) { auto ® = value.reg(); - decl.reg = arch->RegisterByName(reg.register_name()); - if (!decl.reg) { + loc.reg = arch->RegisterByName(reg.register_name()); + if (!loc.reg) { std::stringstream ss; ss << "Unable to locate register '" << reg.register_name() << "' used for storing " << desc; return ss.str(); } + if (reg.has_subreg_sz()) { + loc.size = reg.subreg_sz(); + } + } else if (value.has_mem()) { auto &mem = value.mem(); if (mem.has_base_reg()) { - decl.mem_reg = arch->RegisterByName(mem.base_reg()); - if (!decl.mem_reg) { + loc.mem_reg = arch->RegisterByName(mem.base_reg()); + if (!loc.mem_reg) { std::stringstream ss; ss << "Unable to locate base register '" << mem.base_reg() << "' used for storing " << desc; return ss.str(); } } - decl.mem_offset = mem.offset(); + loc.mem_offset = mem.offset(); + loc.size = mem.size(); } else { std::stringstream ss; ss << "A " << desc << " declaration must specify its location with " @@ -307,6 +312,16 @@ ProtobufTranslator::DecodeValue(const ::specification::Value &value, return ss.str(); } + return loc; +} + +anvill::Result +ProtobufTranslator::ValueDeclFromOrderedLowLoc(std::vector loc, + TypeSpec type, + const char *desc) const { + + ValueDecl decl; + decl.ordered_locs = std::move(loc); decl.spec_type = type; auto llvm_type = type_translator.DecodeFromSpec(decl.spec_type); if (!llvm_type.Succeeded()) { @@ -320,6 +335,25 @@ ProtobufTranslator::DecodeValue(const ::specification::Value &value, return decl; } + +// Decode the location of a value. This applies to both parameters and +// return values. +anvill::Result ProtobufTranslator::DecodeValueDecl( + const ::google::protobuf::RepeatedPtrField<::specification::Value> &values, + TypeSpec type, const char *desc) const { + std::vector locs; + for (const auto &val : values) { + auto loc = DecodeLowLoc(val, desc); + if (!loc.Succeeded()) { + return loc.TakeError(); + } + locs.push_back(loc.TakeValue()); + } + + return ValueDeclFromOrderedLowLoc(std::move(locs), type, desc); +} + + // Decode a parameter from the JSON spec. Parameters should have names, // as that makes the bitcode slightly easier to read, but names are // not required. 
They must have types, and these types should be mostly @@ -332,12 +366,6 @@ Result ProtobufTranslator::DecodeParameter( return {"Parameter with no representation"}; } auto &repr_var = param.repr_var(); - if (repr_var.values_size() != 1) { - std::stringstream ss; - ss << "Unsupported number of values for parameter spec: " - << repr_var.values_size(); - return ss.str(); - } if (!repr_var.has_type()) { return {"Parameter without type spec"}; @@ -347,8 +375,8 @@ Result ProtobufTranslator::DecodeParameter( return maybe_type.TakeError(); } - auto &val = repr_var.values()[0]; - auto maybe_decl = DecodeValue(val, maybe_type.Value(), "function parameter"); + auto maybe_decl = DecodeValueDecl(repr_var.values(), maybe_type.Value(), + "function parameter"); if (!maybe_decl.Succeeded()) { return maybe_decl.TakeError(); } @@ -438,7 +466,16 @@ ProtobufTranslator::DecodeType(const ::specification::TypeSpec &obj) const { } } if (obj.has_alias()) { - return type_map.at(obj.alias()); + if (this->type_names.count(obj.alias())) { + TypeSpec res = TypeName(type_names.at(obj.alias())); + return res; + } else if (this->type_map.count(obj.alias())) { + TypeSpec tspec = this->type_map.at(obj.alias()); + return tspec; + } else { + LOG(ERROR) << "Unknown alias id " << obj.alias(); + return {BaseType::Void}; + } } return {"Unknown/invalid data type" + obj.DebugString()}; @@ -487,6 +524,15 @@ Result ProtobufTranslator::DecodeFunction( const ::specification::Function &function) const { FunctionDecl decl; decl.address = function.entry_address(); + decl.entry_uid = Uid{function.entry_uid()}; + + + if (function.binary_addr().has_ext_address()) { + auto ext = function.binary_addr().ext_address(); + decl.binary_addr = RelAddr{ext.entry_vaddr(), ext.displacement()}; + } else { + decl.binary_addr = function.binary_addr().internal_address(); + } if (!function.has_callable()) { return std::string("all functions should have a callable"); @@ -497,8 +543,58 @@ Result ProtobufTranslator::DecodeFunction( if (!parse_res.Succeeded()) { return parse_res.TakeError(); } - decl.context_assignments = {function.context_assignments().begin(), - function.context_assignments().end()}; + + + if (!function.has_frame()) { + return std::string("All functions should have a frame"); + } + const auto &frame = function.frame(); + + decl.stack_depth = frame.frame_size(); + decl.ret_ptr_offset = frame.return_address_offset(); + decl.parameter_size = frame.parameter_size(); + decl.parameter_offset = frame.parameter_offset(); + + decl.maximum_depth = decl.GetPointerDisplacement() + frame.max_frame_depth(); + + for (auto &var : function.in_scope_vars()) { + auto maybe_res = DecodeParameter(var); + if (!maybe_res.Succeeded()) { + LOG(ERROR) << "Couldn't decode live variable: " << var.name() + << " " + maybe_res.TakeError(); + } else { + decl.in_scope_variables.push_back(maybe_res.TakeValue()); + } + } + + if (decl.maximum_depth < decl.stack_depth) { + LOG(ERROR) + << "Analyzed max depth is smaller than the initial depth overriding"; + decl.maximum_depth = decl.stack_depth; + } + + this->ParseCFGIntoFunction(function, decl); + + + for (auto &ty_hint : function.type_hints()) { + auto maybe_type = DecodeType(ty_hint.target_var().type()); + if (maybe_type.Succeeded()) { + auto maybe_var = + DecodeValueDecl(ty_hint.target_var().values(), maybe_type.TakeValue(), + "attempting to decode type hint value"); + if (maybe_var.Succeeded()) { + decl.type_hints.push_back( + {ty_hint.target_addr(), maybe_var.TakeValue()}); + } + } else { + LOG(ERROR) << "Failed to decode 
type for type hint"; + } + } + + std::sort(decl.type_hints.begin(), decl.type_hints.end(), + [](const TypeHint &hint_lhs, const TypeHint &hint_rhs) { + return hint_lhs.target_addr < hint_rhs.target_addr; + }); auto link = function.func_linkage(); @@ -512,9 +608,144 @@ Result ProtobufTranslator::DecodeFunction( decl.is_extern = false; } + for (auto &[name, local] : function.local_variables()) { + auto type_spec = DecodeType(local.type()); + if (!type_spec.Succeeded()) { + return type_spec.Error(); + } + + auto value_decl = + DecodeValueDecl(local.values(), type_spec.Value(), "local variable"); + if (!value_decl.Succeeded()) { + return value_decl.Error(); + } + + decl.locals[name] = {value_decl.TakeValue(), name}; + } + + return decl; } +void ProtobufTranslator::AddLiveValuesToBB( + std::unordered_map> &map, Uid bb_uid, + const ::google::protobuf::RepeatedPtrField<::specification::Parameter> + &values) const { + auto &v = map.insert({bb_uid, std::vector()}).first->second; + + for (auto var : values) { + auto param = DecodeParameter(var); + if (!param.Succeeded()) { + LOG(ERROR) << "Unable to decode live parameter " << param.TakeError(); + } else { + v.push_back(param.TakeValue()); + } + } +} + +void ProtobufTranslator::ParseCFGIntoFunction( + const ::specification::Function &obj, FunctionDecl &decl) const { + for (const auto &blk : obj.blocks()) { + std::unordered_set tmp; + for (auto o : blk.second.outgoing_blocks()) { + tmp.insert({o}); + } + CodeBlock nblk = { + blk.second.address(), + blk.second.size(), + tmp, + {blk.second.context_assignments().begin(), + blk.second.context_assignments().end()}, + {blk.first}, + }; + decl.cfg.emplace(Uid{blk.first}, std::move(nblk)); + } + + + for (auto &[blk_uid_, ctx] : obj.block_context()) { + std::vector stack_offsets_at_entry, stack_offsets_at_exit; + std::vector constant_values_at_entry, + constant_values_at_exit; + Uid blk_uid = {blk_uid_}; + auto blk = decl.cfg[blk_uid]; + auto symval_to_domains = [&](const specification::ValueMapping &symval, + std::vector &stack_offsets, + std::vector &constant_values) { + if (!symval.has_target_value()) { + LOG(FATAL) << "All equalities must have a target"; + } + + auto stackptr = arch->RegisterByName(arch->StackPointerRegisterName()); + if (!stackptr) { + LOG(FATAL) << "No stack ptr"; + } + + auto target_type_spec = DecodeType(symval.target_value().type()); + if (!target_type_spec.Succeeded()) { + LOG(ERROR) << "Failed to lift target type " + << target_type_spec.TakeError(); + return; + } + + auto target_vdecl = DecodeValueDecl( + symval.target_value().values(), target_type_spec.TakeValue(), + "Unable to get value decl for target"); + + if (!target_vdecl.Succeeded()) { + LOG(ERROR) << "Failed to lift value " << target_vdecl.TakeError(); + return; + } + + if (!symval.has_curr_val()) { + LOG(FATAL) << "Mapping should have current value"; + } + + if (symval.curr_val().has_stack_disp()) { + OffsetDomain reg_off; + + reg_off.stack_offset = symval.curr_val().stack_disp(); + reg_off.target_value = target_vdecl.TakeValue(); + + stack_offsets.push_back(reg_off); + } else if (symval.curr_val().has_constant()) { + ConstantDomain const_val; + + const_val.target_value = target_vdecl.TakeValue(); + const_val.value = symval.curr_val().constant().value(); + const_val.should_taint_by_pc = + symval.curr_val().constant().is_tainted_by_pc(); + + DLOG(INFO) << "Adding global register override for " + << const_val.target_value.ordered_locs[0].reg->name << " " + << std::hex << const_val.value; + 
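// Illustrative values (hypothetical, for exposition): an equality asserting
// that a register holds the PC-tainted constant 0x401000 at block entry
// arrives here as
//
//   curr_val { constant { value: 0x401000 is_tainted_by_pc: true } }
//
// and is recorded as a ConstantDomain override for this block.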
constant_values.push_back(const_val); + } else { + LOG(FATAL) << symval.curr_val().GetTypeName() + << " is unimplemented for affine relations"; + } + }; + + for (auto &symval : ctx.symvals_at_entry()) { + symval_to_domains(symval, + decl.stack_offsets_at_entry[blk_uid].affine_equalities, + decl.constant_values_at_entry[blk_uid]); + } + + for (auto &symval : ctx.symvals_at_exit()) { + symval_to_domains(symval, + decl.stack_offsets_at_exit[blk_uid].affine_equalities, + decl.constant_values_at_exit[blk_uid]); + } + + this->AddLiveValuesToBB(decl.live_regs_at_entry, blk_uid, + ctx.live_at_entries()); + + this->AddLiveValuesToBB(decl.live_regs_at_exit, blk_uid, + ctx.live_at_exits()); + } +} + + Result ProtobufTranslator::DecodeGlobalVar( const ::specification::GlobalVariable &obj) const { anvill::VariableDecl decl; @@ -534,8 +765,9 @@ Result ProtobufTranslator::DecodeGlobalVar( << decl.address << ": " << spec_type.Error(); return ss.str(); } + decl.spec_type = spec_type.TakeValue(); - auto llvm_type = type_translator.DecodeFromSpec(spec_type.Value()); + auto llvm_type = type_translator.DecodeFromSpec(decl.spec_type); if (!llvm_type.Succeeded()) { std::stringstream ss; ss << "Cannot translate type for variable at address " << std::hex @@ -564,19 +796,42 @@ Result ProtobufTranslator::DecodeGlobalVar( } decl.type = type; + if (obj.binary_address().has_ext_address()) { + decl.binary_addr = + RelAddr{obj.binary_address().ext_address().entry_vaddr(), + obj.binary_address().ext_address().displacement()}; + } else { + decl.binary_addr = obj.binary_address().internal_address(); + } + + return decl; } anvill::Result ProtobufTranslator::DecodeType( const ::specification::TypeSpec &obj, - const std::unordered_map &map) { + const std::unordered_map &map, + const std::unordered_map &named_types) { if (obj.has_alias()) { auto alias = obj.alias(); + + if (named_types.contains(alias)) { + TypeSpec tname = TypeName(named_types.at(alias)); + return tname; + } + if (type_map.count(alias)) { return type_map[alias]; } auto &type = type_map[alias]; - auto res = DecodeType(map.at(alias), map); + + // The alias may not be present in the map in case of opaque pointers + if (!map.count(alias)) { + LOG(ERROR) << "No alias definition for " << obj.alias(); + return {BaseType::Void}; + } + + auto res = DecodeType(map.at(alias), map, named_types); if (!res.Succeeded()) { return res.TakeError(); } @@ -587,7 +842,7 @@ anvill::Result ProtobufTranslator::DecodeType( auto pointer = obj.pointer(); TypeSpec pointee = BaseType::Void; if (pointer.has_pointee()) { - auto maybe_pointee = DecodeType(pointer.pointee(), map); + auto maybe_pointee = DecodeType(pointer.pointee(), map, named_types); if (!maybe_pointee.Succeeded()) { return maybe_pointee.Error(); } @@ -600,7 +855,7 @@ anvill::Result ProtobufTranslator::DecodeType( if (!vector.has_base()) { return {"Vector type without base type"}; } - auto maybe_base = DecodeType(vector.base(), map); + auto maybe_base = DecodeType(vector.base(), map, named_types); if (!maybe_base.Succeeded()) { return maybe_base.Error(); } @@ -611,7 +866,7 @@ anvill::Result ProtobufTranslator::DecodeType( if (!array.has_base()) { return {"Array type without base type"}; } - auto maybe_base = DecodeType(array.base(), map); + auto maybe_base = DecodeType(array.base(), map, named_types); if (!maybe_base.Succeeded()) { return maybe_base.Error(); } @@ -620,7 +875,7 @@ anvill::Result ProtobufTranslator::DecodeType( if (obj.has_struct_()) { auto res = std::make_shared(); for (auto elem : obj.struct_().members()) { - 
auto maybe_type = DecodeType(elem, map); + auto maybe_type = DecodeType(elem, map, named_types); if (!maybe_type.Succeeded()) { return maybe_type.Error(); } @@ -634,14 +889,14 @@ anvill::Result ProtobufTranslator::DecodeType( return {"Function without return type"}; } auto res = std::make_shared(); - auto maybe_ret = DecodeType(func.return_type(), map); + auto maybe_ret = DecodeType(func.return_type(), map, named_types); if (!maybe_ret.Succeeded()) { return maybe_ret.Error(); } res->return_type = std::move(maybe_ret.Value()); res->is_variadic = func.is_variadic(); for (auto arg : func.arguments()) { - auto maybe_argtype = DecodeType(arg, map); + auto maybe_argtype = DecodeType(arg, map, named_types); if (!maybe_argtype.Succeeded()) { return maybe_argtype.Error(); } @@ -653,17 +908,39 @@ anvill::Result ProtobufTranslator::DecodeType( } Result ProtobufTranslator::DecodeTypeMap( - const ::google::protobuf::Map - &map) { + const ::google::protobuf::Map &map, + const ::google::protobuf::Map &names) { for (auto &[k, v] : map) { if (type_map.count(k)) { continue; } - auto res = DecodeType(v, {map.begin(), map.end()}); + auto res = + DecodeType(v, {map.begin(), map.end()}, {names.begin(), names.end()}); + if (!res.Succeeded()) { return res.Error(); } - type_map[k] = res.Value(); + + + if (names.contains(k)) { + auto ty = this->type_translator.DecodeFromSpec(res.Value()); + if (!ty.Succeeded()) { + return ty.Error().message; + } + + if (auto *sty = llvm::dyn_cast(ty.Value())) { + + + std::string name = names.at(k); + auto res = getOrCreateNamedStruct(this->context, name); + if (res->isOpaque()) { + res->setBody(sty->elements()); + } + } + type_names[k] = names.at(k); + } else { + type_map[k] = res.Value(); + } } return std::monostate{}; } diff --git a/lib/Protobuf.h b/lib/Protobuf.h index 9100a9404..ad2d9fc48 100644 --- a/lib/Protobuf.h +++ b/lib/Protobuf.h @@ -16,6 +16,7 @@ #include #include #include +#include #include "anvill/Type.h" #include "specification.pb.h" @@ -54,19 +55,32 @@ class ProtobufTranslator { llvm::Type *const dict_void_type; std::unordered_map &type_map; + std::unordered_map &type_names; anvill::Result DecodeType(const ::specification::TypeSpec &obj) const; anvill::Result DecodeType( const ::specification::TypeSpec &obj, - const std::unordered_map &map); + const std::unordered_map &map, + const std::unordered_map &named_types); + // Parse the location of a value. This applies to both parameters and // return values. + anvill::Result + DecodeLowLoc(const ::specification::Value &value, const char *desc) const; + anvill::Result - DecodeValue(const ::specification::Value &obj, TypeSpec type, - const char *desc) const; + ValueDeclFromOrderedLowLoc(std::vector loc, TypeSpec type, + const char *desc) const; + + // Parse the location of a value. This applies to both parameters and + // return values. 
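  // Hedged example (mirrors the classic 32-bit x86 case where a 64-bit
  // return value is spread across EAX:EDX; the exact field spelling is
  // illustrative): two repeated values such as
  //
  //   values { reg { register_name: "EAX" } }
  //   values { reg { register_name: "EDX" } }
  //
  // decode into a single ValueDecl whose ordered_locs holds both LowLocs in
  // order, with the full 64-bit type attached to the declaration as a whole.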
+ anvill::Result DecodeValueDecl( + const ::google::protobuf::RepeatedPtrField<::specification::Value> + &values, + TypeSpec type, const char *desc) const; Result @@ -74,16 +88,28 @@ class ProtobufTranslator { std::optional address, CallableDecl &decl) const; + void ParseCFGIntoFunction(const ::specification::Function &obj, + FunctionDecl &decl) const; + + void AddLiveValuesToBB( + std::unordered_map> &map, Uid bb_uid, + const ::google::protobuf::RepeatedPtrField<::specification::Parameter> + &values) const; + + public: explicit ProtobufTranslator( const anvill::TypeTranslator &type_translator_, const remill::Arch *arch_, - std::unordered_map &type_map); + std::unordered_map &type_map, + std::unordered_map &type_names); inline explicit ProtobufTranslator( const anvill::TypeTranslator &type_translator_, const std::unique_ptr &arch_, - std::unordered_map &type_map) - : ProtobufTranslator(type_translator_, arch_.get(), type_map) {} + std::unordered_map &type_map, + std::unordered_map &type_names) + : ProtobufTranslator(type_translator_, arch_.get(), type_map, + type_names) {} // Parse a parameter from the Protobuf spec. Parameters should have names, // as that makes the bitcode slightly easier to read, but names are @@ -111,9 +137,10 @@ class ProtobufTranslator { Result DecodeDefaultCallableDecl(const ::specification::Function &obj) const; - Result - DecodeTypeMap(const ::google::protobuf::Map &map); + Result DecodeTypeMap( + const ::google::protobuf::Map + &map, + const ::google::protobuf::Map &names); }; } // namespace anvill diff --git a/lib/Providers/TypeProvider.cpp b/lib/Providers/TypeProvider.cpp index 739958be6..dd6516826 100644 --- a/lib/Providers/TypeProvider.cpp +++ b/lib/Providers/TypeProvider.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -18,7 +19,10 @@ #include #include +#include +#include #include +#include #include "Specification.h" @@ -56,29 +60,6 @@ NullTypeProvider::TryGetVariableType(uint64_t, llvm::Type *) const { return std::nullopt; } -// Try to return the type of a function starting at address `to_address`. This -// type is the prototype of the function. The type can be call site specific, -// where the call site is `from_inst`. -std::optional -TypeProvider::TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &from_inst, - uint64_t to_address) const { - if (auto decl = TryGetCalledFunctionType(function_address, from_inst)) { - return decl; - } else if (auto func_decl = TryGetFunctionType(to_address)) { - return static_cast(func_decl.value()); - } else { - return std::nullopt; - } -} - -// Try to return the type of a function that has been called from `from_isnt`. -std::optional -TypeProvider::TryGetCalledFunctionType(uint64_t function_address, - const remill::Instruction &) const { - return std::nullopt; -} - BaseTypeProvider::~BaseTypeProvider() {} const ::anvill::TypeDictionary &BaseTypeProvider::Dictionary(void) const { @@ -102,20 +83,8 @@ SpecificationTypeProvider::~SpecificationTypeProvider(void) {} SpecificationTypeProvider::SpecificationTypeProvider(const Specification &spec) : BaseTypeProvider(spec.impl->type_translator), - impl(spec.impl) {} - -// Try to return the type of a function that has been called from `from_isnt`. 
-std::optional SpecificationTypeProvider::TryGetCalledFunctionType( - uint64_t function_address, const remill::Instruction &from_inst) const { - std::pair loc{function_address, from_inst.pc}; - - auto cs_it = impl->loc_to_call_site.find(loc); - if (cs_it == impl->loc_to_call_site.end()) { - return std::nullopt; - } else { - return *(cs_it->second); - } -} + impl(spec.impl), + layout(spec.Arch()->DataLayout()) {} // Try to return the type of a function starting at address `address`. This // type is the prototype of the function. @@ -129,42 +98,37 @@ SpecificationTypeProvider::TryGetFunctionType(uint64_t address) const { } } -std::optional -SpecificationTypeProvider::TryGetVariableType(uint64_t address, - llvm::Type *) const { - auto var_it = impl->address_to_var.find(address); - if (var_it != impl->address_to_var.end()) { - return *(var_it->second); - } else { - return std::nullopt; - } -} +std::vector +SpecificationTypeProvider::NamedTypes(void) const { + std::vector stys; -// Try to return the type of a function that has been called from `from_isnt`. -std::optional -DefaultCallableTypeProvider::TryGetCalledFunctionType( - uint64_t function_address, const remill::Instruction &from_inst) const { - auto maybe_res = - ProxyTypeProvider::TryGetCalledFunctionType(function_address, from_inst); - if (maybe_res.has_value()) { - return maybe_res; + for (auto nms : this->impl->named_types) { + auto sty = llvm::StructType::getTypeByName(this->context, nms); + if (sty) { + stys.push_back(sty); + } } + return stys; +} + +std::optional +SpecificationTypeProvider::TryGetVariableType(uint64_t address, + llvm::Type *) const { - auto maybe_func_type = - ProxyTypeProvider::TryGetFunctionType(function_address); - if (maybe_func_type.has_value()) { - return maybe_func_type; + auto var_it = impl->address_to_var.lower_bound(address); + // Don't dereference lower_bound's result before checking for end(); step + // back to the preceding variable (which may contain `address`) only when + // such a variable exists. + if (var_it == impl->address_to_var.end() || var_it->first != address) { + if (var_it == impl->address_to_var.begin()) { + return std::nullopt; + } + var_it--; } - if (auto arch_decl = impl->TryGetDeclForArch(from_inst.arch_name)) { - return *arch_decl; + if (var_it == impl->address_to_var.end()) { + return std::nullopt; } - if (from_inst.arch_name != from_inst.sub_arch_name) { - if (auto sub_arch_decl = impl->TryGetDeclForArch(from_inst.sub_arch_name)) { - return *sub_arch_decl; - } + auto v = var_it->second; + if (v->type && address >= v->address && + address < v->address + this->layout.getTypeSizeInBits(v->type) / 8) { + return *v; } return std::nullopt; @@ -211,22 +175,6 @@ ProxyTypeProvider::TryGetFunctionType(uint64_t address) const { return this->deleg.TryGetFunctionType(address); } -// Try to return the type of a function that has been called from `from_isnt`. -std::optional ProxyTypeProvider::TryGetCalledFunctionType( - uint64_t function_address, const remill::Instruction &from_inst) const { - return this->deleg.TryGetCalledFunctionType(function_address, from_inst); -} - -// Try to return the type of a function starting at address `to_address`. This -// type is the prototype of the function. The type can be call site specific, -// where the call site is `from_inst`.
-std::optional ProxyTypeProvider::TryGetCalledFunctionType( - uint64_t function_address, const remill::Instruction &from_inst, - uint64_t to_address) const { - return this->deleg.TryGetCalledFunctionType(function_address, from_inst, - to_address); -} - // Try to return the variable at given address or containing the address std::optional ProxyTypeProvider::TryGetVariableType(uint64_t address, @@ -246,6 +194,11 @@ void ProxyTypeProvider::QueryRegisterStateAtInstruction( typed_reg_cb); } +std::vector ProxyTypeProvider::NamedTypes(void) const { + return this->deleg.NamedTypes(); +} + + const ::anvill::TypeDictionary &ProxyTypeProvider::Dictionary(void) const { return this->deleg.Dictionary(); } @@ -274,19 +227,6 @@ TypeProvider::TryGetFunctionTypeOrDefault(uint64_t address) const { return this->GetDefaultFunctionType(address); } - -std::optional TypeProvider::TryGetCalledFunctionTypeOrDefault( - uint64_t function_address, const remill::Instruction &from_inst, - uint64_t to_address) const { - auto res = - this->TryGetCalledFunctionType(function_address, from_inst, to_address); - if (res.has_value()) { - return res; - } - - return this->GetDefaultFunctionType(to_address); -} - std::optional TypeProvider::TryGetVariableTypeOrDefault(uint64_t address, llvm::Type *hinted_value_type) const { diff --git a/lib/Specification.cpp b/lib/Specification.cpp index bfe63097e..51ee5e54f 100644 --- a/lib/Specification.cpp +++ b/lib/Specification.cpp @@ -9,7 +9,10 @@ #include "Specification.h" #include +#include #include +#include +#include #include #include #include @@ -32,8 +35,12 @@ namespace anvill { SpecificationImpl::~SpecificationImpl(void) {} -SpecificationImpl::SpecificationImpl(std::unique_ptr arch_) +SpecificationImpl::SpecificationImpl(std::unique_ptr arch_, + const std::string &image_name_, + std::uint64_t image_base_) : arch(std::move(arch_)), + image_name(image_name_), + image_base(image_base_), type_dictionary(*(arch->context)), type_translator(type_dictionary, arch.get()) {} @@ -42,8 +49,11 @@ SpecificationImpl::ParseSpecification( const ::specification::Specification &spec) { std::vector dec_err; std::unordered_map type_map; - ProtobufTranslator translator(type_translator, arch.get(), type_map); - auto map_res = translator.DecodeTypeMap(spec.type_aliases()); + std::unordered_map type_names; + ProtobufTranslator translator(type_translator, arch.get(), type_map, + type_names); + auto map_res = + translator.DecodeTypeMap(spec.type_aliases(), spec.type_names()); if (!map_res.Succeeded()) { dec_err.push_back(map_res.Error()); } @@ -63,6 +73,16 @@ SpecificationImpl::ParseSpecification( } auto func_ptr = new FunctionDecl(std::move(func_obj)); + + for (const auto &[uid, bb] : func_ptr->cfg) { + if (uid_to_block.count(uid)) { + std::stringstream ss; + ss << "Duplicate block Uid: " << uid.value; + return ss.str(); + } + uid_to_block[uid] = &bb; + } + functions.emplace_back(func_ptr); address_to_function.emplace(func_address, func_ptr); } @@ -80,7 +100,8 @@ SpecificationImpl::ParseSpecification( continue; } auto cs_obj = maybe_cs.Value(); - std::pair loc{cs_obj.function_address, cs_obj.address}; + std::pair loc{cs_obj.function_address, + cs_obj.address}; if (loc_to_call_site.count(loc)) { std::stringstream ss; @@ -111,6 +132,7 @@ SpecificationImpl::ParseSpecification( if (!maybe_var.Succeeded()) { auto err = maybe_var.Error(); dec_err.push_back(err); + continue; } auto var_obj = maybe_var.Value(); auto var_address = var_obj.address; @@ -180,8 +202,6 @@ SpecificationImpl::ParseSpecification( jmp.address = 
jump.address(); for (auto &target : jump.targets()) { JumpTarget jmp_target; - auto &assignments = target.context_assignments(); - jmp_target.context_assignments = {assignments.begin(), assignments.end()}; jmp_target.address = target.address(); jmp.targets.push_back(jmp_target); } @@ -198,6 +218,7 @@ SpecificationImpl::ParseSpecification( for (auto &call : spec.overrides().calls()) { Call callspec{}; callspec.stop = call.stop(); + callspec.is_noreturn = call.noreturn(); callspec.address = call.address(); if (call.has_return_address()) { callspec.return_address = call.return_address(); @@ -235,7 +256,12 @@ SpecificationImpl::ParseSpecification( std::sort(misc_overrides.begin(), misc_overrides.end(), [](const auto &a, const auto &b) { return a.address < b.address; }); - // TODO(frabert): Parse everything else + required_globals = {spec.required_globals().begin(), + spec.required_globals().end()}; + + for (const auto &[_k, v] : spec.type_names()) { + this->named_types.push_back(v); + } return dec_err; } @@ -250,6 +276,16 @@ std::shared_ptr Specification::Arch(void) const { return std::shared_ptr(impl, impl->arch.get()); } +// Return the name of the program image described by this specification. +const std::string &Specification::ImageName(void) const { + return impl->image_name; +} + +// Return the base address of the program image. +std::uint64_t Specification::ImageBase(void) const { + return impl->image_base; +} + // Return the type dictionary used by this specification. const ::anvill::TypeDictionary &Specification::TypeDictionary(void) const { return impl->type_dictionary; @@ -271,12 +307,12 @@ GetArch(llvm::LLVMContext &context, switch (spec.arch()) { default: return {"Invalid/unrecognized architecture"}; - case ::specification::ARCH_X86: arch_name = remill::kArchX86; break; + case ::specification::ARCH_X86: arch_name = remill::kArchX86_AVX; break; case ::specification::ARCH_X86_AVX: arch_name = remill::kArchX86_AVX; break; case ::specification::ARCH_X86_AVX512: arch_name = remill::kArchX86_AVX512; break; - case ::specification::ARCH_AMD64: arch_name = remill::kArchAMD64; break; + case ::specification::ARCH_AMD64: arch_name = remill::kArchAMD64_AVX; break; case ::specification::ARCH_AMD64_AVX: arch_name = remill::kArchAMD64_AVX; break; @@ -284,13 +320,16 @@ GetArch(llvm::LLVMContext &context, arch_name = remill::kArchAMD64_AVX512; break; case ::specification::ARCH_AARCH64: - arch_name = remill::kArchAArch64LittleEndian; + arch_name = remill::kArchAArch64LittleEndian_SLEIGH; break; case ::specification::ARCH_AARCH32: arch_name = remill::kArchAArch32LittleEndian; break; - case ::specification::ARCH_SPARC32: arch_name = remill::kArchSparc32; break; + case ::specification::ARCH_SPARC32: + arch_name = remill::kArchSparc32_SLEIGH; + break; case ::specification::ARCH_SPARC64: arch_name = remill::kArchSparc64; break; + case ::specification::ARCH_PPC: arch_name = remill::kArchPPC; break; } switch (spec.operating_system()) { @@ -318,7 +357,10 @@ anvill::Result Specification::DecodeFromPB(llvm::LLVMContext &context, const std::string &pb) { ::specification::Specification spec; if (!spec.ParseFromString(pb)) { - return {"Failed to parse specification"}; + auto status = google::protobuf::util::JsonStringToMessage(pb, &spec); + if (!status.ok()) { + return {"Failed to parse specification"}; + } } auto arch{GetArch(context, spec)}; @@ -326,8 +368,11 @@ Specification::DecodeFromPB(llvm::LLVMContext &context, const std::string &pb) { return arch.Error(); } + const auto &image_name = spec.image_name(); + auto
image_base = spec.image_base(); + std::shared_ptr pimpl( - new SpecificationImpl(arch.TakeValue())); + new SpecificationImpl(arch.TakeValue(), image_name, image_base)); auto maybe_warnings = pimpl->ParseSpecification(spec); @@ -346,6 +391,7 @@ Specification::DecodeFromPB(llvm::LLVMContext &context, const std::string &pb) { anvill::Result Specification::DecodeFromPB(llvm::LLVMContext &context, std::istream &pb) { ::specification::Specification spec; + if (!spec.ParseFromIstream(&pb)) { return {"Failed to parse specification"}; } @@ -355,8 +401,12 @@ Specification::DecodeFromPB(llvm::LLVMContext &context, std::istream &pb) { return arch.Error(); } + const auto &image_name = spec.image_name(); + auto image_base = spec.image_base(); + + std::shared_ptr pimpl( - new SpecificationImpl(arch.TakeValue())); + new SpecificationImpl(arch.TakeValue(), image_name, image_base)); auto maybe_warnings = pimpl->ParseSpecification(spec); @@ -372,6 +422,16 @@ Specification::DecodeFromPB(llvm::LLVMContext &context, std::istream &pb) { return Specification(std::move(pimpl)); } +// Return the call site at a given function address, instruction address pair, or an empty `shared_ptr`. +std::shared_ptr Specification::CallSiteAt( + const std::pair &loc) const { + auto it = impl->loc_to_call_site.find(loc); + if (it != impl->loc_to_call_site.end()) { + return {impl, it->second}; + } + return {}; +} + // Return the function beginning at `address`, or an empty `shared_ptr`. std::shared_ptr Specification::FunctionAt(std::uint64_t address) const { @@ -383,6 +443,16 @@ Specification::FunctionAt(std::uint64_t address) const { } } +// Return the block with `uid`, or an empty `shared_ptr`. +std::shared_ptr Specification::BlockAt(Uid uid) const { + auto it = impl->uid_to_block.find(uid); + if (it != impl->uid_to_block.end()) { + return std::shared_ptr(impl, it->second); + } else { + return {}; + } +} + // Return the global variable beginning at `address`, or an empty `shared_ptr`. std::shared_ptr Specification::VariableAt(std::uint64_t address) const { @@ -416,6 +486,30 @@ void Specification::ForEachSymbol( } } +SpecBlockContexts::SpecBlockContexts(const Specification &spec) { + spec.ForEachFunction([this](std::shared_ptr decl) { + decl->AddBBContexts(this->contexts); + funcs[decl->address] = decl; + return true; + }); +} + +std::optional> +SpecBlockContexts::GetBasicBlockContextForUid(Uid uid) const { + auto cont = this->contexts.find(uid); + if (cont == this->contexts.end()) { + return std::nullopt; + } + + return std::optional>{ + std::cref(cont->second)}; +} + +const FunctionDecl & +SpecBlockContexts::GetFunctionAtAddress(uint64_t addr) const { + return *funcs.at(addr); +} + // Call `cb` on each function in the spec, until `cb` returns `false`. void Specification::ForEachFunction( std::function)> cb) const { @@ -449,6 +543,7 @@ void Specification::ForEachCallSite( } } + // Call `cb` on each control-flow redirection, until `cb` returns `false`. 
void Specification::ForEachControlFlowRedirect( std::function cb) const { @@ -493,4 +588,9 @@ void Specification::ForEachMiscOverride( } } +const std::unordered_set & +Specification::GetRequiredGlobals() const { + return impl->required_globals; +} + } // namespace anvill diff --git a/lib/Specification.h b/lib/Specification.h index 61dd194f3..8034acebe 100644 --- a/lib/Specification.h +++ b/lib/Specification.h @@ -14,9 +14,14 @@ #include #include +#include #include #include +#include #include +#include + +#include "anvill/Passes/BasicBlockPass.h" namespace llvm { class LLVMContext; @@ -31,7 +36,8 @@ class SpecificationImpl friend class Specification; SpecificationImpl(void) = delete; - SpecificationImpl(std::unique_ptr arch_); + SpecificationImpl(std::unique_ptr arch_, + const std::string &image_name_, std::uint64_t image_base_); Result, std::string> ParseSpecification(const ::specification::Specification &obj); @@ -42,6 +48,9 @@ class SpecificationImpl // Architecture used by all of the function and global variable declarations. const std::unique_ptr arch; + std::string image_name; + std::uint64_t image_base; + const TypeDictionary type_dictionary; const TypeTranslator type_translator; @@ -57,9 +66,12 @@ class SpecificationImpl // List of functions that have been parsed from the JSON spec. std::unordered_map address_to_function; + // List of basic blocks that have been parsed from the JSON spec. + std::unordered_map uid_to_block; + // Inverted mapping of byte addresses to the variables containing those // addresses. - std::unordered_map address_to_var; + std::map address_to_var; // NOTE(pag): We used ordered containers so that any type of round-tripping @@ -85,6 +97,10 @@ class SpecificationImpl std::vector misc_overrides; std::unordered_map control_flow_overrides; + + std::unordered_set required_globals; + + std::vector named_types; }; } // namespace anvill diff --git a/lib/Type.cpp b/lib/Type.cpp index d4609a529..3b79e8b92 100644 --- a/lib/Type.cpp +++ b/lib/Type.cpp @@ -7,6 +7,10 @@ */ #include +#include +#include + +#include #define ANVILL_USE_WRAPPED_TYPES 0 @@ -19,21 +23,41 @@ #include // clang-format on +#include +#include #include - #include #include #include -#include -#include - #include #include #include namespace anvill { +bool operator==(std::shared_ptr a, + std::shared_ptr b) { + return *a == *b; +} + +bool operator==(std::shared_ptr a, std::shared_ptr b) { + return *a == *b; +} + +bool operator==(std::shared_ptr a, std::shared_ptr b) { + return *a == *b; +} + +bool operator==(std::shared_ptr a, std::shared_ptr b) { + return *a == *b; +} + +bool operator==(std::shared_ptr a, + std::shared_ptr b) { + return *a == *b; +} + class TypeSpecifierImpl { public: llvm::LLVMContext &context; @@ -41,6 +65,8 @@ class TypeSpecifierImpl { const TypeDictionary type_dict; std::unordered_map type_to_id; std::vector id_to_type; + std::unordered_map type_to_md; + std::unordered_map md_to_type; inline TypeSpecifierImpl(const TypeDictionary &type_dict_, const llvm::DataLayout &dl_) @@ -52,12 +78,21 @@ class TypeSpecifierImpl { // TypeSpecification.cpp void EncodeType(llvm::Type &type, std::stringstream &ss, EncodingFormat format); + + llvm::MDNode *TypeToMetadata(BaseType type); + llvm::MDNode *TypeToMetadata(std::shared_ptr type); + llvm::MDNode *TypeToMetadata(std::shared_ptr type); + llvm::MDNode *TypeToMetadata(std::shared_ptr type); + llvm::MDNode *TypeToMetadata(std::shared_ptr type); + llvm::MDNode *TypeToMetadata(std::shared_ptr type); + llvm::MDNode *TypeToMetadata(UnknownType type); + 
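+ // Each of these overloads encodes one TypeSpec variant as an MDNode whose first operand is a tag MDString (e.g. "PointerType", "StructType"); TypeTranslator::DecodeFromMetadata reverses the encoding.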
+ llvm::MDNode *TypeToMetadata(TypeName type); }; // Translates an llvm::Type to a type that conforms to the spec in // TypeSpecification.cpp -void TypeSpecifierImpl::EncodeType( - llvm::Type &type, std::stringstream &ss, EncodingFormat format) { +void TypeSpecifierImpl::EncodeType(llvm::Type &type, std::stringstream &ss, + EncodingFormat format) { const auto alpha_num = format == EncodingFormat::kValidSymbolCharsOnly; switch (type.getTypeID()) { case llvm::Type::VoidTyID: ss << 'v'; break; @@ -188,7 +223,7 @@ void TypeSpecifierImpl::EncodeType( } else if (struct_ptr == type_dict.u.named.padding) { ss << 'p'; - // This is an opaque structure; mark it as a void type. + // This is an opaque structure; mark it as a void type. } else if (struct_ptr->isOpaque()) { ss << 'v'; @@ -200,7 +235,7 @@ void TypeSpecifierImpl::EncodeType( if (type_to_id.count(struct_ptr)) { ss << (alpha_num ? "_M" : "%") << type_to_id[struct_ptr]; - // We've not yet serialized this structure. + // We've not yet serialized this structure. } else { // Start by emitting a new structure ID for this structure and memoizing @@ -227,10 +262,11 @@ void TypeSpecifierImpl::EncodeType( << (alpha_num ? "_D" : "]"); } - // TODO(pag): Investigate this possibility. Does this occur for - // bitfields? + // TODO(pag): Investigate this possibility. Does this occur for + // bitfields? } else if (expected_offset > offset) { - LOG(FATAL) << "TODO?! Maybe bitfields? Structure field offset shenanigans"; + LOG(FATAL) + << "TODO?! Maybe bitfields? Structure field offset shenanigans"; } const auto el_ty = struct_ptr->getElementType(i); @@ -294,13 +330,118 @@ void TypeSpecifierImpl::EncodeType( } } +llvm::MDNode *TypeSpecifierImpl::TypeToMetadata(BaseType type) { + auto str = llvm::MDString::get(context, "BaseType"); + auto value = llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), + static_cast(type)); + return llvm::MDNode::get(context, + {str, llvm::ConstantAsMetadata::get(value)}); +} + +llvm::MDNode * +TypeSpecifierImpl::TypeToMetadata(std::shared_ptr type) { + auto &node = type_to_md[type.get()]; + if (node) { + return node; + } + + auto str = llvm::MDString::get(context, "PointerType"); + auto pointee = + std::visit([this](auto &&t) { return TypeToMetadata(t); }, type->pointee); + node = llvm::MDNode::get(context, {str, pointee}); + return node; +} + +llvm::MDNode * +TypeSpecifierImpl::TypeToMetadata(std::shared_ptr type) { + auto &node = type_to_md[type.get()]; + if (node) { + return node; + } + + auto str = llvm::MDString::get(context, "VectorType"); + auto base = + std::visit([this](auto &&t) { return TypeToMetadata(t); }, type->base); + auto size = llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), + static_cast(type->size)); + node = llvm::MDNode::get(context, + {str, base, llvm::ConstantAsMetadata::get(size)}); + return node; +} + +llvm::MDNode * +TypeSpecifierImpl::TypeToMetadata(std::shared_ptr type) { + auto &node = type_to_md[type.get()]; + if (node) { + return node; + } + + auto str = llvm::MDString::get(context, "ArrayType"); + auto base = + std::visit([this](auto &&t) { return TypeToMetadata(t); }, type->base); + auto size = llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), + static_cast(type->size)); + node = llvm::MDNode::get(context, + {str, base, llvm::ConstantAsMetadata::get(size)}); + return node; +} + +llvm::MDNode * +TypeSpecifierImpl::TypeToMetadata(std::shared_ptr type) { + auto &node = type_to_md[type.get()]; + if (node) { + return node; + } + + auto str = llvm::MDString::get(context, "StructType"); + std::vector md;
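+ // The tag string is the first operand; one metadata operand per struct member follows, in declaration order.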
+ md.push_back(str); + for (auto &member : type->members) { + md.push_back( + std::visit([this](auto &&t) { return TypeToMetadata(t); }, member)); + } + node = llvm::MDNode::get(context, md); + return node; +} + +llvm::MDNode * +TypeSpecifierImpl::TypeToMetadata(std::shared_ptr type) { + auto &node = type_to_md[type.get()]; + if (node) { + return node; + } + + auto str = llvm::MDString::get(context, "FunctionType"); + std::vector md; + md.push_back(str); + md.push_back(llvm::ConstantAsMetadata::get( + llvm::ConstantInt::getBool(context, type->is_variadic))); + md.push_back(std::visit([this](auto &&t) { return TypeToMetadata(t); }, + type->return_type)); + for (auto &arg : type->arguments) { + md.push_back( + std::visit([this](auto &&t) { return TypeToMetadata(t); }, arg)); + } + node = llvm::MDNode::get(context, md); + return node; +} + +llvm::MDNode *TypeSpecifierImpl::TypeToMetadata(UnknownType type) { + auto str = llvm::MDString::get(context, "UnknownType"); + auto size = + llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(context), type.size); + return llvm::MDNode::get(context, {str, llvm::ConstantAsMetadata::get(size)}); +} + +llvm::MDNode *TypeSpecifierImpl::TypeToMetadata(TypeName type) { + auto str = llvm::MDString::get(context, "Typename"); + auto nm = llvm::MDString::get(context, type.name); + + return llvm::MDNode::get(context, {str, nm}); +} + namespace { #if ANVILL_USE_WRAPPED_TYPES template -static llvm::Type *GetOrCreateWrapper( - llvm::LLVMContext &context, const char *name, T wrapper) { +static llvm::Type *GetOrCreateWrapper(llvm::LLVMContext &context, + const char *name, T wrapper) { std::string type_name = kAnvillNamePrefix + name; auto ty = llvm::StructType::getTypeByName(context, type_name); if (ty) { @@ -311,25 +452,28 @@ static llvm::Type *GetOrCreateWrapper( return llvm::StructType::create(context, elems, type_name, true); } -static llvm::Type *GetOrCreateInt(llvm::LLVMContext &context, - const char *name, unsigned num_bits) { - return GetOrCreateWrapper(context, name, [=] (llvm::LLVMContext &context_) { +static llvm::Type *GetOrCreateInt(llvm::LLVMContext &context, const char *name, + unsigned num_bits) { + return GetOrCreateWrapper(context, name, [=](llvm::LLVMContext &context_) { return llvm::IntegerType::get(context_, num_bits); }); } static llvm::Type *GetOrCreateFloat(llvm::LLVMContext &context, - const char *name, unsigned num_bits) { - return GetOrCreateWrapper( - context, name, [=] (llvm::LLVMContext &context_) -> llvm::Type * { - switch (num_bits) { - case 16: return llvm::Type::getHalfTy(context_); - case 32: return llvm::Type::getFloatTy(context_); - case 64: return llvm::Type::getDoubleTy(context_); - case 128: return llvm::Type::getFP128Ty(context_); - default: return nullptr; - } - }); + const char *name, unsigned num_bits) { + return GetOrCreateWrapper(context, name, + [=](llvm::LLVMContext &context_) -> llvm::Type * { + switch (num_bits) { + case 16: return llvm::Type::getHalfTy(context_); + case 32: + return llvm::Type::getFloatTy(context_); + case 64: + return llvm::Type::getDoubleTy(context_); + case 128: + return llvm::Type::getFP128Ty(context_); + default: return nullptr; + } + }); } #endif @@ -358,17 +502,18 @@ TypeDictionary::TypeDictionary(llvm::LLVMContext &context) { u.named.float32 = GetOrCreateFloat(context, "float32", 32); u.named.float64 = GetOrCreateFloat(context, "float64", 64); u.named.float80_12 = GetOrCreateWrapper( - context, "float80_12", [] (llvm::LLVMContext &context_) { + context, "float80_12", [](llvm::LLVMContext &context_) { return
llvm::ArrayType::get(llvm::Type::getInt8Ty(context_), 10); }); u.named.float80_16 = GetOrCreateWrapper( - context, "float80_16", [] (llvm::LLVMContext &context_) { + context, "float80_16", [](llvm::LLVMContext &context_) { return llvm::ArrayType::get(llvm::Type::getInt8Ty(context_), 12); }); u.named.float128 = GetOrCreateFloat(context, "float128", 128); - u.named.m64 = GetOrCreateWrapper(context, "mmx", [] (llvm::LLVMContext &context_) { - return llvm::Type::getX86_MMXTy(context_); - }); + u.named.m64 = + GetOrCreateWrapper(context, "mmx", [](llvm::LLVMContext &context_) { + return llvm::Type::getX86_MMXTy(context_); + }); u.named.void_ = GetOrCreateInt(context, "void", 8); u.named.padding = GetOrCreateInt(context, "padding", 8); #else @@ -406,7 +551,8 @@ bool TypeDictionary::IsPadding(llvm::Type *type) const noexcept { #if ANVILL_USE_WRAPPED_TYPES switch (type->getTypeID()) { case llvm::Type::StructTyID: - for (auto elem_type : llvm::dyn_cast(type)->elements()) { + for (auto elem_type : + llvm::dyn_cast(type)->elements()) { if (!IsPadding(elem_type)) { return false; } @@ -420,8 +566,7 @@ bool TypeDictionary::IsPadding(llvm::Type *type) const noexcept { auto elem_type = llvm::dyn_cast(type)->getElementType(); return IsPadding(elem_type); } - default: - return type == u.named.padding; + default: return type == u.named.padding; } #else return false; @@ -431,7 +576,7 @@ bool TypeDictionary::IsPadding(llvm::Type *type) const noexcept { TypeTranslator::~TypeTranslator(void) {} TypeTranslator::TypeTranslator(const TypeDictionary &type_dict, - const llvm::DataLayout &dl) + const llvm::DataLayout &dl) : impl(std::make_unique(type_dict, dl)) {} // Delegating constructor using a module's data layout. @@ -462,17 +607,92 @@ const llvm::DataLayout &TypeTranslator::DataLayout(void) const noexcept { // then only alpha_numeric characters (and underscores) are used. The // alpha_numeric representation is always safe to use when appended to // identifier names. 
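// (For instance, EncodeType above writes "_M" in place of '%' for struct back-references when only alpha-numeric characters are allowed.)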
-std::string TypeTranslator::EncodeToString( - llvm::Type *type, EncodingFormat format) const { +std::string TypeTranslator::EncodeToString(llvm::Type *type, + EncodingFormat format) const { std::stringstream ss; if (type) { impl->type_to_id.clear(); - impl->EncodeType( - *remill::RecontextualizeType(type, impl->context), ss, format); + impl->EncodeType(*remill::RecontextualizeType(type, impl->context), ss, + format); } return ss.str(); } +llvm::MDNode *TypeTranslator::EncodeToMetadata(TypeSpec spec) const { + return std::visit([this](auto &&t) { return impl->TypeToMetadata(t); }, spec); +} + +TypeSpec TypeTranslator::DecodeFromMetadata(llvm::MDNode *md) const { + if (impl->md_to_type.count(md)) { + return impl->md_to_type[md]; + } + + auto &res = impl->md_to_type[md]; + auto kind = llvm::cast(md->getOperand(0).get()); + if (kind->getString().equals("BaseType")) { + auto const_value = + llvm::cast(md->getOperand(1).get()) + ->getValue(); + auto const_int = llvm::cast(const_value); + res = static_cast(const_int->getZExtValue()); + } else if (kind->getString().equals("PointerType")) { + auto ptrtype = std::make_shared(UnknownType{}, false); + res = ptrtype; + auto pointee = llvm::cast(md->getOperand(1).get()); + ptrtype->pointee = DecodeFromMetadata(pointee); + } else if (kind->getString().equals("VectorType")) { + auto vectype = std::make_shared(UnknownType{}, 0); + res = vectype; + auto base = llvm::cast(md->getOperand(1).get()); + auto const_value = + llvm::cast(md->getOperand(2).get()) + ->getValue(); + auto const_int = llvm::cast(const_value); + vectype->base = DecodeFromMetadata(base); + vectype->size = const_int->getZExtValue(); + } else if (kind->getString().equals("ArrayType")) { + auto arrtype = std::make_shared(UnknownType{}, 0); + res = arrtype; + auto base = llvm::cast(md->getOperand(1).get()); + auto const_value = + llvm::cast(md->getOperand(2).get()) + ->getValue(); + auto const_int = llvm::cast(const_value); + arrtype->base = DecodeFromMetadata(base); + arrtype->size = const_int->getZExtValue(); + } else if (kind->getString().equals("StructType")) { + auto strcttype = std::make_shared(); + res = strcttype; + for (size_t i = 1; i < md->getNumOperands(); ++i) { + strcttype->members.push_back(DecodeFromMetadata( + llvm::cast(md->getOperand(i).get()))); + } + } else if (kind->getString().equals("FunctionType")) { + auto functype = std::make_shared( + UnknownType{}, std::vector{}, false); + res = functype; + auto const_value = + llvm::cast(md->getOperand(1).get()) + ->getValue(); + auto const_int = llvm::cast(const_value); + functype->is_variadic = const_int->getZExtValue(); + functype->return_type = + DecodeFromMetadata(llvm::cast(md->getOperand(2).get())); + for (size_t i = 3; i < md->getNumOperands(); ++i) { + functype->arguments.push_back(DecodeFromMetadata( + llvm::cast(md->getOperand(i).get()))); + } + } else if (kind->getString().equals("UnknownType")) { + auto const_value = + llvm::cast(md->getOperand(1).get()) + ->getValue(); + auto const_int = llvm::cast(const_value); + res = UnknownType{static_cast(const_int->getZExtValue())}; + } + + return res; +} + // Parse an encoded type string into its represented type. Result TypeTranslator::DecodeFromSpec(TypeSpec spec) const { @@ -543,8 +763,26 @@ TypeTranslator::DecodeFromSpec(TypeSpec spec) const { unk.size == UINT32_MAX ? 
32 : unk.size * 8); } + + if (std::holds_alternative(spec)) { + auto nm = std::get(spec); + auto sty = getOrCreateNamedStruct(this->impl->context, nm.name); + CHECK(sty); + return sty; + } + return TypeSpecificationError{TypeSpecificationError::ErrorCode::InvalidState, - "Function fell out of bounds"}; + "Unhandled type specification variant"}; +} + +llvm::StructType *getOrCreateNamedStruct(llvm::LLVMContext &context, + llvm::StringRef Name) { + auto res = llvm::StructType::getTypeByName(context, Name); + if (res) { + return res; + } + + return llvm::StructType::create(context, Name); } namespace { @@ -565,9 +803,9 @@ FindTypeInList(llvm::Type *query, llvm::Type *const (&types)[kSize]) { } } // namespace // Convert a value to a specific type. -llvm::Value *TypeDictionary::ConvertValueToType( - llvm::IRBuilderBase &ir, llvm::Value *src_val, - llvm::Type *dest_type) const { +llvm::Value *TypeDictionary::ConvertValueToType(llvm::IRBuilderBase &ir, + llvm::Value *src_val, + llvm::Type *dest_type) const { llvm::Type *src_type = src_val->getType(); if (src_type == dest_type) { @@ -586,26 +824,26 @@ llvm::Value *TypeDictionary::ConvertValueToType( // Unpack the source type, and then try to build it into the destination // type. This dispatches to the next case. if (maybe_src_type_index && maybe_dest_type_index) { -// unsigned indexes[] = {0u}; -// auto dest_val = ir.CreateExtractValue(src_val, indexes); -// CopyMetadataTo(src_val, dest_val); -// return ConvertValueToType(ir, dest_val, dest_type); + // unsigned indexes[] = {0u}; + // auto dest_val = ir.CreateExtractValue(src_val, indexes); + // CopyMetadataTo(src_val, dest_val); + // return ConvertValueToType(ir, dest_val, dest_type); LOG(FATAL) << "TODO"; return nullptr; - // Pack this type into a destination structure type. + // Pack this type into a destination structure type. } else if (!maybe_src_type_index && maybe_dest_type_index) { LOG(FATAL) << "TODO"; return nullptr; - // Unpack this type from a source structure type. + // Unpack this type from a source structure type. } else if (maybe_src_type_index && !maybe_dest_type_index) { unsigned indexes[] = {0u}; auto dest_val = ir.CreateExtractValue(src_val, indexes); CopyMetadataTo(src_val, dest_val); return AdaptToType(ir, dest_val, dest_type); - // Raw type adaptation. + // Raw type adaptation. } else { return AdaptToType(ir, src_val, dest_type); } diff --git a/lib/Utils.cpp b/lib/Utils.cpp index c59f65b93..5902bd06d 100644 --- a/lib/Utils.cpp +++ b/lib/Utils.cpp @@ -13,20 +13,41 @@ #include #include #include +#include +#include +#include +#include +#include #include +#include +#include #include #include #include #include #include #include +#include #include #include +#include +#include +#include +#include #include +#include #include #include +#include +#include +#include +#include +#include +#include #include +#include +#include namespace anvill { @@ -157,7 +178,11 @@ llvm::Value *AdaptToType(llvm::IRBuilderBase &ir, llvm::Value *src, // If we want to change the type of a load, then we can change the type of // the loaded pointer. + // TODO(Ian): I think this might be buggy through recursion: + // we set the IP to something above the load so we now aren't inserting where we expect to...
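+ // The insert block and point are saved below and restored once the replacement load has been created, so instructions emitted afterwards still land at the caller's position.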
if (auto li = llvm::dyn_cast(src)) { + auto blk = ir.GetInsertBlock(); + auto preip = ir.GetInsertPoint(); ir.SetInsertPoint(li); auto loaded_ptr = AdaptToType( ir, li->getPointerOperand(), @@ -168,6 +193,8 @@ llvm::Value *AdaptToType(llvm::IRBuilderBase &ir, llvm::Value *src, new_li->setAtomic(li->getOrdering(), li->getSyncScopeID()); new_li->setAlignment(li->getAlign()); CopyMetadataTo(li, new_li); + ir.SetInsertPoint(blk, preip); + return new_li; } @@ -217,6 +244,34 @@ std::string CreateVariableName(std::uint64_t addr) { return ss.str(); } +std::optional GetMetadata(llvm::StringRef tag, + const llvm::Instruction &instr) { + if (auto *metadata = instr.getMetadata(tag)) { + for (const auto &op : metadata->operands()) { + if (auto *md = dyn_cast(op.get())) { + if (auto c = dyn_cast(md->getValue())) { + auto pc_val = c->getValue().getZExtValue(); + return pc_val; + } + } + } + } + + return {}; +} + +void SetMetadata(llvm::StringRef tag, llvm::Instruction &insn, + std::uint64_t pc_val) { + auto &context = insn.getContext(); + auto &dl = insn.getModule()->getDataLayout(); + auto *address_type = + llvm::Type::getIntNTy(context, dl.getPointerSizeInBits(0)); + auto *cam = llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(address_type, pc_val)); + auto *node = llvm::MDNode::get(insn.getContext(), cam); + insn.setMetadata(tag, node); +} + void CopyMetadataTo(llvm::Value *src, llvm::Value *dst) { if (src == dst) { return; @@ -240,118 +295,84 @@ void CopyMetadataTo(llvm::Value *src, llvm::Value *dst) { } } -// Produce one or more instructions in `in_block` to store the -// native value `native_val` into the lifted state associated -// with `decl`. -llvm::Value *StoreNativeValue(llvm::Value *native_val, const ValueDecl &decl, - const TypeDictionary &types, - const remill::IntrinsicTable &intrinsics, - llvm::BasicBlock *in_block, - llvm::Value *state_ptr, llvm::Value *mem_ptr) { - - auto func = in_block->getParent(); - auto module = func->getParent(); - auto &context = module->getContext(); - - llvm::Type *decl_type = remill::RecontextualizeType(decl.type, context); - - CHECK_EQ(module, intrinsics.read_memory_8->getParent()); - CHECK_EQ(native_val->getType(), decl_type); +void CloneIntrinsicsFromModule(llvm::Module &from, llvm::Module &into) { + //CHECK(&from.getContext() == &into.getContext()); + auto func = from.getFunction("__remill_intrinsics"); + if (!func) { + LOG(FATAL) << "No intrinsics bundle in module"; + } - // Store it to a register. 
- if (decl.reg) { - auto reg_type = remill::RecontextualizeType(decl.reg->type, context); - auto ptr_to_reg = decl.reg->AddressOf(state_ptr, in_block); - llvm::IRBuilder<> ir(in_block); - if (decl_type != reg_type) { - ir.CreateStore(llvm::Constant::getNullValue(reg_type), ptr_to_reg); - } + if (into.getFunction("__remill_intrinsics")) { + return; + } - llvm::StoreInst *store = nullptr; + auto nfunc = llvm::Function::Create( + llvm::cast(remill::RecontextualizeType( + func->getFunctionType(), into.getContext())), + llvm::GlobalValue::ExternalLinkage, func->getName(), into); - auto ipoint = ir.GetInsertPoint(); - auto iblock = ir.GetInsertBlock(); - auto adapted_val = types.ConvertValueToType(ir, native_val, reg_type); - ir.SetInsertPoint(iblock, ipoint); + remill::CloneFunctionInto(func, nfunc); +} - if (adapted_val) { - store = ir.CreateStore(adapted_val, ptr_to_reg); +void StoreNativeValueToRegister(llvm::Value *native_val, + const remill::Register *reg, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &ir, llvm::Value *state_ptr) { + auto func = ir.GetInsertBlock()->getParent(); + auto module = func->getParent(); + auto &context = module->getContext(); - } else { - auto ptr = ir.CreateBitCast(ptr_to_reg, - llvm::PointerType::get(ir.getContext(), 0)); - CopyMetadataTo(native_val, ptr); - store = ir.CreateStore(native_val, ptr); - } - CopyMetadataTo(native_val, store); + auto reg_type = remill::RecontextualizeType(reg->type, context); + auto ptr_to_reg = reg->AddressOf(state_ptr, ir); - return mem_ptr; + llvm::StoreInst *store = nullptr; - // Store it to memory. - } else if (decl.mem_reg) { - auto mem_reg_type = - remill::RecontextualizeType(decl.mem_reg->type, context); - auto ptr_to_reg = decl.mem_reg->AddressOf(state_ptr, in_block); + auto adapted_val = types.ConvertValueToType(ir, native_val, reg_type); - llvm::IRBuilder<> ir(in_block); - llvm::Value *addr = ir.CreateLoad(mem_reg_type, ptr_to_reg); - CopyMetadataTo(native_val, addr); + if (adapted_val) { + store = ir.CreateStore(adapted_val, ptr_to_reg); - if (0ll < decl.mem_offset) { - addr = ir.CreateAdd( - addr, llvm::ConstantInt::get( - mem_reg_type, static_cast(decl.mem_offset), - false)); - CopyMetadataTo(native_val, addr); - - } else if (0ll > decl.mem_offset) { - addr = ir.CreateSub( - addr, llvm::ConstantInt::get( - mem_reg_type, static_cast(-decl.mem_offset), - false)); - CopyMetadataTo(native_val, addr); - } - - return remill::StoreToMemory(intrinsics, in_block, native_val, mem_ptr, - addr); - - // Store to memory at an absolute offset. 
- } else if (decl.mem_offset) { - llvm::IRBuilder<> ir(in_block); - const auto addr = llvm::ConstantInt::get( - remill::NthArgument(intrinsics.read_memory_8, 1u)->getType(), - static_cast(decl.mem_offset), false); - return remill::StoreToMemory(intrinsics, in_block, native_val, mem_ptr, - addr); } else { - return llvm::UndefValue::get(mem_ptr->getType()); + auto ptr = ir.CreateBitCast(ptr_to_reg, + llvm::PointerType::get(ir.getContext(), 0)); + CopyMetadataTo(native_val, ptr); + store = ir.CreateStore(native_val, ptr); } + CopyMetadataTo(native_val, store); +} + +void StoreNativeValueToRegister(llvm::Value *native_val, + const remill::Register *reg, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::BasicBlock *in_block, + llvm::Value *state_ptr) { + llvm::IRBuilder<> ir(in_block); + StoreNativeValueToRegister(native_val, reg, types, intrinsics, ir, state_ptr); } -llvm::Value *LoadLiftedValue(const ValueDecl &decl, const TypeDictionary &types, - const remill::IntrinsicTable &intrinsics, - llvm::BasicBlock *in_block, llvm::Value *state_ptr, - llvm::Value *mem_ptr) { - auto func = in_block->getParent(); +llvm::Value *LoadSubcomponent(const LowLoc &loc, llvm::Type *target_type, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &ir, llvm::Value *state_ptr, + llvm::Value *mem_ptr) { + auto func = ir.GetInsertBlock()->getParent(); auto module = func->getParent(); auto &context = module->getContext(); CHECK_EQ(module, intrinsics.read_memory_8->getParent()); - llvm::Type *decl_type = remill::RecontextualizeType(decl.type, context); + llvm::Type *decl_type = remill::RecontextualizeType(target_type, context); // Load it out of a register. - if (decl.reg) { - auto reg_type = remill::RecontextualizeType(decl.reg->type, context); - auto ptr_to_reg = decl.reg->AddressOf(state_ptr, in_block); - llvm::IRBuilder<> ir(in_block); + if (loc.reg) { + auto reg_type = remill::RecontextualizeType(loc.reg->type, context); + auto ptr_to_reg = loc.reg->AddressOf(state_ptr, ir); auto reg = ir.CreateLoad(reg_type, ptr_to_reg); CopyMetadataTo(mem_ptr, reg); - auto ipoint = ir.GetInsertPoint(); - auto iblock = ir.GetInsertBlock(); auto adapted_val = types.ConvertValueToType(ir, reg, decl_type); - ir.SetInsertPoint(iblock, ipoint); if (adapted_val) { return adapted_val; @@ -365,53 +386,270 @@ llvm::Value *LoadLiftedValue(const ValueDecl &decl, const TypeDictionary &types, } // Load it out of memory. + } else if (loc.mem_reg) { + auto mem_reg_type = remill::RecontextualizeType(loc.mem_reg->type, context); + auto ptr_to_reg = loc.mem_reg->AddressOf(state_ptr, ir); + llvm::Value *addr = ir.CreateLoad(mem_reg_type, ptr_to_reg); + CopyMetadataTo(mem_ptr, addr); + if (0ll < loc.mem_offset) { + addr = ir.CreateAdd( + addr, + llvm::ConstantInt::get( + mem_reg_type, static_cast(loc.mem_offset), false)); + CopyMetadataTo(mem_ptr, addr); + + } else if (0ll > loc.mem_offset) { + addr = ir.CreateSub( + addr, llvm::ConstantInt::get( + mem_reg_type, static_cast(-loc.mem_offset), + false)); + CopyMetadataTo(mem_ptr, addr); + } + + if (addr->getType() != loc.mem_reg->arch->AddressType()) { + addr = AdaptToType(ir, addr, loc.mem_reg->arch->AddressType()); + } + + auto val = remill::LoadFromMemory(intrinsics, ir, decl_type, mem_ptr, addr); + + return types.ConvertValueToType(ir, val, decl_type); + + // Store to memory at an absolute offset. 
+ } else if (loc.mem_offset) { + const auto addr = llvm::ConstantInt::get( + remill::NthArgument(intrinsics.read_memory_8, 1u)->getType(), + static_cast(loc.mem_offset), false); + auto val = remill::LoadFromMemory(intrinsics, ir, decl_type, mem_ptr, addr); + + CopyMetadataTo(mem_ptr, val); + return types.ConvertValueToType(ir, val, decl_type); + + } else { + DLOG(ERROR) << "Unable to load lifted value of type: " + << remill::LLVMThingToString(target_type); + return llvm::UndefValue::get(decl_type); + } +} + + +llvm::Value *StoreSubcomponent(llvm::Value *native_sub, const LowLoc &decl, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &ir, llvm::Value *state_ptr, + llvm::Value *mem_ptr) { + + llvm::LLVMContext &context = state_ptr->getContext(); + // Store it to a register. + if (decl.reg) { + StoreNativeValueToRegister(native_sub, decl.reg, types, intrinsics, ir, + state_ptr); + return mem_ptr; + + // Store it to memory. } else if (decl.mem_reg) { auto mem_reg_type = remill::RecontextualizeType(decl.mem_reg->type, context); - auto ptr_to_reg = decl.mem_reg->AddressOf(state_ptr, in_block); - llvm::IRBuilder<> ir(in_block); + auto ptr_to_reg = decl.mem_reg->AddressOf(state_ptr, ir); + llvm::Value *addr = ir.CreateLoad(mem_reg_type, ptr_to_reg); - CopyMetadataTo(mem_ptr, addr); + CopyMetadataTo(native_sub, addr); + + if (0ll < decl.mem_offset) { addr = ir.CreateAdd( addr, llvm::ConstantInt::get( mem_reg_type, static_cast(decl.mem_offset), false)); - CopyMetadataTo(mem_ptr, addr); + CopyMetadataTo(native_sub, addr); } else if (0ll > decl.mem_offset) { addr = ir.CreateSub( addr, llvm::ConstantInt::get( mem_reg_type, static_cast(-decl.mem_offset), false)); - CopyMetadataTo(mem_ptr, addr); + CopyMetadataTo(native_sub, addr); } - auto val = - remill::LoadFromMemory(intrinsics, in_block, decl_type, mem_ptr, addr); - ir.SetInsertPoint(in_block); - return types.ConvertValueToType(ir, val, decl_type); + if (addr->getType() != decl.mem_reg->arch->AddressType()) { + addr = AdaptToType(ir, addr, decl.mem_reg->arch->AddressType()); + } + + return remill::StoreToMemory(intrinsics, ir, native_sub, mem_ptr, addr); // Store to memory at an absolute offset. 
} else if (decl.mem_offset) { - llvm::IRBuilder<> ir(in_block); const auto addr = llvm::ConstantInt::get( remill::NthArgument(intrinsics.read_memory_8, 1u)->getType(), static_cast(decl.mem_offset), false); - auto val = - remill::LoadFromMemory(intrinsics, in_block, decl_type, mem_ptr, addr); + return remill::StoreToMemory(intrinsics, ir, native_sub, mem_ptr, addr); - CopyMetadataTo(mem_ptr, val); - ir.SetInsertPoint(in_block); - return types.ConvertValueToType(ir, val, decl_type); + } else { + return llvm::UndefValue::get(mem_ptr->getType()); + } +} +llvm::Value *ExtractSubcomponent(unsigned int elem, llvm::Type *dest_type, + llvm::Value *native_val, + llvm::Type *native_type, + llvm::IRBuilder<> &ir) { + auto i32 = llvm::IntegerType::getInt32Ty(native_val->getContext()); + return ir.CreateLoad(dest_type, + ir.CreateGEP(native_type, native_val, + {llvm::ConstantInt::get(i32, 0), + llvm::ConstantInt::get(i32, elem)})); +} + + +llvm::IntegerType *LocType(const LowLoc &loc, llvm::LLVMContext &cont) { + return llvm::IntegerType::get(cont, loc.Size() * 8); +} + +llvm::StructType *CreateDeclSty(const std::vector &lowlocs, + llvm::LLVMContext &cont) { + + std::vector tys; + std::transform(lowlocs.begin(), lowlocs.end(), std::back_inserter(tys), + [&cont](const LowLoc &loc) -> llvm::Type * { + return LocType(loc, cont); + }); + return llvm::StructType::get(cont, tys, true); +} + +llvm::Value *StoreNativeValue(llvm::Value *native_val, const ValueDecl &decl, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::IRBuilder<> &ir, llvm::Value *state_ptr, + llvm::Value *mem_ptr) { + + auto func = ir.GetInsertBlock()->getParent(); + auto module = func->getParent(); + auto &context = module->getContext(); + + llvm::Type *decl_type = remill::RecontextualizeType(decl.type, context); + + CHECK_EQ(module, intrinsics.read_memory_8->getParent()); + CHECK_EQ(native_val->getType(), decl_type); + + if (decl.ordered_locs.size() == 1) { + return StoreSubcomponent(native_val, decl.ordered_locs.at(0), types, + intrinsics, ir, state_ptr, mem_ptr); } else { - DLOG(ERROR) << "Unable to load lifted value of type: " - << remill::LLVMThingToString(decl.type); - return llvm::UndefValue::get(decl_type); + + unsigned int ind = 0; + + auto sty = CreateDeclSty(decl.ordered_locs, context); + auto curr_val = ir.CreateAlloca(sty); + + ir.CreateStore(native_val, curr_val); + auto mem = mem_ptr; + for (const auto &comp : decl.ordered_locs) { + auto compvl = + ExtractSubcomponent(ind, LocType(comp, context), curr_val, sty, ir); + mem = StoreSubcomponent(compvl, comp, types, intrinsics, ir, state_ptr, + mem); + ind++; + } + + return mem; } } +// Produce one or more instructions in `in_block` to store the +// native value `native_val` into the lifted state associated +// with `decl`. 
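+// This overload is a thin convenience wrapper: it constructs an IRBuilder at the end of `in_block` and forwards to the IRBuilder-based overload above.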
+llvm::Value *StoreNativeValue(llvm::Value *native_val, const ValueDecl &decl, + const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + llvm::BasicBlock *in_block, + llvm::Value *state_ptr, llvm::Value *mem_ptr) { + + llvm::IRBuilder<> ir(in_block); + return StoreNativeValue(native_val, decl, types, intrinsics, ir, state_ptr, + mem_ptr); +} + + +std::optional +GetSubcomponentType(const LowLoc &loc, uint64_t offset, llvm::Type *target_type, + llvm::DataLayout &data) { + // There are two situations here: either we have a primitive target type, in which case the loc must + // indicate the size of each component, or we decompose the target type + if (target_type->isIntegerTy() || target_type->isFloatingPointTy()) { + return llvm::IntegerType::get(target_type->getContext(), loc.Size() * 8); + } else { + llvm::Type *ty = target_type; + llvm::APInt off(64, offset); + auto ind = data.getGEPIndexForOffset(ty, off); + + if (ind) { + return ty; + } + } + + return std::nullopt; +} + + +llvm::Value *BuildMultiComponentValue(llvm::IRBuilder<> &ir, + const std::vector comps, + llvm::Type *sty, llvm::Type *target_type, + llvm::DataLayout &dl) { + auto i32_type = llvm::Type::getInt32Ty(sty->getContext()); + auto storage = ir.CreateAlloca(sty); + uint64_t ind = 0; + for (auto c : comps) { + ir.CreateStore(c, ir.CreateGEP(sty, storage, + {llvm::ConstantInt::get(i32_type, 0), + llvm::ConstantInt::get(i32_type, ind)})); + ind += 1; + } + + return ir.CreateLoad(target_type, storage); +} + + +llvm::Value *LoadLiftedValue(const ValueDecl &decl, const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + const remill::Arch *arch, llvm::IRBuilder<> &ir, + llvm::Value *state_ptr, llvm::Value *mem_ptr) { + if (decl.ordered_locs.size() == 1) { + return LoadSubcomponent(decl.ordered_locs[0], decl.type, types, intrinsics, + ir, state_ptr, mem_ptr); + } else { + uint64_t offset = 0; + std::vector comps; + auto dl = arch->DataLayout(); + + for (const auto &loc : decl.ordered_locs) { + + auto subty = GetSubcomponentType(loc, offset, decl.type, dl); + if (!subty) { + LOG(ERROR) << "Lifted value undef because no subcomponent for " + << remill::LLVMThingToString(decl.type) << " at offset " + << offset; + return llvm::UndefValue::get(decl.type); + } + comps.push_back(LoadSubcomponent(loc, *subty, types, intrinsics, ir, + state_ptr, mem_ptr)); + + offset += loc.Size(); + } + auto sty = CreateDeclSty(decl.ordered_locs, state_ptr->getContext()); + return BuildMultiComponentValue(ir, comps, sty, decl.type, dl); + } +} + + +llvm::Value *LoadLiftedValue(const ValueDecl &decl, const TypeDictionary &types, + const remill::IntrinsicTable &intrinsics, + const remill::Arch *arch, + llvm::BasicBlock *in_block, llvm::Value *state_ptr, + llvm::Value *mem_ptr) { + + llvm::IRBuilder<> ir(in_block); + return LoadLiftedValue(decl, types, intrinsics, arch, ir, state_ptr, mem_ptr); +} + namespace { // Returns `true` if `reg_name` appears to be the name of the stack pointer @@ -433,6 +671,7 @@ static bool IsStackPointerRegName(llvm::Module *module, case llvm::Triple::ArchType::sparcel: case llvm::Triple::ArchType::sparcv9: return reg_name == "o6" || reg_name == "sp"; + case llvm::Triple::ArchType::ppc: return reg_name == "r1"; default: return false; } } @@ -450,7 +689,8 @@ static bool IsProgramCounterRegName(llvm::Module *module, return reg_name == "pc" || reg_name == "wpc"; case llvm::Triple::ArchType::aarch64_32: case llvm::Triple::ArchType::arm: - case llvm::Triple::ArchType::armeb: return reg_name == "pc";
case llvm::Triple::ArchType::armeb: + case llvm::Triple::ArchType::ppc: return reg_name == "pc"; case llvm::Triple::ArchType::sparc: case llvm::Triple::ArchType::sparcel: case llvm::Triple::ArchType::sparcv9: @@ -508,13 +748,27 @@ class StackPointerResolverImpl { public: bool ResolveFromValue(llvm::Value *val); bool ResolveFromConstantExpr(llvm::ConstantExpr *ce); + bool IsStackPointerBase(llvm::Value *candidate); - inline explicit StackPointerResolverImpl(llvm::Module *m) : module(m) {} + inline explicit StackPointerResolverImpl( + llvm::Module *m, llvm::ArrayRef additional_base_stack_ptrs) + : module(m) { + this->stack_related_args.insert(additional_base_stack_ptrs.begin(), + additional_base_stack_ptrs.end()); + } llvm::Module *const module; std::unordered_map cache; + + llvm::SmallSet stack_related_args; }; +bool StackPointerResolverImpl::IsStackPointerBase(llvm::Value *candidate) { + return IsStackPointer(module, candidate) || + (this->stack_related_args.find(candidate) != + this->stack_related_args.end()); +} + bool StackPointerResolverImpl::ResolveFromValue(llvm::Value *val) { // Lookup the cache and return the value if it exist @@ -546,7 +800,7 @@ bool StackPointerResolverImpl::ResolveFromValue(llvm::Value *val) { val3 && val3 != val) { result = ResolveFromValue(val3); } else { - result = IsStackPointer(module, val); + result = this->IsStackPointerBase(val); } } @@ -593,8 +847,10 @@ bool StackPointerResolverImpl::ResolveFromConstantExpr(llvm::ConstantExpr *ce) { } StackPointerResolver::~StackPointerResolver(void) {} -StackPointerResolver::StackPointerResolver(llvm::Module *module) - : impl(new StackPointerResolverImpl(module)) {} +StackPointerResolver::StackPointerResolver( + llvm::Module *module, + llvm::ArrayRef additional_base_stack_ptrs) + : impl(new StackPointerResolverImpl(module, additional_base_stack_ptrs)) {} // Returns `true` if it looks like `val` is derived from a symbolic stack // pointer representation.
@@ -603,7 +859,7 @@ bool StackPointerResolver::IsRelatedToStackPointer(llvm::Value *val) const { } bool IsRelatedToStackPointer(llvm::Module *module, llvm::Value *val) { - StackPointerResolverImpl impl(module); + StackPointerResolverImpl impl(module, {}); return impl.ResolveFromValue(val); } @@ -741,4 +997,29 @@ bool CanBeAliased(llvm::Value *val) { } } +std::optional GetBasicBlockUid(llvm::Function *func) { + auto meta = func->getMetadata(kBasicBlockUidMetadata); + if (!meta) { + return std::nullopt; + } + + auto v = llvm::cast(meta->getOperand(0))->getValue(); + + return Uid{llvm::cast(v)->getLimitedValue()}; +} + +llvm::Argument *GetBasicBlockStackPtr(llvm::Function *func) { + return func->getArg(0); +} + +bool HasMemLoc(const ValueDecl &v) { + return std::any_of(v.ordered_locs.begin(), v.ordered_locs.end(), + [](const LowLoc &loc) -> bool { return loc.mem_reg; }); +} + +bool HasRegLoc(const ValueDecl &v) { + return std::any_of(v.ordered_locs.begin(), v.ordered_locs.end(), + [](const LowLoc &loc) -> bool { return loc.reg; }); +} + } // namespace anvill diff --git a/libraries/lifting-tools-ci b/libraries/lifting-tools-ci index 1785bf696..95b0aa262 160000 --- a/libraries/lifting-tools-ci +++ b/libraries/lifting-tools-ci @@ -1 +1 @@ -Subproject commit 1785bf69687b24b2611bbf5fdc4fb5caa4c4fdb3 +Subproject commit 95b0aa2621908df4982a18b02ef50fcb94d1044c diff --git a/remill b/remill index a8ead7b58..874490a89 160000 --- a/remill +++ b/remill @@ -1 +1 @@ -Subproject commit a8ead7b58496f0fc90100eee67de1eee74cdc8c3 +Subproject commit 874490a894c5c8f0920af0fb583ca500abc5d65d diff --git a/scripts/build.sh b/scripts/build.sh index 438a74ad2..6acae60e0 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -25,8 +25,8 @@ CURR_DIR=$( pwd ) BUILD_DIR="${CURR_DIR}/anvill-build" REMILL_BUILD_DIR="${CURR_DIR}/remill-build" INSTALL_DIR=/usr/local -LLVM_VERSION=llvm-15 -CXX_COMMON_VERSION="0.2.12" +LLVM_VERSION=llvm-17 +CXX_COMMON_VERSION="0.6.0" OS_VERSION=unknown ARCH_VERSION=unknown BUILD_FLAGS= @@ -175,11 +175,14 @@ function DownloadLibraries #BUILD_FLAGS="${BUILD_FLAGS} -DCMAKE_OSX_SYSROOT=${sdk_root}" # Min version supported - OS_VERSION="macos-11" - XCODE_VERSION="13.0" - if [[ "$(sw_vers -productVersion)" == "11."* ]]; then - echo "Found MacOS Big Sur" - OS_VERSION="macos-11" + OS_VERSION="macos-13" + XCODE_VERSION="15.0" + if [[ "${SYSTEM_VERSION}" == "13."* ]]; then + echo "Found MacOS Ventura" + OS_VERSION="macos-13" + elif [[ "${SYSTEM_VERSION}" == "12."* ]]; then + echo "Found MacOS Monterey" + OS_VERSION="macos-12" else echo "WARNING: ****Likely unsupported MacOS Version****" echo "WARNING: ****Using ${OS_VERSION}****" @@ -244,6 +247,7 @@ function BuildRemill -DCMAKE_TOOLCHAIN_FILE="${DOWNLOAD_DIR}/${LIBRARY_VERSION}/scripts/buildsystems/vcpkg.cmake" \ -DVCPKG_TARGET_TRIPLET="${VCPKG_TARGET_TRIPLET}" \ -G Ninja \ + ${BUILD_FLAGS} \ ${SRC_DIR}/remill cmake --build . --target install @@ -338,12 +342,8 @@ function Package function GetLLVMVersion { case ${1} in - 14) - LLVM_VERSION=llvm-14 - return 0 - ;; - 15) - LLVM_VERSION=llvm-15 + 17) + LLVM_VERSION=llvm-17 return 0 ;; *) @@ -361,7 +361,7 @@ function Help echo "" echo "Options:" echo " --prefix Change the default (${INSTALL_DIR}) installation prefix." - echo " --llvm-version Change the default (15) LLVM version." + echo " --llvm-version Change the default (17) LLVM version." echo " --build-dir Change the default (${BUILD_DIR}) build directory." echo " --debug Build with Debug symbols."
echo " --extra-cmake-args Extra CMake arguments to build with." diff --git a/scripts/run-on-anghabench.sh b/scripts/run-on-anghabench.sh index a8be0000f..b2917155b 100644 --- a/scripts/run-on-anghabench.sh +++ b/scripts/run-on-anghabench.sh @@ -7,7 +7,7 @@ export BINJA_DECODE_KEY=__BINJA_DECODE_KEY__ export BINJA_CHANNEL=__BINJA_CHANNEL__ export BINJA_VERSION=__BINJA_VERSION__ -export LLVM_VERSION=14 +export LLVM_VERSION=16 export CC=clang-13 CXX=clang++-13 dpkg --add-architecture i386 diff --git a/scripts/test-amp-challenge-bins.sh b/scripts/test-amp-challenge-bins.sh index 87a9c61f2..29339c55f 100755 --- a/scripts/test-amp-challenge-bins.sh +++ b/scripts/test-amp-challenge-bins.sh @@ -11,6 +11,7 @@ function Help echo "Options:" echo " --ghidra-install-dir The ghidra install dir. Default ${GHIDRA_INSTALL_DIR}" echo " --decompile-cmd The anvill decompile command to invoke. Default ${ANVILL_DECOMPILE}" + echo " --jobs The number of jobs that can run concurrently. Defaults to system's CPU count" echo " -h --help Print help." } @@ -72,6 +73,12 @@ while [[ $# -gt 0 ]] ; do shift # past argument ;; + # How many concurrent jobs + --jobs) + NUM_JOBS=${2} + shift # past argument + ;; + *) # unknown option echo "[x] Unknown option: ${key}" @@ -90,7 +97,7 @@ then fi if ! ${ANVILL_DECOMPILE} --version &>/dev/null; -then +then echo "[!] Could not execute anvill decompile cmd: ${ANVILL_DECOMPILE}" exit 1 fi @@ -109,18 +116,23 @@ do done FAILED="no" -for dir in challenge-binaries +for dir in binaries do echo "[+] Testing ${dir}" - ${SRC_DIR}/libraries/lifting-tools-ci/tool_run_scripts/anvill.py \ - --ghidra-install-dir "${GHIDRA_INSTALL_DIR}" \ - --anvill-decompile "${ANVILL_DECOMPILE}" \ - --input-dir "$(pwd)/${dir}" \ - --output-dir "$(pwd)/results/${dir}" \ - --run-name "anvill-live-ci-amp-bins" \ - --test-options "${SRC_DIR}/ci/challenge_bins_test_settings.json" \ - --dump-stats \ + args=( + --ghidra-install-dir "${GHIDRA_INSTALL_DIR}" + --anvill-decompile "${ANVILL_DECOMPILE}" + --input-dir "$(pwd)/${dir}" + --output-dir "$(pwd)/results/${dir}" + --run-name "anvill-live-ci-amp-bins" + --test-options "${SRC_DIR}/ci/challenge_bins_test_settings.json" + --dump-stats --dump-benchmark + ) + if [[ -v NUM_JOBS ]]; then + args+=(--jobs "${NUM_JOBS}") + fi + ${SRC_DIR}/libraries/lifting-tools-ci/tool_run_scripts/anvill.py "${args[@]}" if ! check_test "$(pwd)/results/${dir}/python/stats.json" diff --git a/scripts/test-angha-1k.sh b/scripts/test-angha-50.sh similarity index 78% rename from scripts/test-angha-1k.sh rename to scripts/test-angha-50.sh index d040d782f..c88a81a0c 100755 --- a/scripts/test-angha-1k.sh +++ b/scripts/test-angha-50.sh @@ -1,16 +1,16 @@ #!/bin/bash DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) SRC_DIR=$( cd "$( dirname "${DIR}" )" && pwd ) - -ANVILL_PYTHON="python3 -m anvill" +GHIDRA_INSTALL_DIR="~/ghidra_10.1.5_PUBLIC/" ANVILL_DECOMPILE="anvill-decompile-spec" function Help { - echo "Run Anvill on AnghaBech-1K" + echo "Run Anvill on AnghaBench-50" echo "" echo "Options:" - echo " --python-cmd The anvill Python command to invoke. Default ${ANVILL_PYTHON}" + echo " --ghidra-install-dir The ghidra install dir. Default ${GHIDRA_INSTALL_DIR}" echo " --decompile-cmd The anvill decompile command to invoke. Default ${ANVILL_DECOMPILE}" + echo " --jobs The number of jobs that can run concurrently. Defaults to system's CPU count" echo " -h --help Print help." 
 }
 
@@ -60,16 +60,22 @@ while [[ $# -gt 0 ]] ; do
     exit 0
     ;;
 
-    # Anvill python cmd
-    --python-cmd)
-    ANVILL_PYTHON=${2}
+    --ghidra-install-dir)
+    GHIDRA_INSTALL_DIR=${2}
     shift # past argument
     ;;
-    # How large of a run to get
+
+    # Anvill decompile cmd
     --decompile-cmd)
     ANVILL_DECOMPILE=${2}
     shift # past argument
+    ;;
+
+    # How many concurrent jobs
+    --jobs)
+    NUM_JOBS=${2}
+    shift # past argument
     ;;
 
     *)
@@ -89,24 +95,19 @@ then
   exit 1
 fi
 
-if ! ${ANVILL_PYTHON} --help &>/dev/null;
-then
-  echo "[!] Could not execute anvill python cmd: ${ANVILL_PYTHON}"
-  exit 1
-fi
 
 if ! ${ANVILL_DECOMPILE} --version &>/dev/null;
-then 
+then
   echo "[!] Could not execute anvill decompile cmd: ${ANVILL_DECOMPILE}"
   exit 1
 fi
 
 # create a working directory
-mkdir -p angha-test-1k
-pushd angha-test-1k
+mkdir -p angha-test-50
+pushd angha-test-50
 
-# fetch the test set: 1K binaries (per arch)
-${SRC_DIR}/libraries/lifting-tools-ci/datasets/fetch_anghabench.sh --run-size 1k --binaries
+# fetch the test set: 50 binaries (per arch)
+${SRC_DIR}/libraries/lifting-tools-ci/datasets/fetch_anghabench.sh --run-size 50 --binaries
 
 # extract it
 for tarfile in *.tar.xz
 do
@@ -117,14 +118,20 @@ FAILED="no"
 for arch in $(ls -1 binaries/)
 do
   echo "[+] Testing architecture ${arch}"
-  ${SRC_DIR}/libraries/lifting-tools-ci/tool_run_scripts/anvill.py \
-    --anvill-python "${ANVILL_PYTHON}" \
+  args=(
+    --ghidra-install-dir "${GHIDRA_INSTALL_DIR}" \
     --anvill-decompile "${ANVILL_DECOMPILE}" \
     --input-dir "$(pwd)/binaries/${arch}" \
     --output-dir "$(pwd)/results/${arch}" \
    --run-name "anvill-live-ci-${arch}" \
-    --test-options "${SRC_DIR}/ci/angha_1k_test_settings.json" \
+    --test-options "${SRC_DIR}/ci/angha_50_test_settings.json" \
     --dump-stats
+  )
+  if [[ -v NUM_JOBS ]]; then
+    args+=(--jobs "${NUM_JOBS}")
+  fi
+  ${SRC_DIR}/libraries/lifting-tools-ci/tool_run_scripts/anvill.py "${args[@]}"
+
check_test "$(pwd)/results/${arch}/python/stats.json" diff --git a/tests/anvill_passes/CMakeLists.txt b/tests/anvill_passes/CMakeLists.txt index c26c97728..56a7bdeb4 100644 --- a/tests/anvill_passes/CMakeLists.txt +++ b/tests/anvill_passes/CMakeLists.txt @@ -12,17 +12,17 @@ add_executable(test_anvill_passes src/Utils.h src/Utils.cpp - src/RecoverStackFrameInformation.cpp src/SinkSelectionsIntoBranchTargets.cpp src/SplitStackFrameAtReturnAddress.cpp src/InstructionFolderPass.cpp src/BrightenPointers.cpp src/TransformRemillJump.cpp - src/SwitchLoweringPass.cpp src/XorConversionPass.cpp src/BranchRecoveryPass.cpp src/RemoveStackPointerCExprs.cpp src/RecoverEntityUses.cpp + src/TestAbstractStackBB.cpp + src/VectorRW.cpp ) target_link_libraries(test_anvill_passes PRIVATE @@ -43,6 +43,6 @@ target_include_directories(test_anvill_passes PRIVATE add_test( NAME test_anvill_passes - COMMAND "$" + COMMAND test_anvill_passes WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" ) diff --git a/tests/anvill_passes/data/MainBasicBlocks.ll b/tests/anvill_passes/data/MainBasicBlocks.ll new file mode 100644 index 000000000..c69340898 --- /dev/null +++ b/tests/anvill_passes/data/MainBasicBlocks.ll @@ -0,0 +1,5036 @@ +; ModuleID = 'lifted_code' +source_filename = "lifted_code" +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu-elf" + +%struct.State = type { %struct.X86State } +%struct.X86State = type { %struct.ArchState, [32 x %union.VectorReg], %struct.ArithFlags, %union.anon, %struct.Segments, %struct.AddressSpace, %struct.GPR, %struct.X87Stack, %struct.MMX, %struct.FPUStatusFlags, %union.anon, %union.FPU, %struct.SegmentCaches, %struct.K_REG } +%struct.ArchState = type { i32, i32, %union.anon } +%union.VectorReg = type { %union.vec512_t } +%union.vec512_t = type { %struct.uint64v8_t } +%struct.uint64v8_t = type { [8 x i64] } +%struct.ArithFlags = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } +%struct.Segments = type { i16, %union.SegmentSelector, i16, %union.SegmentSelector, i16, %union.SegmentSelector, i16, %union.SegmentSelector, i16, %union.SegmentSelector, i16, %union.SegmentSelector } +%union.SegmentSelector = type { i16 } +%struct.AddressSpace = type { i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg } +%struct.Reg = type { %union.anon } +%struct.GPR = type { i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg, i64, %struct.Reg } +%struct.X87Stack = type { [8 x %struct.anon.3] } +%struct.anon.3 = type { [6 x i8], %struct.float80_t } +%struct.float80_t = type { [10 x i8] } +%struct.MMX = type { [8 x %struct.anon.4] } +%struct.anon.4 = type { i64, %union.vec64_t } +%union.vec64_t = type { %struct.uint64v1_t } +%struct.uint64v1_t = type { [1 x i64] } +%struct.FPUStatusFlags = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [4 x i8] } +%union.anon = type { i64 } +%union.FPU = type { %struct.anon.13 } +%struct.anon.13 = type { %struct.FpuFXSAVE, [96 x i8] } +%struct.FpuFXSAVE = type { %union.SegmentSelector, %union.SegmentSelector, %union.FPUAbridgedTagWord, i8, i16, i32, %union.SegmentSelector, i16, i32, %union.SegmentSelector, i16, %union.FPUControlStatus, %union.FPUControlStatus, [8 x %struct.FPUStackElem], [16 
x %union.vec128_t] } +%union.FPUAbridgedTagWord = type { i8 } +%union.FPUControlStatus = type { i32 } +%struct.FPUStackElem = type { %union.anon.11, [6 x i8] } +%union.anon.11 = type { %struct.float80_t } +%union.vec128_t = type { %struct.uint128v1_t } +%struct.uint128v1_t = type { [1 x i128] } +%struct.SegmentCaches = type { %struct.SegmentShadow, %struct.SegmentShadow, %struct.SegmentShadow, %struct.SegmentShadow, %struct.SegmentShadow, %struct.SegmentShadow } +%struct.SegmentShadow = type { %union.anon, i32, i32 } +%struct.K_REG = type { [8 x %struct.anon.18] } +%struct.anon.18 = type { i64, i64 } + +@__anvill_reg_RAX = external local_unnamed_addr global i64 +@__anvill_reg_RBX = external local_unnamed_addr global i64 +@__anvill_reg_RCX = external local_unnamed_addr global i64 +@__anvill_reg_RDX = external local_unnamed_addr global i64 +@__anvill_reg_RDI = external local_unnamed_addr global i64 +@__anvill_reg_RBP = external local_unnamed_addr global i64 +@__anvill_reg_R8 = external local_unnamed_addr global i64 +@__anvill_reg_R9 = external local_unnamed_addr global i64 +@__anvill_reg_R10 = external local_unnamed_addr global i64 +@__anvill_reg_R11 = external local_unnamed_addr global i64 +@__anvill_reg_R12 = external local_unnamed_addr global i64 +@__anvill_reg_R13 = external local_unnamed_addr global i64 +@__anvill_reg_R14 = external local_unnamed_addr global i64 +@__anvill_reg_R15 = external local_unnamed_addr global i64 +@__anvill_reg_SS = external local_unnamed_addr global i16 +@__anvill_reg_ES = external local_unnamed_addr global i16 +@__anvill_reg_GS = external local_unnamed_addr global i16 +@__anvill_reg_FS = external local_unnamed_addr global i16 +@__anvill_reg_DS = external local_unnamed_addr global i16 +@__anvill_reg_CS = external local_unnamed_addr global i16 +@__anvill_reg_XMM0 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM1 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM2 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM3 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM4 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM5 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM6 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM7 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM8 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM9 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM10 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM11 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM12 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM13 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM14 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_XMM15 = external local_unnamed_addr global [16 x i8] +@__anvill_reg_ST0 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST1 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST2 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST3 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST4 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST5 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST6 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_ST7 = external local_unnamed_addr global x86_fp80 +@__anvill_reg_MM0 = external local_unnamed_addr global i64 +@__anvill_reg_MM1 = external local_unnamed_addr global i64 +@__anvill_reg_MM2 = external 
local_unnamed_addr global i64 +@__anvill_reg_MM3 = external local_unnamed_addr global i64 +@__anvill_reg_MM4 = external local_unnamed_addr global i64 +@__anvill_reg_MM5 = external local_unnamed_addr global i64 +@__anvill_reg_MM6 = external local_unnamed_addr global i64 +@__anvill_reg_MM7 = external local_unnamed_addr global i64 +@__anvill_reg_AF = external local_unnamed_addr global i8 +@__anvill_reg_CF = external local_unnamed_addr global i8 +@__anvill_reg_DF = external local_unnamed_addr global i8 +@__anvill_reg_OF = external local_unnamed_addr global i8 +@__anvill_reg_PF = external local_unnamed_addr global i8 +@__anvill_reg_SF = external local_unnamed_addr global i8 +@__anvill_reg_ZF = external local_unnamed_addr global i8 +@__anvill_ra = external global i64 +@__anvill_pc = external global i64 +@var_402020__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@var_40203a__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@var_40204d_B = local_unnamed_addr constant i8 119 +@var_40204f_B = local_unnamed_addr constant i8 37 +@var_402052_B = local_unnamed_addr constant i8 49 +@var_402057__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@var_402060__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@var_402098_B = local_unnamed_addr constant i8 67 +@var_40209c__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@var_4020b1_B = local_unnamed_addr constant i8 111 +@var_4020b3_B = local_unnamed_addr constant i8 120 +@var_4020b5__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@var_4020c4__CBx0_D = local_unnamed_addr constant [0 x i8] zeroinitializer +@__anvill_stack_0 = external local_unnamed_addr global i64 + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199049(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %EAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !0 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2 + %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 3 + %2 = load i64, ptr %RBX, align 8 + %3 = inttoptr i64 %2 to ptr + %4 = load i64, ptr %3, align 8 + store i64 %4, ptr %RSI, align 8, !tbaa !5 + store i64 %1, ptr %PC, align 8 + %5 = add i64 %program_counter, 8 + store i64 4202528, ptr %RDI, align 8, !tbaa !5 + store i64 %5, ptr %PC, align 8 + %6 = add i64 %program_counter, 10 + %7 = load i64, ptr %EAX, align 8 + %8 = load i32, ptr %EAX, align 4 + %conv.i.i = trunc i64 %7 to i32 + %xor3.i.i = xor i32 %8, %conv.i.i + %conv.i27.i = zext i32 %xor3.i.i to i64 + store i64 %conv.i27.i, ptr %EAX, align 8, !tbaa !5 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i = trunc i32 %xor3.i.i to i8 + %9 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26 + %10 = and i8 %9, 1 + %11 = xor i8 %10, 1 + %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %11, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i = icmp eq 
i32 %xor3.i.i, 0 + %conv3.i.i = zext i1 %cmp.i.i.i to i8 + %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i19.i.i = icmp slt i32 %xor3.i.i, 0 + %conv6.i.i = zext i1 %cmp.i19.i.i to i8 + %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29 + %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 undef, ptr %af.i.i, align 1, !tbaa !31 + store i64 %6, ptr %PC, align 8 + %12 = add i64 %program_counter, 15 + %13 = add i64 %program_counter, -505 + %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %14 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i = add i64 %14, -8 + %15 = inttoptr i64 %sub.i.i to ptr + store i64 %12, ptr %15, align 8 + store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %13, ptr %rip.i, align 8, !tbaa !5 + %16 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %12, ptr %PC, align 8 + %17 = add i64 %program_counter, 20 + store i64 1, ptr %EAX, align 8, !tbaa !5 + store i64 %17, ptr %PC, align 8 + %18 = add i64 %program_counter, 873 + store i64 %18, ptr %rip.i, align 8, !tbaa !5 + store i64 %18, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn +declare i8 @llvm.ctpop.i8(i8) #1 + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i1 @__remill_flag_computation_zero(i1 noundef zeroext, ...) local_unnamed_addr #2 + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i1 @__remill_flag_computation_sign(i1 noundef zeroext, ...) 
local_unnamed_addr #2 + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i8 @__remill_undefined_8() local_unnamed_addr #2 + +; Function Attrs: noduplicate noinline nounwind optnone +declare ptr @__remill_function_call(ptr noundef nonnull align 1, i64 noundef, ptr noundef) local_unnamed_addr #3 + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199174(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %RBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !33 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %R14 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 29, i32 0, i32 0, !remill_register !35 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 5 + %2 = load i64, ptr %RSP, align 8 + %3 = add i64 %2, 24 + store i64 %3, ptr %R14, align 8, !tbaa !5 + store i64 %1, ptr %PC, align 8 + %4 = add i64 %program_counter, 10 + %5 = add i64 %2, 86 + store i64 %5, ptr %RBP, align 8, !tbaa !5 + store i64 %4, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199922(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %RBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !33 + %R15 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 31, i32 0, i32 0, !remill_register !36 + %R14 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 29, i32 0, i32 0, !remill_register !35 + %R13 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 27, i32 0, i32 0, !remill_register !37 + %R12 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 25, i32 0, i32 0, !remill_register !38 + %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 7 + %2 = load i64, ptr %RSP, align 8 + %add.i.i = add i64 %2, 248 + store i64 %add.i.i, ptr %RSP, align 8, !tbaa !5 + %cmp.i.i.i = icmp ugt i64 %2, -249 + %conv.i.i = zext i1 %cmp.i.i.i to i8 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 %conv.i.i, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i.i = trunc i64 %add.i.i to i8 + %3 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i), !range !26 + %4 = and i8 %3, 1 + %5 = xor i8 %4, 1 + %pf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %5, ptr %pf.i.i.i, align 1, !tbaa !27 + %6 = xor i64 %2, %add.i.i + %7 = trunc i64 %6 to i8 + %8 = xor i8 %7, -1 + %9 = lshr i8 %8, 4 + %10 = and i8 %9, 1 + %af.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 %10, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i = icmp eq i64 %add.i.i, 0 + %conv5.i.i.i = 
zext i1 %cmp.i.i.i.i to i8 + %zf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv5.i.i.i, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i = icmp slt i64 %add.i.i, 0 + %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8 + %sf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 %conv8.i.i.i, ptr %sf.i.i.i, align 1, !tbaa !29 + %shr.i.i.i.i = lshr i64 %2, 63 + %shr2.i.i.i.i = lshr i64 %add.i.i, 63 + %xor.i28.i.i.i = xor i64 %shr2.i.i.i.i, %shr.i.i.i.i + %add.i.i.i.i = add nuw nsw i64 %xor.i28.i.i.i, %shr2.i.i.i.i + %cmp.i29.i.i.i = icmp eq i64 %add.i.i.i.i, 2 + %conv11.i.i.i = zext i1 %cmp.i29.i.i.i to i8 + %of.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 %conv11.i.i.i, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %1, ptr %PC, align 8 + %11 = add i64 %program_counter, 8 + %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %12 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %add.i.i1 = add i64 %12, 8 + store i64 %add.i.i1, ptr %rsp.i, align 8, !tbaa !5 + %13 = inttoptr i64 %12 to ptr + %14 = load i64, ptr %13, align 8 + store i64 %14, ptr %RBX, align 8, !tbaa !5 + store i64 %11, ptr %PC, align 8 + %15 = add i64 %program_counter, 10 + %add.i.i3 = add i64 %12, 16 + store i64 %add.i.i3, ptr %rsp.i, align 8, !tbaa !5 + %16 = inttoptr i64 %add.i.i1 to ptr + %17 = load i64, ptr %16, align 8 + store i64 %17, ptr %R12, align 8, !tbaa !5 + store i64 %15, ptr %PC, align 8 + %18 = add i64 %program_counter, 12 + %add.i.i6 = add i64 %12, 24 + store i64 %add.i.i6, ptr %rsp.i, align 8, !tbaa !5 + %19 = inttoptr i64 %add.i.i3 to ptr + %20 = load i64, ptr %19, align 8 + store i64 %20, ptr %R13, align 8, !tbaa !5 + store i64 %18, ptr %PC, align 8 + %21 = add i64 %program_counter, 14 + %add.i.i9 = add i64 %12, 32 + store i64 %add.i.i9, ptr %rsp.i, align 8, !tbaa !5 + %22 = inttoptr i64 %add.i.i6 to ptr + %23 = load i64, ptr %22, align 8 + store i64 %23, ptr %R14, align 8, !tbaa !5 + store i64 %21, ptr %PC, align 8 + %24 = add i64 %program_counter, 16 + %add.i.i12 = add i64 %12, 40 + store i64 %add.i.i12, ptr %rsp.i, align 8, !tbaa !5 + %25 = inttoptr i64 %add.i.i9 to ptr + %26 = load i64, ptr %25, align 8 + store i64 %26, ptr %R15, align 8, !tbaa !5 + store i64 %24, ptr %PC, align 8 + %27 = add i64 %program_counter, 17 + %add.i.i15 = add i64 %12, 48 + store i64 %add.i.i15, ptr %rsp.i, align 8, !tbaa !5 + %28 = inttoptr i64 %add.i.i12 to ptr + %29 = load i64, ptr %28, align 8 + store i64 %29, ptr %RBP, align 8, !tbaa !5 + store i64 %27, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i1 @__remill_flag_computation_carry(i1 noundef zeroext, ...) local_unnamed_addr #2 + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i1 @__remill_flag_computation_overflow(i1 noundef zeroext, ...) 
local_unnamed_addr #2 + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199673(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2 + %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 5 + %2 = load i64, ptr %RSP, align 8 + %3 = add i64 %2, 8 + store i64 %3, ptr %RDI, align 8, !tbaa !5 + store i64 %1, ptr %PC, align 8 + %4 = add i64 %program_counter, 8 + %5 = load i64, ptr %RBX, align 8 + store i64 %5, ptr %RSI, align 8, !tbaa !5 + store i64 %4, ptr %PC, align 8 + %6 = add i64 %program_counter, 13 + %7 = add i64 %program_counter, 407 + %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %8 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i = add i64 %8, -8 + %9 = inttoptr i64 %sub.i.i to ptr + store i64 %6, ptr %9, align 8 + store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %7, ptr %rip.i, align 8, !tbaa !5 + %10 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %6, ptr %PC, align 8 + %11 = add i64 %program_counter, 28 + store i64 %11, ptr %rip.i, align 8, !tbaa !5 + store i64 %11, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199701(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %AL = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !39 + %R8 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 17, i32 0, i32 0, !remill_register !40 + %RCX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 5, i32 0, i32 0, !remill_register !41 + %RDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !42 + %EBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !43 + %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3 + %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 3 + %2 = load i64, ptr %RBX, align 8 + store i64 %2, ptr %RDI, align 8, !tbaa !5 + store i64 %1, ptr %PC, align 8 + %3 = add i64 %program_counter, 8 + %4 = add i64 %program_counter, 1867 + %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %5 = load i64, ptr %rsp.i, 
align 8, !tbaa !32 + %sub.i.i = add i64 %5, -8 + %6 = inttoptr i64 %sub.i.i to ptr + store i64 %3, ptr %6, align 8 + store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %4, ptr %rip.i, align 8, !tbaa !5 + %7 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %3, ptr %PC, align 8 + %8 = add i64 %program_counter, 11 + %9 = load i64, ptr %RBX, align 8 + store i64 %9, ptr %RDI, align 8, !tbaa !5 + store i64 %8, ptr %PC, align 8 + %10 = add i64 %program_counter, 16 + %11 = add i64 %program_counter, 1979 + %12 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i2 = add i64 %12, -8 + %13 = inttoptr i64 %sub.i.i2 to ptr + store i64 %10, ptr %13, align 8 + store i64 %sub.i.i2, ptr %rsp.i, align 8, !tbaa !5 + store i64 %11, ptr %rip.i, align 8, !tbaa !5 + %14 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %10, ptr %PC, align 8 + %15 = add i64 %program_counter, 19 + %16 = load i64, ptr %RBX, align 8 + %17 = inttoptr i64 %16 to ptr + %18 = load i8, ptr %17, align 1 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %19 = call i8 @llvm.ctpop.i8(i8 %18), !range !26 + %20 = and i8 %19, 1 + %21 = xor i8 %20, 1 + %pf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %21, ptr %pf.i.i.i, align 1, !tbaa !27 + %af.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i = icmp eq i8 %18, 0 + %conv5.i.i.i = zext i1 %cmp.i.i.i.i to i8 + %zf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv5.i.i.i, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i = icmp slt i8 %18, 0 + %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8 + %sf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 %conv8.i.i.i, ptr %sf.i.i.i, align 1, !tbaa !29 + %of.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %15, ptr %PC, align 8 + %22 = add i64 %program_counter, 24 + store i64 100, ptr %RSI, align 8, !tbaa !5 + store i64 %22, ptr %PC, align 8 + %23 = add i64 %program_counter, 29 + store i64 10, ptr %EBP, align 8, !tbaa !5 + store i64 %23, ptr %PC, align 8 + %24 = add i64 %program_counter, 32 + %cond1.i.v.i = select i1 %cmp.i.i.i.i, i64 10, i64 100 + store i64 %cond1.i.v.i, ptr %RSI, align 8, !tbaa !5 + store i64 %24, ptr %PC, align 8 + %25 = add i64 %program_counter, 37 + store i64 1, ptr %RDI, align 8, !tbaa !5 + store i64 %25, ptr %PC, align 8 + %26 = add i64 %program_counter, 42 + %27 = add i64 %program_counter, 1499 + %28 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i7 = add i64 %28, -8 + %29 = inttoptr i64 %sub.i.i7 to ptr + store i64 %26, ptr %29, align 8 + store i64 %sub.i.i7, ptr %rsp.i, align 8, !tbaa !5 + store i64 %27, ptr %rip.i, align 8, !tbaa !5 + %30 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %26, ptr %PC, align 8 + %31 = add i64 %program_counter, 46 + %32 = load i64, ptr %RBX, align 8 + %33 = add i64 %32, 1 + %34 = inttoptr i64 %33 to ptr + %35 = load i8, ptr %34, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %36 = call i8 @llvm.ctpop.i8(i8 %35), !range !26 + %37 = and i8 %36, 1 + %38 = xor i8 %37, 1 
+ store i8 %38, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i18 = icmp eq i8 %35, 0 + %conv5.i.i.i20 = zext i1 %cmp.i.i.i.i18 to i8 + store i8 %conv5.i.i.i20, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i22 = icmp slt i8 %35, 0 + %conv8.i.i.i24 = zext i1 %cmp.i27.i.i.i22 to i8 + store i8 %conv8.i.i.i24, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %31, ptr %PC, align 8 + %39 = add i64 %program_counter, 51 + store i64 100, ptr %RSI, align 8, !tbaa !5 + store i64 %39, ptr %PC, align 8 + %40 = add i64 %program_counter, 54 + %41 = load i32, ptr %EBP, align 4 + %42 = zext i32 %41 to i64 + %cond1.i.v.i36 = select i1 %cmp.i.i.i.i18, i64 %42, i64 100 + store i64 %cond1.i.v.i36, ptr %RSI, align 8, !tbaa !5 + store i64 %40, ptr %PC, align 8 + %43 = add i64 %program_counter, 59 + store i64 2, ptr %RDI, align 8, !tbaa !5 + store i64 %43, ptr %PC, align 8 + %44 = add i64 %program_counter, 64 + %45 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i39 = add i64 %45, -8 + %46 = inttoptr i64 %sub.i.i39 to ptr + store i64 %44, ptr %46, align 8 + store i64 %sub.i.i39, ptr %rsp.i, align 8, !tbaa !5 + store i64 %27, ptr %rip.i, align 8, !tbaa !5 + %47 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %44, ptr %PC, align 8 + %48 = add i64 %program_counter, 68 + %49 = load i64, ptr %RBX, align 8 + %50 = add i64 %49, 2 + %51 = inttoptr i64 %50 to ptr + %52 = load i8, ptr %51, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %53 = call i8 @llvm.ctpop.i8(i8 %52), !range !26 + %54 = and i8 %53, 1 + %55 = xor i8 %54, 1 + store i8 %55, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i50 = icmp eq i8 %52, 0 + %conv5.i.i.i52 = zext i1 %cmp.i.i.i.i50 to i8 + store i8 %conv5.i.i.i52, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i54 = icmp slt i8 %52, 0 + %conv8.i.i.i56 = zext i1 %cmp.i27.i.i.i54 to i8 + store i8 %conv8.i.i.i56, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %48, ptr %PC, align 8 + %56 = add i64 %program_counter, 73 + store i64 100, ptr %RSI, align 8, !tbaa !5 + store i64 %56, ptr %PC, align 8 + %57 = add i64 %program_counter, 76 + %58 = load i32, ptr %EBP, align 4 + %59 = zext i32 %58 to i64 + %cond1.i.v.i68 = select i1 %cmp.i.i.i.i50, i64 %59, i64 100 + store i64 %cond1.i.v.i68, ptr %RSI, align 8, !tbaa !5 + store i64 %57, ptr %PC, align 8 + %60 = add i64 %program_counter, 81 + store i64 3, ptr %RDI, align 8, !tbaa !5 + store i64 %60, ptr %PC, align 8 + %61 = add i64 %program_counter, 86 + %62 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i71 = add i64 %62, -8 + %63 = inttoptr i64 %sub.i.i71 to ptr + store i64 %61, ptr %63, align 8 + store i64 %sub.i.i71, ptr %rsp.i, align 8, !tbaa !5 + store i64 %27, ptr %rip.i, align 8, !tbaa !5 + %64 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %61, ptr %PC, align 8 + %65 = add i64 %program_counter, 90 + %66 = load i64, ptr %RBX, align 8 + %67 = add i64 %66, 3 + %68 = inttoptr i64 %67 to ptr + %69 = load i8, ptr %68, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %70 = call i8 @llvm.ctpop.i8(i8 %69), !range !26 + %71 = and i8 %70, 1 + %72 = xor i8 %71, 1 + store i8 %72, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i82 = icmp eq i8 %69, 0 + %conv5.i.i.i84 = zext i1 %cmp.i.i.i.i82 to i8 + store i8 
%conv5.i.i.i84, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i86 = icmp slt i8 %69, 0 + %conv8.i.i.i88 = zext i1 %cmp.i27.i.i.i86 to i8 + store i8 %conv8.i.i.i88, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %65, ptr %PC, align 8 + %73 = add i64 %program_counter, 95 + store i64 100, ptr %RSI, align 8, !tbaa !5 + store i64 %73, ptr %PC, align 8 + %74 = add i64 %program_counter, 98 + %75 = load i32, ptr %EBP, align 4 + %76 = zext i32 %75 to i64 + %cond1.i.v.i100 = select i1 %cmp.i.i.i.i82, i64 %76, i64 100 + store i64 %cond1.i.v.i100, ptr %RSI, align 8, !tbaa !5 + store i64 %74, ptr %PC, align 8 + %77 = add i64 %program_counter, 103 + store i64 4, ptr %RDI, align 8, !tbaa !5 + store i64 %77, ptr %PC, align 8 + %78 = add i64 %program_counter, 108 + %79 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i103 = add i64 %79, -8 + %80 = inttoptr i64 %sub.i.i103 to ptr + store i64 %78, ptr %80, align 8 + store i64 %sub.i.i103, ptr %rsp.i, align 8, !tbaa !5 + store i64 %27, ptr %rip.i, align 8, !tbaa !5 + %81 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %78, ptr %PC, align 8 + %82 = add i64 %program_counter, 111 + %83 = load i64, ptr %RBX, align 8 + %84 = inttoptr i64 %83 to ptr + %85 = load i8, ptr %84, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %86 = call i8 @llvm.ctpop.i8(i8 %85), !range !26 + %87 = and i8 %86, 1 + %88 = xor i8 %87, 1 + store i8 %88, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i114 = icmp eq i8 %85, 0 + %conv5.i.i.i116 = zext i1 %cmp.i.i.i.i114 to i8 + store i8 %conv5.i.i.i116, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i118 = icmp slt i8 %85, 0 + %conv8.i.i.i120 = zext i1 %cmp.i27.i.i.i118 to i8 + store i8 %conv8.i.i.i120, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %82, ptr %PC, align 8 + %89 = add i64 %program_counter, 116 + store i64 4202673, ptr %RSI, align 8, !tbaa !5 + store i64 %89, ptr %PC, align 8 + %90 = add i64 %program_counter, 121 + store i64 4202675, ptr %AL, align 8, !tbaa !5 + store i64 %90, ptr %PC, align 8 + %91 = add i64 %program_counter, 125 + %cond1.i.i = select i1 %cmp.i.i.i.i114, i64 4202675, i64 4202673 + store i64 %cond1.i.i, ptr %RSI, align 8, !tbaa !5 + store i64 %91, ptr %PC, align 8 + %92 = add i64 %program_counter, 129 + %93 = add i64 %83, 1 + %94 = inttoptr i64 %93 to ptr + %95 = load i8, ptr %94, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %96 = call i8 @llvm.ctpop.i8(i8 %95), !range !26 + %97 = and i8 %96, 1 + %98 = xor i8 %97, 1 + store i8 %98, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i140 = icmp eq i8 %95, 0 + %conv5.i.i.i142 = zext i1 %cmp.i.i.i.i140 to i8 + store i8 %conv5.i.i.i142, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i144 = icmp slt i8 %95, 0 + %conv8.i.i.i146 = zext i1 %cmp.i27.i.i.i144 to i8 + store i8 %conv8.i.i.i146, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %92, ptr %PC, align 8 + %99 = add i64 %program_counter, 134 + store i64 4202673, ptr %RDX, align 8, !tbaa !5 + store i64 %99, ptr %PC, align 8 + %100 = add i64 %program_counter, 138 + %cond1.i.i158 = select i1 %cmp.i.i.i.i140, i64 4202675, i64 4202673 + store i64 %cond1.i.i158, ptr %RDX, align 8, !tbaa !5 + store i64 %100, ptr %PC, align 8 + %101 = add i64 %program_counter, 142 + %102 = add i64 %83, 2 + %103 = inttoptr i64 %102 to ptr + 
%104 = load i8, ptr %103, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %105 = call i8 @llvm.ctpop.i8(i8 %104), !range !26 + %106 = and i8 %105, 1 + %107 = xor i8 %106, 1 + store i8 %107, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i167 = icmp eq i8 %104, 0 + %conv5.i.i.i169 = zext i1 %cmp.i.i.i.i167 to i8 + store i8 %conv5.i.i.i169, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i171 = icmp slt i8 %104, 0 + %conv8.i.i.i173 = zext i1 %cmp.i27.i.i.i171 to i8 + store i8 %conv8.i.i.i173, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %101, ptr %PC, align 8 + %108 = add i64 %program_counter, 147 + store i64 4202673, ptr %RCX, align 8, !tbaa !5 + store i64 %108, ptr %PC, align 8 + %109 = add i64 %program_counter, 151 + %cond1.i.i185 = select i1 %cmp.i.i.i.i167, i64 4202675, i64 4202673 + store i64 %cond1.i.i185, ptr %RCX, align 8, !tbaa !5 + store i64 %109, ptr %PC, align 8 + %110 = add i64 %program_counter, 155 + %111 = add i64 %83, 3 + %112 = inttoptr i64 %111 to ptr + %113 = load i8, ptr %112, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %114 = call i8 @llvm.ctpop.i8(i8 %113), !range !26 + %115 = and i8 %114, 1 + %116 = xor i8 %115, 1 + store i8 %116, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %af.i.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i194 = icmp eq i8 %113, 0 + %conv5.i.i.i196 = zext i1 %cmp.i.i.i.i194 to i8 + store i8 %conv5.i.i.i196, ptr %zf.i.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i198 = icmp slt i8 %113, 0 + %conv8.i.i.i200 = zext i1 %cmp.i27.i.i.i198 to i8 + store i8 %conv8.i.i.i200, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i64 %110, ptr %PC, align 8 + %117 = add i64 %program_counter, 161 + store i64 4202673, ptr %R8, align 8, !tbaa !5 + store i64 %117, ptr %PC, align 8 + %118 = add i64 %program_counter, 165 + %cond1.i.i212 = select i1 %cmp.i.i.i.i194, i64 4202675, i64 4202673 + store i64 %cond1.i.i212, ptr %R8, align 8, !tbaa !5 + store i64 %118, ptr %PC, align 8 + %119 = add i64 %program_counter, 170 + store i64 4202652, ptr %RDI, align 8, !tbaa !5 + store i64 %119, ptr %PC, align 8 + %120 = add i64 %program_counter, 172 + store i64 0, ptr %AL, align 8, !tbaa !5 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %121 = call i8 @llvm.ctpop.i8(i8 0), !range !26 + %122 = and i8 %121, 1 + %123 = xor i8 %122, 1 + store i8 %123, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 1, ptr %zf.i.i.i, align 1, !tbaa !28 + store i8 0, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 1, !tbaa !30 + store i8 undef, ptr %af.i.i.i, align 1, !tbaa !31 + store i64 %120, ptr %PC, align 8 + %124 = add i64 %program_counter, 177 + %125 = add i64 %program_counter, -1157 + %126 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i217 = add i64 %126, -8 + %127 = inttoptr i64 %sub.i.i217 to ptr + store i64 %124, ptr %127, align 8 + store i64 %sub.i.i217, ptr %rsp.i, align 8, !tbaa !5 + store i64 %125, ptr %rip.i, align 8, !tbaa !5 + %128 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %124, ptr %PC, align 8 + %129 = add i64 %program_counter, 179 + store i8 1, ptr %AL, align 1, !tbaa !32 + store i64 %129, ptr %PC, align 8 + %130 = add i64 %program_counter, 181 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + store i8 0, ptr %pf.i.i.i, align 1, !tbaa !27 + store i8 0, ptr %zf.i.i.i, align 1, !tbaa !28 + store i8 0, ptr %sf.i.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i.i, align 
1, !tbaa !30 + store i8 undef, ptr %af.i.i.i, align 1, !tbaa !31 + store i64 %130, ptr %PC, align 8 + %cond1.i.i233 = add i64 -309, %program_counter + store i64 %cond1.i.i233, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i1 @__remill_compare_eq(i1 noundef zeroext) local_unnamed_addr #2 + +; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn +declare zeroext i1 @__remill_compare_neq(i1 noundef zeroext) local_unnamed_addr #2 + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199888(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 30 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %1, ptr %rip.i, align 8, !tbaa !5 + store i64 %1, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199497(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %R8 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 17, i32 0, i32 0, !remill_register !40 + %RAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !44 + %RDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !42 + %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3 + %R11 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 23, i32 0, i32 0, !remill_register !45 + %R10 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 21, i32 0, i32 0, !remill_register !46 + %R13 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 27, i32 0, i32 0, !remill_register !37 + %R15 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 31, i32 0, i32 0, !remill_register !36 + %RCX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 5, i32 0, i32 0, !remill_register !41 + %R9 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 19, i32 0, i32 0, !remill_register !47 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %R14 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 29, i32 0, i32 0, !remill_register !35 + %R12D = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 25, i32 0, i32 0, !remill_register !48 + %EBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !43 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 3 + %2 = load i64, ptr %RSP, align 8 + %3 = inttoptr i64 %2 to ptr + %4 = load i32, ptr %3, align 4 + %conv.i.i = zext i32 %4 to i64 + store i64 %conv.i.i, ptr %EBP, align 8, !tbaa !5 
+ store i64 %1, ptr %PC, align 8 + %5 = add i64 %program_counter, 6 + %6 = load i32, ptr %EBP, align 4 + %7 = zext i32 %6 to i64 + store i64 %7, ptr %R12D, align 8, !tbaa !5 + store i64 %5, ptr %PC, align 8 + %8 = add i64 %program_counter, 13 + %and3.i.i = and i32 %6, 536870911 + %conv.i22.i = zext i32 %and3.i.i to i64 + store i64 %conv.i22.i, ptr %R12D, align 8, !tbaa !5 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i = trunc i32 %6 to i8 + %9 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26 + %10 = and i8 %9, 1 + %11 = xor i8 %10, 1 + %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %11, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i = icmp eq i32 %and3.i.i, 0 + %conv3.i.i = zext i1 %cmp.i.i.i to i8 + %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28 + %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 0, ptr %sf.i.i, align 1, !tbaa !29 + %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 0, ptr %af.i.i, align 1, !tbaa !31 + store i64 %8, ptr %PC, align 8 + %12 = add i64 %program_counter, 17 + %13 = load i32, ptr %R12D, align 4 + store i32 %13, ptr %3, align 4 + store i64 %12, ptr %PC, align 8 + %14 = add i64 %program_counter, 22 + %15 = add i64 %2, 112 + store i64 %15, ptr %R14, align 8, !tbaa !5 + store i64 %14, ptr %PC, align 8 + %16 = add i64 %program_counter, 25 + store i64 %15, ptr %RDI, align 8, !tbaa !5 + store i64 %16, ptr %PC, align 8 + %17 = add i64 %program_counter, 30 + %18 = add i64 %program_counter, -857 + %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %19 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i = add i64 %19, -8 + %20 = inttoptr i64 %sub.i.i to ptr + store i64 %17, ptr %20, align 8 + store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %18, ptr %rip.i, align 8, !tbaa !5 + %21 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %17, ptr %PC, align 8 + %22 = add i64 %program_counter, 33 + %23 = load i64, ptr %R14, align 8 + store i64 %23, ptr %RDI, align 8, !tbaa !5 + store i64 %22, ptr %PC, align 8 + %24 = add i64 %program_counter, 38 + %25 = add i64 %program_counter, -969 + %26 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i5 = add i64 %26, -8 + %27 = inttoptr i64 %sub.i.i5 to ptr + store i64 %24, ptr %27, align 8 + store i64 %sub.i.i5, ptr %rsp.i, align 8, !tbaa !5 + store i64 %25, ptr %rip.i, align 8, !tbaa !5 + %28 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %24, ptr %PC, align 8 + %29 = add i64 %program_counter, 44 + %30 = load i64, ptr %RSP, align 8 + %31 = add i64 %30, 8 + %32 = inttoptr i64 %31 to ptr + %33 = load i8, ptr %32, align 1 + %conv.i.i9 = zext i8 %33 to i64 + store i64 %conv.i.i9, ptr %R9, align 8, !tbaa !5 + store i64 %29, ptr %PC, align 8 + %34 = add i64 %30, 9 + %35 = inttoptr i64 %34 to ptr + %36 = load i8, ptr %35, align 1 + %conv.i.i11 = zext i8 %36 to i64 + store i64 %conv.i.i11, ptr %RCX, align 8, !tbaa !5 + %37 = add i64 %program_counter, 54 + %38 = add i64 %30, 72 
+ %39 = inttoptr i64 %38 to ptr + store i64 %conv.i.i11, ptr %39, align 8 + store i64 %37, ptr %PC, align 8 + %40 = add i64 %30, 10 + %41 = inttoptr i64 %40 to ptr + %42 = load i8, ptr %41, align 1 + %conv.i.i14 = zext i8 %42 to i64 + store i64 %conv.i.i14, ptr %RCX, align 8, !tbaa !5 + %43 = add i64 %program_counter, 64 + %44 = add i64 %30, 64 + %45 = inttoptr i64 %44 to ptr + store i64 %conv.i.i14, ptr %45, align 8 + store i64 %43, ptr %PC, align 8 + %46 = add i64 %program_counter, 70 + %47 = add i64 %30, 11 + %48 = inttoptr i64 %47 to ptr + %49 = load i8, ptr %48, align 1 + %conv.i.i17 = zext i8 %49 to i64 + store i64 %conv.i.i17, ptr %R15, align 8, !tbaa !5 + store i64 %46, ptr %PC, align 8 + %50 = add i64 %program_counter, 76 + %51 = add i64 %30, 12 + %52 = inttoptr i64 %51 to ptr + %53 = load i8, ptr %52, align 1 + %conv.i.i19 = zext i8 %53 to i64 + store i64 %conv.i.i19, ptr %R13, align 8, !tbaa !5 + store i64 %50, ptr %PC, align 8 + %54 = add i64 %program_counter, 82 + %55 = add i64 %30, 13 + %56 = inttoptr i64 %55 to ptr + %57 = load i8, ptr %56, align 1 + %conv.i.i21 = zext i8 %57 to i64 + store i64 %conv.i.i21, ptr %R14, align 8, !tbaa !5 + store i64 %54, ptr %PC, align 8 + %58 = add i64 %program_counter, 88 + %59 = add i64 %30, 14 + %60 = inttoptr i64 %59 to ptr + %61 = load i8, ptr %60, align 1 + %conv.i.i23 = zext i8 %61 to i64 + store i64 %conv.i.i23, ptr %R10, align 8, !tbaa !5 + store i64 %58, ptr %PC, align 8 + %62 = add i64 %program_counter, 94 + %63 = add i64 %30, 15 + %64 = inttoptr i64 %63 to ptr + %65 = load i8, ptr %64, align 1 + %conv.i.i25 = zext i8 %65 to i64 + store i64 %conv.i.i25, ptr %R11, align 8, !tbaa !5 + store i64 %62, ptr %PC, align 8 + %66 = add i64 %program_counter, 98 + %sub.i.i26 = add i64 %30, -8 + store i64 %sub.i.i26, ptr %RSP, align 8, !tbaa !5 + %cmp.i.i.i27 = icmp ult i64 %30, 8 + %conv.i.i29 = zext i1 %cmp.i.i.i27 to i8 + store i8 %conv.i.i29, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i.i = trunc i64 %sub.i.i26 to i8 + %67 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i), !range !26 + %68 = and i8 %67, 1 + %69 = xor i8 %68, 1 + store i8 %69, ptr %pf.i.i, align 1, !tbaa !27 + %70 = xor i64 %30, %sub.i.i26 + %71 = trunc i64 %70 to i8 + %72 = lshr i8 %71, 4 + %73 = and i8 %72, 1 + store i8 %73, ptr %af.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i = icmp eq i64 %30, 8 + %conv5.i.i.i = zext i1 %cmp.i.i.i.i to i8 + store i8 %conv5.i.i.i, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i = icmp slt i64 %sub.i.i26, 0 + %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8 + store i8 %conv8.i.i.i, ptr %sf.i.i, align 1, !tbaa !29 + %shr.i.i.i.i = lshr i64 %30, 63 + %shr2.i.i.i.i = lshr i64 %sub.i.i26, 63 + %xor3.i.i.i.i = xor i64 %shr2.i.i.i.i, %shr.i.i.i.i + %add.i.i.i.i = add nuw nsw i64 %xor3.i.i.i.i, %shr.i.i.i.i + %cmp.i29.i.i.i = icmp eq i64 %add.i.i.i.i, 2 + %conv11.i.i.i = zext i1 %cmp.i29.i.i.i to i8 + store i8 %conv11.i.i.i, ptr %of.i.i, align 1, !tbaa !30 + store i64 %66, ptr %PC, align 8 + %74 = add i64 %program_counter, 103 + store i64 4202592, ptr %RSI, align 8, !tbaa !5 + store i64 %74, ptr %PC, align 8 + %75 = add i64 %program_counter, 108 + store i64 4202648, ptr %RCX, align 8, !tbaa !5 + store i64 %75, ptr %PC, align 8 + %76 = add i64 %program_counter, 113 + %77 = add i64 %30, 40 + %78 = inttoptr i64 %77 to ptr + %79 = load i64, ptr %78, align 8 + store i64 %79, ptr %RDI, align 8, !tbaa !5 + store i64 %76, ptr %PC, align 8 + %80 = add i64 %program_counter, 116 + %81 = load i64, ptr %RAX, align 8 + store i64 %81, ptr %RDX, align 8, !tbaa !5 + store i64 %80, 
ptr %PC, align 8 + %82 = add i64 %program_counter, 119 + %83 = load i32, ptr %R12D, align 4 + %84 = zext i32 %83 to i64 + store i64 %84, ptr %R8, align 8, !tbaa !5 + store i64 %82, ptr %PC, align 8 + %85 = add i64 %program_counter, 124 + store i64 0, ptr %RAX, align 8, !tbaa !5 + store i64 %85, ptr %PC, align 8 + %86 = add i64 %program_counter, 126 + %87 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i.i = add i64 %87, -8 + %88 = inttoptr i64 %sub.i.i.i to ptr + store i64 %conv.i.i25, ptr %88, align 8 + store i64 %sub.i.i.i, ptr %rsp.i, align 8, !tbaa !5 + store i64 %86, ptr %PC, align 8 + %89 = add i64 %program_counter, 128 + %sub.i.i.i35 = add i64 %87, -16 + %90 = inttoptr i64 %sub.i.i.i35 to ptr + store i64 %conv.i.i23, ptr %90, align 8 + store i64 %sub.i.i.i35, ptr %rsp.i, align 8, !tbaa !5 + store i64 %89, ptr %PC, align 8 + %91 = add i64 %program_counter, 130 + %sub.i.i.i38 = add i64 %87, -24 + %92 = inttoptr i64 %sub.i.i.i38 to ptr + store i64 %conv.i.i21, ptr %92, align 8 + store i64 %sub.i.i.i38, ptr %rsp.i, align 8, !tbaa !5 + store i64 %91, ptr %PC, align 8 + %93 = add i64 %program_counter, 132 + %sub.i.i.i41 = add i64 %87, -32 + %94 = inttoptr i64 %sub.i.i.i41 to ptr + store i64 %conv.i.i19, ptr %94, align 8 + store i64 %sub.i.i.i41, ptr %rsp.i, align 8, !tbaa !5 + store i64 %93, ptr %PC, align 8 + %95 = add i64 %program_counter, 134 + %sub.i.i.i44 = add i64 %87, -40 + %96 = inttoptr i64 %sub.i.i.i44 to ptr + store i64 %conv.i.i17, ptr %96, align 8 + store i64 %sub.i.i.i44, ptr %rsp.i, align 8, !tbaa !5 + store i64 %95, ptr %PC, align 8 + %97 = add i64 %program_counter, 138 + %98 = load i64, ptr %RSP, align 8 + %99 = add i64 %98, 112 + %100 = inttoptr i64 %99 to ptr + %101 = load i64, ptr %100, align 8 + %sub.i.i.i48 = add i64 %87, -48 + %102 = inttoptr i64 %sub.i.i.i48 to ptr + store i64 %101, ptr %102, align 8 + store i64 %sub.i.i.i48, ptr %rsp.i, align 8, !tbaa !5 + store i64 %97, ptr %PC, align 8 + %103 = add i64 %program_counter, 145 + %104 = load i64, ptr %RSP, align 8 + %105 = add i64 %104, 128 + %106 = inttoptr i64 %105 to ptr + %107 = load i64, ptr %106, align 8 + %sub.i.i.i52 = add i64 %87, -56 + %108 = inttoptr i64 %sub.i.i.i52 to ptr + store i64 %107, ptr %108, align 8 + store i64 %sub.i.i.i52, ptr %rsp.i, align 8, !tbaa !5 + store i64 %103, ptr %PC, align 8 + %109 = add i64 %program_counter, 150 + %110 = add i64 %program_counter, -873 + %sub.i.i55 = add i64 %87, -64 + %111 = inttoptr i64 %sub.i.i55 to ptr + store i64 %109, ptr %111, align 8 + store i64 %sub.i.i55, ptr %rsp.i, align 8, !tbaa !5 + store i64 %110, ptr %rip.i, align 8, !tbaa !5 + %112 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %109, ptr %PC, align 8 + %113 = add i64 %program_counter, 154 + %114 = load i64, ptr %RSP, align 8 + %add.i.i = add i64 %114, 64 + store i64 %add.i.i, ptr %RSP, align 8, !tbaa !5 + %cmp.i.i.i58 = icmp ugt i64 %114, -65 + %conv.i.i60 = zext i1 %cmp.i.i.i58 to i8 + store i8 %conv.i.i60, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i.i62 = trunc i64 %add.i.i to i8 + %115 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i62), !range !26 + %116 = and i8 %115, 1 + %117 = xor i8 %116, 1 + store i8 %117, ptr %pf.i.i, align 1, !tbaa !27 + %118 = xor i64 %114, %add.i.i + %119 = trunc i64 %118 to i8 + %120 = lshr i8 %119, 4 + %121 = and i8 %120, 1 + store i8 %121, ptr %af.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i67 = icmp eq i64 %add.i.i, 0 + %conv5.i.i.i69 = zext i1 %cmp.i.i.i.i67 to i8 + store i8 %conv5.i.i.i69, ptr %zf.i.i, align 1, !tbaa !28 + 
%cmp.i27.i.i.i71 = icmp slt i64 %add.i.i, 0 + %conv8.i.i.i73 = zext i1 %cmp.i27.i.i.i71 to i8 + store i8 %conv8.i.i.i73, ptr %sf.i.i, align 1, !tbaa !29 + %shr.i.i.i.i75 = lshr i64 %114, 63 + %shr2.i.i.i.i76 = lshr i64 %add.i.i, 63 + %xor.i28.i.i.i = xor i64 %shr2.i.i.i.i76, %shr.i.i.i.i75 + %add.i.i.i.i77 = add nuw nsw i64 %xor.i28.i.i.i, %shr2.i.i.i.i76 + %cmp.i29.i.i.i78 = icmp eq i64 %add.i.i.i.i77, 2 + %conv11.i.i.i80 = zext i1 %cmp.i29.i.i.i78 to i8 + store i8 %conv11.i.i.i80, ptr %of.i.i, align 1, !tbaa !30 + store i64 %113, ptr %PC, align 8 + %122 = add i64 %program_counter, 160 + %123 = load i64, ptr %EBP, align 8 + %conv.i.i82 = trunc i64 %123 to i32 + %and3.i.i83 = and i32 %conv.i.i82, 134217472 + %conv.i22.i84 = zext i32 %and3.i.i83 to i64 + store i64 %conv.i22.i84, ptr %EBP, align 8, !tbaa !5 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + store i8 1, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i88 = icmp eq i32 %and3.i.i83, 0 + %conv3.i.i90 = zext i1 %cmp.i.i.i88 to i8 + store i8 %conv3.i.i90, ptr %zf.i.i, align 1, !tbaa !28 + store i8 0, ptr %sf.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + store i8 0, ptr %af.i.i, align 1, !tbaa !31 + store i64 %122, ptr %PC, align 8 + %124 = add i64 %program_counter, 166 + %125 = load i32, ptr %EBP, align 4 + %sub.i.i97 = add i32 %125, -16632832 + %cmp.i.i.i98 = icmp ult i32 %125, 16632832 + %conv.i12.i = zext i1 %cmp.i.i.i98 to i8 + store i8 %conv.i12.i, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i.i101 = trunc i32 %sub.i.i97 to i8 + %126 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i101), !range !26 + %127 = and i8 %126, 1 + %128 = xor i8 %127, 1 + store i8 %128, ptr %pf.i.i, align 1, !tbaa !27 + %129 = xor i32 %125, %sub.i.i97 + %130 = trunc i32 %129 to i8 + %131 = lshr i8 %130, 4 + %132 = and i8 %131, 1 + store i8 %132, ptr %af.i.i, align 1, !tbaa !31 + %cmp.i.i.i.i106 = icmp eq i32 %125, 16632832 + %conv5.i.i.i108 = zext i1 %cmp.i.i.i.i106 to i8 + store i8 %conv5.i.i.i108, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i27.i.i.i110 = icmp slt i32 %sub.i.i97, 0 + %conv8.i.i.i112 = zext i1 %cmp.i27.i.i.i110 to i8 + store i8 %conv8.i.i.i112, ptr %sf.i.i, align 1, !tbaa !29 + %shr.i.i.i.i114 = lshr i32 %125, 31 + %shr2.i.i.i.i115 = lshr i32 %sub.i.i97, 31 + %xor3.i.i.i.i116 = xor i32 %shr2.i.i.i.i115, %shr.i.i.i.i114 + %add.i.i.i.i117 = add nuw nsw i32 %xor3.i.i.i.i116, %shr.i.i.i.i114 + %cmp.i29.i.i.i118 = icmp eq i32 %add.i.i.i.i117, 2 + %conv11.i.i.i120 = zext i1 %cmp.i29.i.i.i118 to i8 + store i8 %conv11.i.i.i120, ptr %of.i.i, align 1, !tbaa !30 + store i64 %124, ptr %PC, align 8 + %cond1.i.i.v = select i1 %cmp.i.i.i.i106, i64 191, i64 168 + %cond1.i.i = add i64 %cond1.i.i.v, %program_counter + store i64 %cond1.i.i, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199688(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2 + %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + 
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  %2 = load i64, ptr %RSP, align 8
+  %3 = add i64 %2, 8
+  store i64 %3, ptr %RDI, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %4 = add i64 %program_counter, 8
+  %5 = load i64, ptr %RBX, align 8
+  store i64 %5, ptr %RSI, align 8, !tbaa !5
+  store i64 %4, ptr %PC, align 8
+  %6 = add i64 %program_counter, 13
+  %7 = add i64 %program_counter, 456
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %8 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i = add i64 %8, -8
+  %9 = inttoptr i64 %sub.i.i to ptr
+  store i64 %6, ptr %9, align 8
+  store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %7, ptr %rip.i, align 8, !tbaa !5
+  %10 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %6, ptr %PC, align 8
+  store i64 %6, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199297(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %ECX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 5, i32 0, i32 0, !remill_register !49
+  %EDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !50
+  %EAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !0
+  %ESI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !51
+  %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34
+  %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2
+  %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  store i64 32, ptr %RDI, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %2 = add i64 %program_counter, 10
+  %3 = add i64 %program_counter, -625
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %4 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i = add i64 %4, -8
+  %5 = inttoptr i64 %sub.i.i to ptr
+  store i64 %2, ptr %5, align 8
+  store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %3, ptr %rip.i, align 8, !tbaa !5
+  %6 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %2, ptr %PC, align 8
+  %7 = add i64 %program_counter, 13
+  %8 = load i64, ptr %EAX, align 8
+  store i64 %8, ptr %RBX, align 8, !tbaa !5
+  store i64 %7, ptr %PC, align 8
+  %9 = add i64 %program_counter, 16
+  store i64 %8, ptr %RDI, align 8, !tbaa !5
+  store i64 %9, ptr %PC, align 8
+  %10 = add i64 %program_counter, 21
+  %11 = add i64 %program_counter, 2047
+  %12 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i2 = add i64 %12, -8
+  %13 = inttoptr i64 %sub.i.i2 to ptr
+  store i64 %10, ptr %13, align 8
+  store i64 %sub.i.i2, ptr %rsp.i, align 8, !tbaa !5
+  store i64 %11, ptr %rip.i, align 8, !tbaa !5
+  %14 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %10, ptr %PC, align 8
+  %15 = add i64 %program_counter, 26
+  %16 = load i64, ptr %RSP, align 8
+  %17 = add i64 %16, 32
+  %18 = inttoptr i64 %17 to ptr
+  %19 = load i64, ptr %18, align 8
+  store i64 %19, ptr %ESI, align 8, !tbaa !5
+  store i64 %15, ptr %PC, align 8
+  %20 = add i64 %program_counter, 29
+  %21 = add i64 %19, 63
+  %22 = trunc i64 %21 to i32
+  %23 = zext i32 %22 to i64
+  store i64 %23, ptr %EAX, align 8, !tbaa !5
+  store i64 %20, ptr %PC, align 8
+  %24 = add i64 %program_counter, 31
+  %25 = load i32, ptr %ESI, align 4
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i = trunc i32 %25 to i8
+  %26 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26
+  %27 = and i8 %26, 1
+  %28 = xor i8 %27, 1
+  %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %28, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i = icmp eq i32 %25, 0
+  %conv3.i.i = zext i1 %cmp.i.i.i to i8
+  %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i = icmp slt i32 %25, 0
+  %conv6.i.i = zext i1 %cmp.i19.i.i to i8
+  %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29
+  %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %24, ptr %PC, align 8
+  %29 = add i64 %program_counter, 34
+  %30 = zext i32 %25 to i64
+  %cond1.i.v.i = select i1 %cmp.i19.i.i, i64 %21, i64 %30
+  %31 = trunc i64 %cond1.i.v.i to i32
+  %32 = zext i32 %31 to i64
+  store i64 %32, ptr %EAX, align 8, !tbaa !5
+  store i64 %29, ptr %PC, align 8
+  %33 = add i64 %program_counter, 36
+  %34 = load i32, ptr %EAX, align 4
+  %35 = zext i32 %34 to i64
+  store i64 %35, ptr %EDX, align 8, !tbaa !5
+  store i64 %33, ptr %PC, align 8
+  %36 = add i64 %program_counter, 39
+  %and3.i.i7 = and i32 %34, -64
+  %conv.i22.i = zext i32 %and3.i.i7 to i64
+  store i64 %conv.i22.i, ptr %EDX, align 8, !tbaa !5
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i9 = trunc i32 %and3.i.i7 to i8
+  %37 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i9), !range !26
+  %38 = and i8 %37, 1
+  %39 = xor i8 %38, 1
+  store i8 %39, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i11 = icmp eq i32 %and3.i.i7, 0
+  %conv3.i.i13 = zext i1 %cmp.i.i.i11 to i8
+  store i8 %conv3.i.i13, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i15 = icmp slt i32 %and3.i.i7, 0
+  %conv6.i.i17 = zext i1 %cmp.i19.i.i15 to i8
+  store i8 %conv6.i.i17, ptr %sf.i.i, align 1, !tbaa !29
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  store i8 0, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %36, ptr %PC, align 8
+  %40 = add i64 %program_counter, 41
+  store i64 %30, ptr %ECX, align 8, !tbaa !5
+  store i64 %40, ptr %PC, align 8
+  %41 = add i64 %program_counter, 43
+  %42 = load i32, ptr %EDX, align 4
+  %sub.i.i23 = sub i32 %25, %42
+  %conv.i22.i24 = zext i32 %sub.i.i23 to i64
+  store i64 %conv.i22.i24, ptr %ECX, align 8, !tbaa !5
+  %cmp.i.i.i25 = icmp ult i32 %25, %42
+  %conv.i23.i = zext i1 %cmp.i.i.i25 to i8
+  store i8 %conv.i23.i, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i.i = trunc i32 %sub.i.i23 to i8
+  %43 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i), !range !26
+  %44 = and i8 %43, 1
+  %45 = xor i8 %44, 1
+  store i8 %45, ptr %pf.i.i, align 1, !tbaa !27
+  %xor.i.i.i.i = xor i32 %42, %25
+  %xor1.i.i.i.i = xor i32 %xor.i.i.i.i, %sub.i.i23
+  %46 = trunc i32 %xor1.i.i.i.i to i8
+  %47 = lshr i8 %46, 4
+  %48 = and i8 %47, 1
+  store i8 %48, ptr %af.i.i, align 1, !tbaa !31
+  %cmp.i.i.i.i = icmp eq i32 %25, %42
+  %conv5.i.i.i = zext i1 %cmp.i.i.i.i to i8
+  store i8 %conv5.i.i.i, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i27.i.i.i = icmp slt i32 %sub.i.i23, 0
+  %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8
+  store i8 %conv8.i.i.i, ptr %sf.i.i, align 1, !tbaa !29
+  %shr.i.i.i.i = lshr i32 %25, 31
+  %shr1.i.i.i.i = lshr i32 %42, 31
+  %shr2.i.i.i.i = lshr i32 %sub.i.i23, 31
+  %xor.i28.i.i.i = xor i32 %shr1.i.i.i.i, %shr.i.i.i.i
+  %xor3.i.i.i.i = xor i32 %shr2.i.i.i.i, %shr.i.i.i.i
+  %add.i.i.i.i = add nuw nsw i32 %xor3.i.i.i.i, %xor.i28.i.i.i
+  %cmp.i29.i.i.i = icmp eq i32 %add.i.i.i.i, 2
+  %conv11.i.i.i = zext i1 %cmp.i29.i.i.i to i8
+  store i8 %conv11.i.i.i, ptr %of.i.i, align 1, !tbaa !30
+  store i64 %41, ptr %PC, align 8
+  %49 = add i64 %program_counter, 48
+  store i64 1, ptr %EDX, align 8, !tbaa !5
+  store i64 %49, ptr %PC, align 8
+  %50 = load i8, ptr %ECX, align 1
+  %51 = and i8 %50, 63
+  %and.i.i = zext i8 %51 to i64
+  switch i64 %and.i.i, label %if.then35.i [
+    i64 0, label %do.body16.i
+    i64 1, label %do.body55.i
+  ]
+
+do.body16.i: ; preds = %0
+  store i64 1, ptr %EDX, align 8, !tbaa !5
+  br label %_ZN12_GLOBAL__N_13SHLI3RnWIyE2RnIyLb1EES4_EEP6MemoryS6_R5StateT_T0_T1_.exit
+
+if.then35.i: ; preds = %0
+  %sub.i.i28 = add nsw i64 %and.i.i, -1
+  %shl.i145.i = shl i64 1, %sub.i.i28
+  %shl.i147.i = shl i64 2, %sub.i.i28
+  %phi.bo = lshr i64 %shl.i145.i, 63
+  %phi.cast = trunc i64 %phi.bo to i8
+  br label %do.body55.i
+
+do.body55.i: ; preds = %0, %if.then35.i
+  %new_cf.0.shrunk.in.i = phi i8 [ %phi.cast, %if.then35.i ], [ 0, %0 ]
+  %new_val.0.i = phi i64 [ %shl.i147.i, %if.then35.i ], [ 2, %0 ]
+  store i64 %new_val.0.i, ptr %EDX, align 8, !tbaa !5
+  store i8 %new_cf.0.shrunk.in.i, ptr %cf.i.i, align 1, !tbaa !32
+  %conv.i.i29 = trunc i64 %new_val.0.i to i8
+  %52 = call i8 @llvm.ctpop.i8(i8 %conv.i.i29), !range !26
+  %53 = and i8 %52, 1
+  %54 = xor i8 %53, 1
+  store i8 %54, ptr %pf.i.i, align 1, !tbaa !32
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !32
+  %cmp.i148.i = icmp eq i64 %new_val.0.i, 0
+  %conv85.i = zext i1 %cmp.i148.i to i8
+  store i8 %conv85.i, ptr %zf.i.i, align 1, !tbaa !32
+  %new_val.0.lobit.i = lshr i64 %new_val.0.i, 63
+  %55 = trunc i64 %new_val.0.lobit.i to i8
+  store i8 %55, ptr %sf.i.i, align 1, !tbaa !32
+  store i8 0, ptr %of.i.i, align 1, !tbaa !32
+  br label %_ZN12_GLOBAL__N_13SHLI3RnWIyE2RnIyLb1EES4_EEP6MemoryS6_R5StateT_T0_T1_.exit
+
+_ZN12_GLOBAL__N_13SHLI3RnWIyE2RnIyLb1EES4_EEP6MemoryS6_R5StateT_T0_T1_.exit: ; preds = %do.body55.i, %do.body16.i
+  %56 = add i64 %program_counter, 51
+  store i64 %56, ptr %PC, align 8
+  %57 = add i64 %program_counter, 56
+  %58 = add i64 %16, 56
+  %59 = load i64, ptr %EDX, align 8
+  %60 = inttoptr i64 %58 to ptr
+  store i64 %59, ptr %60, align 8
+  store i64 %57, ptr %PC, align 8
+  %61 = add i64 %program_counter, 59
+  %62 = load i64, ptr %ESI, align 8
+  %63 = add i64 %62, 1
+  %64 = trunc i64 %63 to i32
+  %65 = zext i32 %64 to i64
+  store i64 %65, ptr %ECX, align 8, !tbaa !5
+  store i64 %61, ptr %PC, align 8
+  %66 = add i64 %program_counter, 63
+  %67 = add i64 %16, 28
+  %68 = load i32, ptr %ECX, align 4
+  %69 = inttoptr i64 %67 to ptr
+  store i32 %68, ptr %69, align 4
+  store i64 %66, ptr %PC, align 8
+  %70 = add i64 %program_counter, 66
+  %71 = load i64, ptr %EAX, align 8
+  %sext167.i = shl i64 %71, 32
+  %72 = lshr i64 %sext167.i, 37
+  %shr.i160.i = ashr i64 %sext167.i, 38
+  %new_val.0.i34 = trunc i64 %shr.i160.i to i32
+  %73 = zext i32 %new_val.0.i34 to i64
+  store i64 %73, ptr %EAX, align 8, !tbaa !5
+  %74 = trunc i64 %72 to i8
+  %75 = and i8 %74, 1
+  store i8 %75, ptr %cf.i.i, align 1, !tbaa !32
+  %conv.i164.i = trunc i64 %shr.i160.i to i8
+  %76 = call i8 @llvm.ctpop.i8(i8 %conv.i164.i), !range !26
+  %77 = and i8 %76, 1
+  %78 = xor i8 %77, 1
+  store i8 %78, ptr %pf.i.i, align 1, !tbaa !32
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !32
+  %cmp.i165.i = icmp eq i32 %new_val.0.i34, 0
+  %conv88.i = zext i1 %cmp.i165.i to i8
+  store i8 %conv88.i, ptr %zf.i.i, align 1, !tbaa !32
+  %new_val.0.lobit.i40 = lshr i32 %new_val.0.i34, 31
+  %79 = trunc i32 %new_val.0.lobit.i40 to i8
+  store i8 %79, ptr %sf.i.i, align 1, !tbaa !32
+  store i8 0, ptr %of.i.i, align 1, !tbaa !32
+  store i64 %70, ptr %PC, align 8
+  %80 = add i64 %program_counter, 68
+  %rax.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 1
+  %81 = load i32, ptr %rax.i, align 8, !tbaa !32
+  %conv.i.i42 = sext i32 %81 to i64
+  store i64 %conv.i.i42, ptr %rax.i, align 8, !tbaa !5
+  store i64 %80, ptr %PC, align 8
+  %82 = add i64 %16, 48
+  %83 = load i64, ptr %EAX, align 8
+  %84 = inttoptr i64 %82 to ptr
+  store i64 %83, ptr %84, align 8
+  %85 = add i64 %16, 20
+  %86 = inttoptr i64 %85 to ptr
+  store i32 0, ptr %86, align 4
+  %87 = add i64 %program_counter, 91
+  store i64 %87, ptr %PC, align 8
+  %88 = add i64 %program_counter, 95
+  store i64 %88, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199665(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %EBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !43
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 6
+  %2 = load i32, ptr %EBP, align 4
+  %sub.i.i = add i32 %2, -16707840
+  %cmp.i.i.i = icmp ult i32 %2, 16707840
+  %conv.i12.i = zext i1 %cmp.i.i.i to i8
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 %conv.i12.i, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i.i = trunc i32 %sub.i.i to i8
+  %3 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i), !range !26
+  %4 = and i8 %3, 1
+  %5 = xor i8 %4, 1
+  %pf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %5, ptr %pf.i.i.i, align 1, !tbaa !27
+  %6 = xor i32 %2, %sub.i.i
+  %7 = trunc i32 %6 to i8
+  %8 = lshr i8 %7, 4
+  %9 = and i8 %8, 1
+  %af.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 %9, ptr %af.i.i.i, align 1, !tbaa !31
+  %cmp.i.i.i.i = icmp eq i32 %2, 16707840
+  %conv5.i.i.i = zext i1 %cmp.i.i.i.i to i8
+  %zf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv5.i.i.i, ptr %zf.i.i.i, align 1, !tbaa !28
+  %cmp.i27.i.i.i = icmp slt i32 %sub.i.i, 0
+  %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8
+  %sf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv8.i.i.i, ptr %sf.i.i.i, align 1, !tbaa !29
+  %shr.i.i.i.i = lshr i32 %2, 31
+  %shr2.i.i.i.i = lshr i32 %sub.i.i, 31
+  %xor3.i.i.i.i = xor i32 %shr2.i.i.i.i, %shr.i.i.i.i
+  %add.i.i.i.i = add nuw nsw i32 %xor3.i.i.i.i, %shr.i.i.i.i
+  %cmp.i29.i.i.i = icmp eq i32 %add.i.i.i.i, 2
+  %conv11.i.i.i = zext i1 %cmp.i29.i.i.i to i8
+  %of.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 %conv11.i.i.i, ptr %of.i.i.i, align 1, !tbaa !30
+  store i64 %1, ptr %PC, align 8
+  %cond1.i.i.v = select i1 %cmp.i.i.i.i, i64 8, i64 36
+  %cond1.i.i = add i64 %cond1.i.i.v, %program_counter
+  store i64 %cond1.i.i, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199918(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34
+  %RAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !44
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 4
+  %2 = load i64, ptr %RSP, align 8
+  %3 = add i64 %2, 20
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4, align 4
+  %conv.i.i = zext i32 %5 to i64
+  store i64 %conv.i.i, ptr %RAX, align 8, !tbaa !5
+  store i64 %1, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199219(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %EAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !0
+  %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  store i64 4202677, ptr %RDI, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %2 = add i64 %program_counter, 10
+  %3 = add i64 %program_counter, -755
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %4 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i = add i64 %4, -8
+  %5 = inttoptr i64 %sub.i.i to ptr
+  store i64 %2, ptr %5, align 8
+  store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %3, ptr %rip.i, align 8, !tbaa !5
+  %6 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %2, ptr %PC, align 8
+  %7 = add i64 %program_counter, 12
+  %8 = load i64, ptr %EAX, align 8
+  %9 = load i32, ptr %EAX, align 4
+  %conv.i.i = trunc i64 %8 to i32
+  %xor3.i.i = xor i32 %9, %conv.i.i
+  %conv.i27.i = zext i32 %xor3.i.i to i64
+  store i64 %conv.i27.i, ptr %EAX, align 8, !tbaa !5
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i = trunc i32 %xor3.i.i to i8
+  %10 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26
+  %11 = and i8 %10, 1
+  %12 = xor i8 %11, 1
+  %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %12, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i = icmp eq i32 %xor3.i.i, 0
+  %conv3.i.i = zext i1 %cmp.i.i.i to i8
+  %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i = icmp slt i32 %xor3.i.i, 0
+  %conv6.i.i = zext i1 %cmp.i19.i.i to i8
+  %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29
+  %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %7, ptr %PC, align 8
+  %13 = add i64 %program_counter, 17
+  %14 = add i64 %program_counter, 941
+  %15 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i2 = add i64 %15, -8
+  %16 = inttoptr i64 %sub.i.i2 to ptr
+  store i64 %13, ptr %16, align 8
+  store i64 %sub.i.i2, ptr %rsp.i, align 8, !tbaa !5
+  store i64 %14, ptr %rip.i, align 8, !tbaa !5
+  %17 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  %18 = add i64 %program_counter, 27
+  store i64 %18, ptr %PC, align 8
+  %19 = add i64 %program_counter, 29
+  store i64 %19, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199392(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %ECX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 5, i32 0, i32 0, !remill_register !49
+  %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1
+  %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34
+  %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3
+  %EAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !0
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  store i64 16, ptr %ECX, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %2 = add i64 %program_counter, 7
+  %3 = load i64, ptr %EAX, align 8
+  %4 = load i32, ptr %EAX, align 4
+  %conv.i.i = trunc i64 %3 to i32
+  %xor3.i.i = xor i32 %4, %conv.i.i
+  %conv.i27.i = zext i32 %xor3.i.i to i64
+  store i64 %conv.i27.i, ptr %EAX, align 8, !tbaa !5
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i = trunc i32 %xor3.i.i to i8
+  %5 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26
+  %6 = and i8 %5, 1
+  %7 = xor i8 %6, 1
+  %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %7, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i = icmp eq i32 %xor3.i.i, 0
+  %conv3.i.i = zext i1 %cmp.i.i.i to i8
+  %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i = icmp slt i32 %xor3.i.i, 0
+  %conv6.i.i = zext i1 %cmp.i19.i.i to i8
+  %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29
+  %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %2, ptr %PC, align 8
+  %8 = add i64 %program_counter, 12
+  %9 = load i64, ptr %RSP, align 8
+  %10 = add i64 %9, 120
+  store i64 %10, ptr %RSI, align 8, !tbaa !5
+  store i64 %8, ptr %PC, align 8
+  %11 = add i64 %program_counter, 15
+  store i64 %10, ptr %RDI, align 8, !tbaa !5
+  store i64 %11, ptr %PC, align 8
+  %12 = add i64 %program_counter, 16
+  %df.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 11
+  store i8 0, ptr %df.i, align 1, !tbaa !52
+  store i64 %12, ptr %PC, align 8
+  %rcx.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 5
+  %13 = load i64, ptr %rcx.i, align 8, !tbaa !32
+  %cmp.i.not14.i = icmp eq i64 %13, 0
+  br i1 %cmp.i.not14.i, label %_ZN12_GLOBAL__N_111DoREP_STOSQEP6MemoryR5State.exit, label %while.body.lr.ph.i
+
+while.body.lr.ph.i: ; preds = %0
+  %rdi.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 11
+  %rax.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 1
+  %14 = load i64, ptr %rax.i.i, align 8, !tbaa !32
+  %rdi.i.promoted.i = load i64, ptr %rdi.i.i, align 8, !tbaa !32
+  %15 = shl i64 %13, 3
+  br label %while.body.i
+
+while.body.i: ; preds = %while.body.i, %while.body.lr.ph.i
+  %next_addr.0.i17.i = phi i64 [ %rdi.i.promoted.i, %while.body.lr.ph.i ], [ %next_addr.0.i.i, %while.body.i ]
+  %count_reg.016.i = phi i64 [ %13, %while.body.lr.ph.i ], [ %sub.i.i, %while.body.i ]
+  %16 = inttoptr i64 %next_addr.0.i17.i to ptr
+  store i64 %14, ptr %16, align 8
+  %next_addr.0.i.i = add i64 %next_addr.0.i17.i, 8
+  %sub.i.i = add i64 %count_reg.016.i, -1
+  %cmp.i.not.i = icmp eq i64 %sub.i.i, 0
+  br i1 %cmp.i.not.i, label %while.cond.while.end_crit_edge.i, label %while.body.i
+
+while.cond.while.end_crit_edge.i: ; preds = %while.body.i
+  %17 = add i64 %rdi.i.promoted.i, %15
+  store i64 %17, ptr %rdi.i.i, align 8, !tbaa !32
+  store i64 0, ptr %rcx.i, align 8, !tbaa !5
+  br label %_ZN12_GLOBAL__N_111DoREP_STOSQEP6MemoryR5State.exit
+
+_ZN12_GLOBAL__N_111DoREP_STOSQEP6MemoryR5State.exit: ; preds = %while.cond.while.end_crit_edge.i, %0
+  %18 = add i64 %program_counter, 19
+  %EDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !50
+  %R8 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 17, i32 0, i32 0, !remill_register !40
+  store i64 %18, ptr %PC, align 8
+  %19 = add i64 %program_counter, 24
+  %20 = add i64 %9, 56
+  %21 = inttoptr i64 %20 to ptr
+  %22 = load i64, ptr %21, align 8
+  store i64 %22, ptr %EAX, align 8, !tbaa !5
+  store i64 %19, ptr %PC, align 8
+  %23 = add i64 %program_counter, 29
+  %24 = add i64 %9, 48
+  %25 = inttoptr i64 %24 to ptr
+  %26 = load i64, ptr %25, align 8
+  store i64 %26, ptr %ECX, align 8, !tbaa !5
+  store i64 %23, ptr %PC, align 8
+  %27 = shl i64 %26, 3
+  %28 = add i64 %9, %27
+  %29 = add i64 %28, 120
+  %30 = inttoptr i64 %29 to ptr
+  %31 = load i64, ptr %30, align 8
+  %or.i.i = or i64 %31, %22
+  store i64 %or.i.i, ptr %30, align 8
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i5 = trunc i64 %or.i.i to i8
+  %32 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i5), !range !26
+  %33 = and i8 %32, 1
+  %34 = xor i8 %33, 1
+  store i8 %34, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i7 = icmp eq i64 %or.i.i, 0
+  %conv3.i.i9 = zext i1 %cmp.i.i.i7 to i8
+  store i8 %conv3.i.i9, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i11 = icmp slt i64 %or.i.i, 0
+  %conv6.i.i13 = zext i1 %cmp.i19.i.i11 to i8
+  store i8 %conv6.i.i13, ptr %sf.i.i, align 1, !tbaa !29
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  %35 = add i64 %9, 96
+  %36 = inttoptr i64 %35 to ptr
+  store i64 0, ptr %36, align 8
+  %37 = add i64 %program_counter, 52
+  %38 = add i64 %9, 104
+  %39 = inttoptr i64 %38 to ptr
+  store i64 50, ptr %39, align 8
+  store i64 %37, ptr %PC, align 8
+  %40 = add i64 %program_counter, 56
+  %41 = add i64 %9, 28
+  %42 = inttoptr i64 %41 to ptr
+  %43 = load i32, ptr %42, align 4
+  %conv.i.i21 = zext i32 %43 to i64
+  store i64 %conv.i.i21, ptr %RDI, align 8, !tbaa !5
+  store i64 %40, ptr %PC, align 8
+  %44 = add i64 %program_counter, 58
+  %45 = load i64, ptr %EDX, align 8
+  %46 = load i32, ptr %EDX, align 4
+  %conv.i.i22 = trunc i64 %45 to i32
+  %xor3.i.i23 = xor i32 %46, %conv.i.i22
+  %conv.i27.i24 = zext i32 %xor3.i.i23 to i64
+  store i64 %conv.i27.i24, ptr %EDX, align 8, !tbaa !5
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i26 = trunc i32 %xor3.i.i23 to i8
+  %47 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i26), !range !26
+  %48 = and i8 %47, 1
+  %49 = xor i8 %48, 1
+  store i8 %49, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i28 = icmp eq i32 %xor3.i.i23, 0
+  %conv3.i.i30 = zext i1 %cmp.i.i.i28 to i8
+  store i8 %conv3.i.i30, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i32 = icmp slt i32 %xor3.i.i23, 0
+  %conv6.i.i34 = zext i1 %cmp.i19.i.i32 to i8
+  store i8 %conv6.i.i34, ptr %sf.i.i, align 1, !tbaa !29
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %44, ptr %PC, align 8
+  %50 = add i64 %program_counter, 60
+  %51 = load i32, ptr %ECX, align 4
+  %conv.i.i39 = trunc i64 %26 to i32
+  %xor3.i.i40 = xor i32 %51, %conv.i.i39
+  %conv.i27.i41 = zext i32 %xor3.i.i40 to i64
+  store i64 %conv.i27.i41, ptr %ECX, align 8, !tbaa !5
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i43 = trunc i32 %xor3.i.i40 to i8
+  %52 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i43), !range !26
+  %53 = and i8 %52, 1
+  %54 = xor i8 %53, 1
+  store i8 %54, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i45 = icmp eq i32 %xor3.i.i40, 0
+  %conv3.i.i47 = zext i1 %cmp.i.i.i45 to i8
+  store i8 %conv3.i.i47, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i49 = icmp slt i32 %xor3.i.i40, 0
+  %conv6.i.i51 = zext i1 %cmp.i19.i.i49 to i8
+  store i8 %conv6.i.i51, ptr %sf.i.i, align 1, !tbaa !29
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %50, ptr %PC, align 8
+  %55 = add i64 %program_counter, 65
+  store i64 %35, ptr %R8, align 8, !tbaa !5
+  store i64 %55, ptr %PC, align 8
+  %56 = add i64 %program_counter, 70
+  %57 = add i64 %program_counter, -736
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %58 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i56 = add i64 %58, -8
+  %59 = inttoptr i64 %sub.i.i56 to ptr
+  store i64 %56, ptr %59, align 8
+  store i64 %sub.i.i56, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %57, ptr %rip.i, align 8, !tbaa !5
+  %60 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %56, ptr %PC, align 8
+  %61 = add i64 %program_counter, 72
+  %62 = load i32, ptr %EAX, align 4
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i59 = trunc i32 %62 to i8
+  %63 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i59), !range !26
+  %64 = and i8 %63, 1
+  %65 = xor i8 %64, 1
+  store i8 %65, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i61 = icmp eq i32 %62, 0
+  %conv3.i.i63 = zext i1 %cmp.i.i.i61 to i8
+  store i8 %conv3.i.i63, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i65 = icmp slt i32 %62, 0
+  %conv6.i.i67 = zext i1 %cmp.i19.i.i65 to i8
+  store i8 %conv6.i.i67, ptr %sf.i.i, align 1, !tbaa !29
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %61, ptr %PC, align 8
+  %66 = or i1 %cmp.i.i.i61, %cmp.i19.i.i65
+  %cond1.i.i.v = select i1 %66, i64 309, i64 78
+  %cond1.i.i = add i64 %cond1.i.i.v, %program_counter
+  store i64 %cond1.i.i, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn
+declare zeroext i1 @__remill_compare_sle(i1 noundef zeroext) local_unnamed_addr #2
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199024(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %EDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !53
+  %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3
+  %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34
+  %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2
+  %R12 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 25, i32 0, i32 0, !remill_register !38
+  %R13 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 27, i32 0, i32 0, !remill_register !37
+  %R14 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 29, i32 0, i32 0, !remill_register !35
+  %R15 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 31, i32 0, i32 0, !remill_register !36
+  %RBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !33
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 1
+  %2 = load i64, ptr %RBP, align 8
+  %rsp.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %3 = load i64, ptr %rsp.i.i, align 8, !tbaa !32
+  %sub.i.i.i = add i64 %3, -8
+  %4 = inttoptr i64 %sub.i.i.i to ptr
+  store i64 %2, ptr %4, align 8
+  store i64 %sub.i.i.i, ptr %rsp.i.i, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %5 = add i64 %program_counter, 3
+  %6 = load i64, ptr %R15, align 8
+  %sub.i.i.i2 = add i64 %3, -16
+  %7 = inttoptr i64 %sub.i.i.i2 to ptr
+  store i64 %6, ptr %7, align 8
+  store i64 %sub.i.i.i2, ptr %rsp.i.i, align 8, !tbaa !5
+  store i64 %5, ptr %PC, align 8
+  %8 = add i64 %program_counter, 5
+  %9 = load i64, ptr %R14, align 8
+  %sub.i.i.i5 = add i64 %3, -24
+  %10 = inttoptr i64 %sub.i.i.i5 to ptr
+  store i64 %9, ptr %10, align 8
+  store i64 %sub.i.i.i5, ptr %rsp.i.i, align 8, !tbaa !5
+  store i64 %8, ptr %PC, align 8
+  %11 = add i64 %program_counter, 7
+  %12 = load i64, ptr %R13, align 8
+  %sub.i.i.i8 = add i64 %3, -32
+  %13 = inttoptr i64 %sub.i.i.i8 to ptr
+  store i64 %12, ptr %13, align 8
+  store i64 %sub.i.i.i8, ptr %rsp.i.i, align 8, !tbaa !5
+  store i64 %11, ptr %PC, align 8
+  %14 = add i64 %program_counter, 9
+  %15 = load i64, ptr %R12, align 8
+  %sub.i.i.i11 = add i64 %3, -40
+  %16 = inttoptr i64 %sub.i.i.i11 to ptr
+  store i64 %15, ptr %16, align 8
+  store i64 %sub.i.i.i11, ptr %rsp.i.i, align 8, !tbaa !5
+  store i64 %14, ptr %PC, align 8
+  %17 = add i64 %program_counter, 10
+  %18 = load i64, ptr %RBX, align 8
+  %sub.i.i.i14 = add i64 %3, -48
+  %19 = inttoptr i64 %sub.i.i.i14 to ptr
+  store i64 %18, ptr %19, align 8
+  store i64 %sub.i.i.i14, ptr %rsp.i.i, align 8, !tbaa !5
+  store i64 %17, ptr %PC, align 8
+  %20 = add i64 %program_counter, 17
+  %21 = load i64, ptr %RSP, align 8
+  %sub.i.i = add i64 %21, -248
+  store i64 %sub.i.i, ptr %RSP, align 8, !tbaa !5
+  %cmp.i.i.i = icmp ult i64 %21, 248
+  %conv.i.i = zext i1 %cmp.i.i.i to i8
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 %conv.i.i, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i.i = trunc i64 %sub.i.i to i8
+  %22 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i), !range !26
+  %23 = and i8 %22, 1
+  %24 = xor i8 %23, 1
+  %pf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %24, ptr %pf.i.i.i, align 1, !tbaa !27
+  %25 = xor i64 %21, %sub.i.i
+  %26 = trunc i64 %25 to i8
+  %27 = xor i8 %26, -1
+  %28 = lshr i8 %27, 4
+  %29 = and i8 %28, 1
+  %af.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 %29, ptr %af.i.i.i, align 1, !tbaa !31
+  %cmp.i.i.i.i = icmp eq i64 %21, 248
+  %conv5.i.i.i = zext i1 %cmp.i.i.i.i to i8
+  %zf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv5.i.i.i, ptr %zf.i.i.i, align 1, !tbaa !28
+  %cmp.i27.i.i.i = icmp slt i64 %sub.i.i, 0
+  %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8
+  %sf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv8.i.i.i, ptr %sf.i.i.i, align 1, !tbaa !29
+  %shr.i.i.i.i = lshr i64 %21, 63
+  %shr2.i.i.i.i = lshr i64 %sub.i.i, 63
+  %xor3.i.i.i.i = xor i64 %shr2.i.i.i.i, %shr.i.i.i.i
+  %add.i.i.i.i = add nuw nsw i64 %xor3.i.i.i.i, %shr.i.i.i.i
+  %cmp.i29.i.i.i = icmp eq i64 %add.i.i.i.i, 2
+  %conv11.i.i.i = zext i1 %cmp.i29.i.i.i to i8
+  %of.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 %conv11.i.i.i, ptr %of.i.i.i, align 1, !tbaa !30
+  store i64 %20, ptr %PC, align 8
+  %30 = add i64 %program_counter, 20
+  %31 = load i64, ptr %RSI, align 8
+  store i64 %31, ptr %RBX, align 8, !tbaa !5
+  store i64 %30, ptr %PC, align 8
+  %32 = add i64 %program_counter, 23
+  %33 = load i32, ptr %EDI, align 4
+  %sub.i.i17 = add i32 %33, -1
+  %cmp.i.i.i18 = icmp eq i32 %33, 0
+  %conv.i12.i = zext i1 %cmp.i.i.i18 to i8
+  store i8 %conv.i12.i, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i.i21 = trunc i32 %sub.i.i17 to i8
+  %34 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i21), !range !26
+  %35 = and i8 %34, 1
+  %36 = xor i8 %35, 1
+  store i8 %36, ptr %pf.i.i.i, align 1, !tbaa !27
+  %37 = xor i32 %33, %sub.i.i17
+  %38 = trunc i32 %37 to i8
+  %39 = lshr i8 %38, 4
+  %40 = and i8 %39, 1
+  store i8 %40, ptr %af.i.i.i, align 1, !tbaa !31
+  %cmp.i.i.i.i26 = icmp eq i32 %33, 1
+  %conv5.i.i.i28 = zext i1 %cmp.i.i.i.i26 to i8
+  store i8 %conv5.i.i.i28, ptr %zf.i.i.i, align 1, !tbaa !28
+  %cmp.i27.i.i.i30 = icmp slt i32 %sub.i.i17, 0
+  %conv8.i.i.i32 = zext i1 %cmp.i27.i.i.i30 to i8
+  store i8 %conv8.i.i.i32, ptr %sf.i.i.i, align 1, !tbaa !29
+  %shr.i.i.i.i34 = lshr i32 %33, 31
+  %shr2.i.i.i.i35 = lshr i32 %sub.i.i17, 31
+  %xor3.i.i.i.i36 = xor i32 %shr2.i.i.i.i35, %shr.i.i.i.i34
+  %add.i.i.i.i37 = add nuw nsw i32 %xor3.i.i.i.i36, %shr.i.i.i.i34
+  %cmp.i29.i.i.i38 = icmp eq i32 %add.i.i.i.i37, 2
+  %conv11.i.i.i40 = zext i1 %cmp.i29.i.i.i38 to i8
+  store i8 %conv11.i.i.i40, ptr %of.i.i.i, align 1, !tbaa !30
+  store i64 %32, ptr %PC, align 8
+  %41 = xor i1 %cmp.i27.i.i.i30, %cmp.i29.i.i.i38
+  %.demorgan = or i1 %cmp.i.i.i.i26, %41
+  %42 = xor i1 %.demorgan, true
+  %cond1.i.i.v = select i1 %42, i64 50, i64 25
+  %cond1.i.i = add i64 %cond1.i.i.v, %program_counter
+  store i64 %cond1.i.i, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn
+declare zeroext i1 @__remill_compare_sgt(i1 noundef zeroext) local_unnamed_addr #2
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199184(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %EAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !0
+  %RBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !33
+  %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3
+  %R14 = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 29, i32 0, i32 0, !remill_register !35
+  %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  store i64 4202692, ptr %RDI, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %2 = add i64 %program_counter, 10
+  %3 = add i64 %program_counter, -720
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %4 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i = add i64 %4, -8
+  %5 = inttoptr i64 %sub.i.i to ptr
+  store i64 %2, ptr %5, align 8
+  store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %3, ptr %rip.i, align 8, !tbaa !5
+  %6 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %2, ptr %PC, align 8
+  %7 = add i64 %program_counter, 15
+  store i64 3000, ptr %RDI, align 8, !tbaa !5
+  store i64 %7, ptr %PC, align 8
+  %8 = add i64 %program_counter, 20
+  %9 = add i64 %program_counter, -432
+  %10 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i2 = add i64 %10, -8
+  %11 = inttoptr i64 %sub.i.i2 to ptr
+  store i64 %8, ptr %11, align 8
+  store i64 %sub.i.i2, ptr %rsp.i, align 8, !tbaa !5
+  store i64 %9, ptr %rip.i, align 8, !tbaa !5
+  %12 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %8, ptr %PC, align 8
+  %13 = add i64 %program_counter, 23
+  %14 = load i64, ptr %R14, align 8
+  store i64 %14, ptr %RDI, align 8, !tbaa !5
+  store i64 %13, ptr %PC, align 8
+  %15 = add i64 %program_counter, 26
+  %16 = load i64, ptr %RBP, align 8
+  store i64 %16, ptr %RSI, align 8, !tbaa !5
+  store i64 %15, ptr %PC, align 8
+  %17 = add i64 %program_counter, 31
+  %18 = add i64 %program_counter, 768
+  %19 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i6 = add i64 %19, -8
+  %20 = inttoptr i64 %sub.i.i6 to ptr
+  store i64 %17, ptr %20, align 8
+  store i64 %sub.i.i6, ptr %rsp.i, align 8, !tbaa !5
+  store i64 %18, ptr %rip.i, align 8, !tbaa !5
+  %21 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %17, ptr %PC, align 8
+  %22 = add i64 %program_counter, 33
+  %23 = load i32, ptr %EAX, align 4
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 0, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i = trunc i32 %23 to i8
+  %24 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26
+  %25 = and i8 %24, 1
+  %26 = xor i8 %25, 1
+  %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %26, ptr %pf.i.i, align 1, !tbaa !27
+  %cmp.i.i.i = icmp eq i32 %23, 0
+  %conv3.i.i = zext i1 %cmp.i.i.i to i8
+  %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28
+  %cmp.i19.i.i = icmp slt i32 %23, 0
+  %conv6.i.i = zext i1 %cmp.i19.i.i to i8
+  %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29
+  %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 0, ptr %of.i.i, align 1, !tbaa !30
+  %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 undef, ptr %af.i.i, align 1, !tbaa !31
+  store i64 %22, ptr %PC, align 8
+  %27 = add i64 %program_counter, 35
+  %tobool.not.i = xor i1 %cmp.i.i.i, true
+  %cond1.i.i = select i1 %tobool.not.i, i64 %program_counter, i64 %27
+  store i64 %cond1.i.i, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199248(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2
+  %RDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !42
+  %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1
+  %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  store i64 4202578, ptr %RSI, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %2 = add i64 %program_counter, 7
+  %3 = load i32, ptr %RBX, align 4
+  %4 = zext i32 %3 to i64
+  store i64 %4, ptr %RDI, align 8, !tbaa !5
+  store i64 %2, ptr %PC, align 8
+  %5 = add i64 %program_counter, 12
+  store i64 5, ptr %RDX, align 8, !tbaa !5
+  store i64 %5, ptr %PC, align 8
+  %6 = add i64 %program_counter, 17
+  %7 = add i64 %program_counter, 1824
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %8 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i = add i64 %8, -8
+  %9 = inttoptr i64 %sub.i.i to ptr
+  store i64 %6, ptr %9, align 8
+  store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %7, ptr %rip.i, align 8, !tbaa !5
+  %10 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %6, ptr %PC, align 8
+  %11 = add i64 %program_counter, 19
+  %12 = load i32, ptr %RBX, align 4
+  %13 = zext i32 %12 to i64
+  store i64 %13, ptr %RDI, align 8, !tbaa !5
+  store i64 %11, ptr %PC, align 8
+  %14 = add i64 %program_counter, 24
+  store i64 10, ptr %RSI, align 8, !tbaa !5
+  store i64 %14, ptr %PC, align 8
+  %15 = add i64 %program_counter, 29
+  %16 = add i64 %program_counter, 1952
+  %17 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i3 = add i64 %17, -8
+  %18 = inttoptr i64 %sub.i.i3 to ptr
+  store i64 %15, ptr %18, align 8
+  store i64 %sub.i.i3, ptr %rsp.i, align 8, !tbaa !5
+  store i64 %16, ptr %rip.i, align 8, !tbaa !5
+  %19 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %15, ptr %PC, align 8
+  %20 = add i64 %program_counter, 31
+  %21 = load i32, ptr %RBX, align 4
+  %22 = zext i32 %21 to i64
+  store i64 %22, ptr %RDI, align 8, !tbaa !5
+  store i64 %20, ptr %PC, align 8
+  %23 = add i64 %program_counter, 36
+  store i64 1, ptr %RSI, align 8, !tbaa !5
+  store i64 %23, ptr %PC, align 8
+  %24 = add i64 %program_counter, 41
+  %25 = add i64 %program_counter, 1488
+  %26 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i8 = add i64 %26, -8
+  %27 = inttoptr i64 %sub.i.i8 to ptr
+  store i64 %24, ptr %27, align 8
+  store i64 %sub.i.i8, ptr %rsp.i, align 8, !tbaa !5
+  store i64 %25, ptr %rip.i, align 8, !tbaa !5
+  %28 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %24, ptr %PC, align 8
+  %29 = add i64 %program_counter, 44
+  %30 = load i64, ptr %RBX, align 8
+  %conv.i.i = trunc i64 %30 to i32
+  %add.i.i = add i32 %conv.i.i, 1
+  %conv.i22.i = zext i32 %add.i.i to i64
+  store i64 %conv.i22.i, ptr %RBX, align 8, !tbaa !5
+  %31 = icmp eq i32 %conv.i.i, -1
+  %conv.i23.i = zext i1 %31 to i8
+  %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1
+  store i8 %conv.i23.i, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i.i = trunc i32 %add.i.i to i8
+  %32 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i), !range !26
+  %33 = and i8 %32, 1
+  %34 = xor i8 %33, 1
+  %pf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3
+  store i8 %34, ptr %pf.i.i.i, align 1, !tbaa !27
+  %35 = xor i32 %add.i.i, %conv.i.i
+  %36 = trunc i32 %35 to i8
+  %37 = lshr i8 %36, 4
+  %38 = and i8 %37, 1
+  %af.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5
+  store i8 %38, ptr %af.i.i.i, align 1, !tbaa !31
+  %cmp.i.i.i.i = icmp eq i32 %add.i.i, 0
+  %conv5.i.i.i = zext i1 %cmp.i.i.i.i to i8
+  %zf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7
+  store i8 %conv5.i.i.i, ptr %zf.i.i.i, align 1, !tbaa !28
+  %cmp.i27.i.i.i = icmp slt i32 %add.i.i, 0
+  %conv8.i.i.i = zext i1 %cmp.i27.i.i.i to i8
+  %sf.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9
+  store i8 %conv8.i.i.i, ptr %sf.i.i.i, align 1, !tbaa !29
+  %shr.i.i.i.i = lshr i32 %conv.i.i, 31
+  %shr2.i.i.i.i = lshr i32 %add.i.i, 31
+  %xor.i28.i.i.i = xor i32 %shr2.i.i.i.i, %shr.i.i.i.i
+  %add.i.i.i.i = add nuw nsw i32 %xor.i28.i.i.i, %shr2.i.i.i.i
+  %cmp.i29.i.i.i = icmp eq i32 %add.i.i.i.i, 2
+  %conv11.i.i.i = zext i1 %cmp.i29.i.i.i to i8
+  %of.i.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13
+  store i8 %conv11.i.i.i, ptr %of.i.i.i, align 1, !tbaa !30
+  store i64 %29, ptr %PC, align 8
+  %39 = add i64 %program_counter, 47
+  %40 = load i32, ptr %RBX, align 4
+  %sub.i.i11 = add i32 %40, -5
+  %cmp.i.i.i12 = icmp ult i32 %40, 5
+  %conv.i12.i = zext i1 %cmp.i.i.i12 to i8
+  store i8 %conv.i12.i, ptr %cf.i.i, align 1, !tbaa !9
+  %conv.i.i.i.i15 = trunc i32 %sub.i.i11 to i8
+  %41 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i.i15), !range !26
+  %42 = and i8 %41, 1
+  %43 = xor i8 %42, 1
+  store i8 %43, ptr %pf.i.i.i, align 1, !tbaa !27
+  %44 = xor i32 %40, %sub.i.i11
+  %45 = trunc i32 %44 to i8
+  %46 = lshr i8 %45, 4
+  %47 = and i8 %46, 1
+  store i8 %47, ptr %af.i.i.i, align 1, !tbaa !31
+  %cmp.i.i.i.i20 = icmp eq i32 %40, 5
+  %conv5.i.i.i22 = zext i1 %cmp.i.i.i.i20 to i8
+  store i8 %conv5.i.i.i22, ptr %zf.i.i.i, align 1, !tbaa !28
+  %cmp.i27.i.i.i24 = icmp slt i32 %sub.i.i11, 0
+  %conv8.i.i.i26 = zext i1 %cmp.i27.i.i.i24 to i8
+  store i8 %conv8.i.i.i26, ptr %sf.i.i.i, align 1, !tbaa !29
+  %shr.i.i.i.i28 = lshr i32 %40, 31
+  %shr2.i.i.i.i29 = lshr i32 %sub.i.i11, 31
+  %xor3.i.i.i.i = xor i32 %shr2.i.i.i.i29, %shr.i.i.i.i28
+  %add.i.i.i.i30 = add nuw nsw i32 %xor3.i.i.i.i, %shr.i.i.i.i28
+  %cmp.i29.i.i.i31 = icmp eq i32 %add.i.i.i.i30, 2
+  %conv11.i.i.i33 = zext i1 %cmp.i29.i.i.i31 to i8
+  store i8 %conv11.i.i.i33, ptr %of.i.i.i, align 1, !tbaa !30
+  store i64 %39, ptr %PC, align 8
+  %48 = add i64 %program_counter, 49
+  %cond1.i.i = select i1 %cmp.i.i.i.i20, i64 %48, i64 %program_counter
+  store i64 %cond1.i.i, ptr %next_pc_out, align 8
+  ret ptr %memory
+}
+
+; Function Attrs: noinline
+define internal fastcc ptr @basic_block_func4199074(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 {
+  %EAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !0
+  %RDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !42
+  %RBP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !remill_register !33
+  %RBX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !remill_register !2
+  %RCX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 5, i32 0, i32 0, !remill_register !41
+  %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34
+  %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3
+  %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1
+  %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4
+  store i64 %program_counter, ptr %PC, align 8
+  %1 = add i64 %program_counter, 5
+  store i64 4202554, ptr %RDI, align 8, !tbaa !5
+  store i64 %1, ptr %PC, align 8
+  %2 = add i64 %program_counter, 10
+  store i64 4202573, ptr %RSI, align 8, !tbaa !5
+  store i64 %2, ptr %PC, align 8
+  %3 = add i64 %program_counter, 15
+  %4 = add i64 %program_counter, -354
+  %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13
+  %5 = load i64, ptr %rsp.i, align 8, !tbaa !32
+  %sub.i.i = add i64 %5, -8
+  %6 = inttoptr i64 %sub.i.i to ptr
+  store i64 %3, ptr %6, align 8
+  store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5
+  %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33
+  store i64 %4, ptr %rip.i, align 8, !tbaa !5
+  %7 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory)
+  store i64 %3, ptr %PC, align 8
+  %8 = add i64 %program_counter, 20
+  %9 = load i64, ptr %RSP, align 8
+  %10 = add i64 %9, 40
+  %11 = load i64, ptr %EAX, align 8
+  %12 = inttoptr i64 %10 to ptr
+ store i64 %11, ptr %12, align 8 + store i64 %8, ptr %PC, align 8 + %13 = add i64 %program_counter, 24 + %14 = load i64, ptr %RBX, align 8 + %15 = add i64 %14, 8 + %16 = inttoptr i64 %15 to ptr + %17 = load i64, ptr %16, align 8 + store i64 %17, ptr %RCX, align 8, !tbaa !5 + store i64 %13, ptr %PC, align 8 + %18 = add i64 %program_counter, 29 + %19 = add i64 %9, 86 + store i64 %19, ptr %RBP, align 8, !tbaa !5 + store i64 %18, ptr %PC, align 8 + %20 = add i64 %program_counter, 34 + store i64 10, ptr %RSI, align 8, !tbaa !5 + store i64 %20, ptr %PC, align 8 + %21 = add i64 %program_counter, 39 + store i64 4202575, ptr %RDX, align 8, !tbaa !5 + store i64 %21, ptr %PC, align 8 + %22 = add i64 %program_counter, 42 + store i64 %19, ptr %RDI, align 8, !tbaa !5 + store i64 %22, ptr %PC, align 8 + %23 = add i64 %program_counter, 44 + %24 = load i32, ptr %EAX, align 4 + %conv.i.i = trunc i64 %11 to i32 + %xor3.i.i = xor i32 %24, %conv.i.i + %conv.i27.i = zext i32 %xor3.i.i to i64 + store i64 %conv.i27.i, ptr %EAX, align 8, !tbaa !5 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i = trunc i32 %xor3.i.i to i8 + %25 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26 + %26 = and i8 %25, 1 + %27 = xor i8 %26, 1 + %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %27, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i = icmp eq i32 %xor3.i.i, 0 + %conv3.i.i = zext i1 %cmp.i.i.i to i8 + %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i19.i.i = icmp slt i32 %xor3.i.i, 0 + %conv6.i.i = zext i1 %cmp.i19.i.i to i8 + %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29 + %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 undef, ptr %af.i.i, align 1, !tbaa !31 + store i64 %23, ptr %PC, align 8 + %28 = add i64 %program_counter, 49 + %29 = add i64 %program_counter, -514 + %30 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i4 = add i64 %30, -8 + %31 = inttoptr i64 %sub.i.i4 to ptr + store i64 %28, ptr %31, align 8 + store i64 %sub.i.i4, ptr %rsp.i, align 8, !tbaa !5 + store i64 %29, ptr %rip.i, align 8, !tbaa !5 + %32 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %28, ptr %PC, align 8 + %33 = add i64 %program_counter, 54 + store i64 1, ptr %RBX, align 8, !tbaa !5 + store i64 %33, ptr %PC, align 8 + %34 = add i64 %program_counter, 59 + store i64 29, ptr %RDI, align 8, !tbaa !5 + store i64 %34, ptr %PC, align 8 + %35 = add i64 %program_counter, 64 + store i64 3, ptr %RSI, align 8, !tbaa !5 + store i64 %35, ptr %PC, align 8 + %36 = add i64 %program_counter, 69 + store i64 1, ptr %RDX, align 8, !tbaa !5 + store i64 %36, ptr %PC, align 8 + %37 = add i64 %program_counter, 74 + %38 = add i64 %program_counter, -306 + %39 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i8 = add i64 %39, -8 + %40 = inttoptr i64 %sub.i.i8 to ptr + store i64 %37, ptr %40, align 8 + store i64 %sub.i.i8, ptr %rsp.i, align 8, !tbaa !5 + store i64 %38, ptr %rip.i, align 8, !tbaa !5 + %41 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %37, ptr %PC, align 8 + %42 
= add i64 %program_counter, 79 + %43 = load i64, ptr %RSP, align 8 + %44 = add i64 %43, 32 + %45 = load i64, ptr %EAX, align 8 + %46 = inttoptr i64 %44 to ptr + store i64 %45, ptr %46, align 8 + store i64 %42, ptr %PC, align 8 + %47 = add i64 %program_counter, 83 + %48 = add i64 %43, 24 + %49 = load i32, ptr %EAX, align 4 + %50 = inttoptr i64 %48 to ptr + store i32 %49, ptr %50, align 4 + store i64 %47, ptr %PC, align 8 + %51 = add i64 %program_counter, 88 + store i64 %48, ptr %RDI, align 8, !tbaa !5 + store i64 %51, ptr %PC, align 8 + %52 = add i64 %program_counter, 91 + %53 = load i64, ptr %RBP, align 8 + store i64 %53, ptr %RSI, align 8, !tbaa !5 + store i64 %52, ptr %PC, align 8 + %54 = add i64 %program_counter, 96 + %55 = add i64 %program_counter, 878 + %56 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i14 = add i64 %56, -8 + %57 = inttoptr i64 %sub.i.i14 to ptr + store i64 %54, ptr %57, align 8 + store i64 %sub.i.i14, ptr %rsp.i, align 8, !tbaa !5 + store i64 %55, ptr %rip.i, align 8, !tbaa !5 + %58 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %54, ptr %PC, align 8 + %59 = add i64 %program_counter, 98 + %60 = load i32, ptr %EAX, align 4 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i18 = trunc i32 %60 to i8 + %61 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i18), !range !26 + %62 = and i8 %61, 1 + %63 = xor i8 %62, 1 + store i8 %63, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i20 = icmp eq i32 %60, 0 + %conv3.i.i22 = zext i1 %cmp.i.i.i20 to i8 + store i8 %conv3.i.i22, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i19.i.i24 = icmp slt i32 %60, 0 + %conv6.i.i26 = zext i1 %cmp.i19.i.i24 to i8 + store i8 %conv6.i.i26, ptr %sf.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + store i8 undef, ptr %af.i.i, align 1, !tbaa !31 + store i64 %59, ptr %PC, align 8 + %cond1.i.i.v = select i1 %cmp.i.i.i20, i64 145, i64 100 + %cond1.i.i = add i64 %cond1.i.i.v, %program_counter + store i64 %cond1.i.i, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199470(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %RAX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !44 + %RSI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !remill_register !3 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %RDX = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !remill_register !42 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 5 + store i64 16, ptr %RDX, align 8, !tbaa !5 + store i64 %1, ptr %PC, align 8 + %2 = add i64 %program_counter, 10 + %3 = load i64, ptr %RSP, align 8 + %4 = add i64 %3, 32 + %5 = inttoptr i64 %4 to ptr + %6 = load i64, ptr %5, align 8 + store i64 %6, ptr %RDI, align 8, !tbaa !5 + store i64 %2, ptr %PC, align 8 + %7 = add i64 %program_counter, 13 + store i64 %3, ptr %RSI, align 8, !tbaa !5 + store i64 %7, ptr %PC, align 8 + %8 = add i64 %program_counter, 18 + %9 = add i64 %program_counter, -862 + %rsp.i = getelementptr inbounds 
%struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %10 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i = add i64 %10, -8 + %11 = inttoptr i64 %sub.i.i to ptr + store i64 %8, ptr %11, align 8 + store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %9, ptr %rip.i, align 8, !tbaa !5 + %12 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %8, ptr %PC, align 8 + %13 = add i64 %program_counter, 21 + %14 = load i64, ptr %RAX, align 8 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i = trunc i64 %14 to i8 + %15 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26 + %16 = and i8 %15, 1 + %17 = xor i8 %16, 1 + %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %17, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i = icmp eq i64 %14, 0 + %conv3.i.i = zext i1 %cmp.i.i.i to i8 + %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i19.i.i = icmp slt i64 %14, 0 + %conv6.i.i = zext i1 %cmp.i19.i.i to i8 + %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29 + %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 undef, ptr %af.i.i, align 1, !tbaa !31 + store i64 %13, ptr %PC, align 8 + %cond1.i.i.v = select i1 %cmp.i19.i.i, i64 420, i64 27 + %cond1.i.i = add i64 %cond1.i.i.v, %program_counter + store i64 %cond1.i.i, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define internal fastcc ptr @basic_block_func4199890(ptr %state, i64 %program_counter, ptr %memory, ptr %next_pc_out) unnamed_addr #0 { + %AL = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !remill_register !39 + %RSP = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !remill_register !34 + %RDI = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !remill_register !1 + %PC = getelementptr inbounds %struct.State, ptr %state, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !remill_register !4 + store i64 %program_counter, ptr %PC, align 8 + %1 = add i64 %program_counter, 5 + store i64 4202583, ptr %RDI, align 8, !tbaa !5 + store i64 %1, ptr %PC, align 8 + %2 = add i64 %program_counter, 10 + %3 = add i64 %program_counter, -1154 + %rsp.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 13 + %4 = load i64, ptr %rsp.i, align 8, !tbaa !32 + %sub.i.i = add i64 %4, -8 + %5 = inttoptr i64 %sub.i.i to ptr + store i64 %2, ptr %5, align 8 + store i64 %sub.i.i, ptr %rsp.i, align 8, !tbaa !5 + %rip.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 6, i32 33 + store i64 %3, ptr %rip.i, align 8, !tbaa !5 + %6 = call ptr @__remill_function_call(ptr %state, i64 %program_counter, ptr %memory) + store i64 %2, ptr %PC, align 8 + %7 = add i64 %program_counter, 18 + %8 = load i64, ptr %RSP, align 8 + %9 = add i64 %8, 20 + %10 = inttoptr i64 %9 to ptr + store i32 1, ptr %10, align 4 + store i64 %7, ptr %PC, align 8 + %11 = add i64 %program_counter, 20 + %12 = 
load i64, ptr %AL, align 8 + %13 = load i32, ptr %AL, align 4 + %conv.i.i = trunc i64 %12 to i32 + %xor3.i.i = xor i32 %13, %conv.i.i + %conv.i27.i = zext i32 %xor3.i.i to i64 + store i64 %conv.i27.i, ptr %AL, align 8, !tbaa !5 + %cf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %conv.i.i.i = trunc i32 %xor3.i.i to i8 + %14 = call i8 @llvm.ctpop.i8(i8 %conv.i.i.i), !range !26 + %15 = and i8 %14, 1 + %16 = xor i8 %15, 1 + %pf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 3 + store i8 %16, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i = icmp eq i32 %xor3.i.i, 0 + %conv3.i.i = zext i1 %cmp.i.i.i to i8 + %zf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 7 + store i8 %conv3.i.i, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i19.i.i = icmp slt i32 %xor3.i.i, 0 + %conv6.i.i = zext i1 %cmp.i19.i.i to i8 + %sf.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 9 + store i8 %conv6.i.i, ptr %sf.i.i, align 1, !tbaa !29 + %of.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 13 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + %af.i.i = getelementptr inbounds %struct.X86State, ptr %state, i64 0, i32 2, i32 5 + store i8 undef, ptr %af.i.i, align 1, !tbaa !31 + store i64 %11, ptr %PC, align 8 + %17 = add i64 %program_counter, 22 + %18 = load i8, ptr %AL, align 1 + store i8 0, ptr %cf.i.i, align 1, !tbaa !9 + %19 = call i8 @llvm.ctpop.i8(i8 %18), !range !26 + %20 = and i8 %19, 1 + %21 = xor i8 %20, 1 + store i8 %21, ptr %pf.i.i, align 1, !tbaa !27 + %cmp.i.i.i5 = icmp eq i8 %18, 0 + %conv3.i.i7 = zext i1 %cmp.i.i.i5 to i8 + store i8 %conv3.i.i7, ptr %zf.i.i, align 1, !tbaa !28 + %cmp.i19.i.i9 = icmp slt i8 %18, 0 + %conv6.i.i10 = zext i1 %cmp.i19.i.i9 to i8 + store i8 %conv6.i.i10, ptr %sf.i.i, align 1, !tbaa !29 + store i8 0, ptr %of.i.i, align 1, !tbaa !30 + store i8 undef, ptr %af.i.i, align 1, !tbaa !31 + store i64 %17, ptr %PC, align 8 + %tobool.not.i = xor i1 %cmp.i.i.i5, true + %cond1.i.i.v = select i1 %tobool.not.i, i64 -498, i64 28 + %cond1.i.i = add i64 %cond1.i.i.v, %program_counter + store i64 %cond1.i.i, ptr %next_pc_out, align 8 + ret ptr %memory +} + +; Function Attrs: noinline +define x86_stdcallcc i32 @sub_401270__AI_SI_B_64(i32 %0, ptr %1) local_unnamed_addr #0 !pc !54 { + %return_address = call ptr @llvm.returnaddress(i32 0), !pc !54 + %3 = ptrtoint ptr %return_address to i64, !pc !54 + %return_address_loc = alloca i64, align 8, !pc !54, !stack_offset !55 + %4 = ptrtoint ptr %return_address_loc to i64, !pc !54, !stack_offset !55 + %5 = load i64, ptr @__anvill_stack_0, align 8, !pc !54 + store i64 %5, ptr %return_address_loc, align 8, !pc !54 + %6 = alloca i64, align 8, !pc !54 + %7 = alloca %struct.State, align 8, !pc !54 + store i32 0, ptr %7, align 8, !pc !54 + %8 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 0, i32 1, !pc !54 + store i32 0, ptr %8, align 4, !pc !54 + %9 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 0, i32 2, i32 0, !pc !54 + store i64 0, ptr %9, align 8, !pc !54 + %10 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %10, align 8, !pc !54 + %11 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %11, align 8, !pc !54 + %12 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 
1, i64 0, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %12, align 8, !pc !54 + %13 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %13, align 8, !pc !54 + %14 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %14, align 8, !pc !54 + %15 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %15, align 8, !pc !54 + %16 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %16, align 8, !pc !54 + %17 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 0, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %17, align 8, !pc !54 + %18 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %18, align 8, !pc !54 + %19 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %19, align 8, !pc !54 + %20 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %20, align 8, !pc !54 + %21 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %21, align 8, !pc !54 + %22 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %22, align 8, !pc !54 + %23 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %23, align 8, !pc !54 + %24 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %24, align 8, !pc !54 + %25 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 1, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %25, align 8, !pc !54 + %26 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %26, align 8, !pc !54 + %27 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %27, align 8, !pc !54 + %28 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %28, align 8, !pc !54 + %29 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %29, align 8, !pc !54 + %30 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %30, align 8, !pc !54 + %31 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %31, align 8, !pc !54 + %32 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %32, align 8, !pc !54 + %33 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 2, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %33, align 8, !pc !54 + %34 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 0, !pc !54 + store 
i64 0, ptr %34, align 8, !pc !54 + %35 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %35, align 8, !pc !54 + %36 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %36, align 8, !pc !54 + %37 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %37, align 8, !pc !54 + %38 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %38, align 8, !pc !54 + %39 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %39, align 8, !pc !54 + %40 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %40, align 8, !pc !54 + %41 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 3, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %41, align 8, !pc !54 + %42 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %42, align 8, !pc !54 + %43 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %43, align 8, !pc !54 + %44 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %44, align 8, !pc !54 + %45 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %45, align 8, !pc !54 + %46 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %46, align 8, !pc !54 + %47 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %47, align 8, !pc !54 + %48 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %48, align 8, !pc !54 + %49 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 4, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %49, align 8, !pc !54 + %50 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %50, align 8, !pc !54 + %51 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %51, align 8, !pc !54 + %52 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %52, align 8, !pc !54 + %53 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %53, align 8, !pc !54 + %54 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %54, align 8, !pc !54 + %55 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %55, align 8, !pc !54 + %56 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %56, align 8, !pc !54 + %57 = getelementptr 
inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 5, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %57, align 8, !pc !54 + %58 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %58, align 8, !pc !54 + %59 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %59, align 8, !pc !54 + %60 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %60, align 8, !pc !54 + %61 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %61, align 8, !pc !54 + %62 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %62, align 8, !pc !54 + %63 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %63, align 8, !pc !54 + %64 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %64, align 8, !pc !54 + %65 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 6, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %65, align 8, !pc !54 + %66 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %66, align 8, !pc !54 + %67 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %67, align 8, !pc !54 + %68 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %68, align 8, !pc !54 + %69 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %69, align 8, !pc !54 + %70 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %70, align 8, !pc !54 + %71 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %71, align 8, !pc !54 + %72 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %72, align 8, !pc !54 + %73 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 7, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %73, align 8, !pc !54 + %74 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %74, align 8, !pc !54 + %75 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %75, align 8, !pc !54 + %76 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %76, align 8, !pc !54 + %77 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %77, align 8, !pc !54 + %78 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %78, align 8, !pc !54 + %79 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 
8, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %79, align 8, !pc !54 + %80 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %80, align 8, !pc !54 + %81 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 8, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %81, align 8, !pc !54 + %82 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %82, align 8, !pc !54 + %83 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %83, align 8, !pc !54 + %84 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %84, align 8, !pc !54 + %85 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %85, align 8, !pc !54 + %86 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %86, align 8, !pc !54 + %87 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %87, align 8, !pc !54 + %88 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %88, align 8, !pc !54 + %89 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 9, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %89, align 8, !pc !54 + %90 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %90, align 8, !pc !54 + %91 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %91, align 8, !pc !54 + %92 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %92, align 8, !pc !54 + %93 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %93, align 8, !pc !54 + %94 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %94, align 8, !pc !54 + %95 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %95, align 8, !pc !54 + %96 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %96, align 8, !pc !54 + %97 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 10, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %97, align 8, !pc !54 + %98 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %98, align 8, !pc !54 + %99 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %99, align 8, !pc !54 + %100 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %100, align 8, !pc !54 + %101 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 3, !pc !54 + 
store i64 0, ptr %101, align 8, !pc !54 + %102 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %102, align 8, !pc !54 + %103 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %103, align 8, !pc !54 + %104 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %104, align 8, !pc !54 + %105 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 11, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %105, align 8, !pc !54 + %106 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %106, align 8, !pc !54 + %107 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %107, align 8, !pc !54 + %108 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %108, align 8, !pc !54 + %109 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %109, align 8, !pc !54 + %110 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %110, align 8, !pc !54 + %111 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %111, align 8, !pc !54 + %112 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %112, align 8, !pc !54 + %113 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 12, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %113, align 8, !pc !54 + %114 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %114, align 8, !pc !54 + %115 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %115, align 8, !pc !54 + %116 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %116, align 8, !pc !54 + %117 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %117, align 8, !pc !54 + %118 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %118, align 8, !pc !54 + %119 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %119, align 8, !pc !54 + %120 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %120, align 8, !pc !54 + %121 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 13, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %121, align 8, !pc !54 + %122 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %122, align 8, !pc !54 + %123 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 1, 
!pc !54 + store i64 0, ptr %123, align 8, !pc !54 + %124 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %124, align 8, !pc !54 + %125 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %125, align 8, !pc !54 + %126 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %126, align 8, !pc !54 + %127 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %127, align 8, !pc !54 + %128 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %128, align 8, !pc !54 + %129 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 14, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %129, align 8, !pc !54 + %130 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %130, align 8, !pc !54 + %131 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %131, align 8, !pc !54 + %132 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %132, align 8, !pc !54 + %133 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %133, align 8, !pc !54 + %134 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %134, align 8, !pc !54 + %135 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %135, align 8, !pc !54 + %136 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %136, align 8, !pc !54 + %137 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 15, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %137, align 8, !pc !54 + %138 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %138, align 8, !pc !54 + %139 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %139, align 8, !pc !54 + %140 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %140, align 8, !pc !54 + %141 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %141, align 8, !pc !54 + %142 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %142, align 8, !pc !54 + %143 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %143, align 8, !pc !54 + %144 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %144, align 8, !pc !54 + %145 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 16, i32 0, i32 0, i32 
0, i64 7, !pc !54 + store i64 0, ptr %145, align 8, !pc !54 + %146 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %146, align 8, !pc !54 + %147 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %147, align 8, !pc !54 + %148 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %148, align 8, !pc !54 + %149 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %149, align 8, !pc !54 + %150 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %150, align 8, !pc !54 + %151 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %151, align 8, !pc !54 + %152 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %152, align 8, !pc !54 + %153 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 17, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %153, align 8, !pc !54 + %154 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %154, align 8, !pc !54 + %155 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %155, align 8, !pc !54 + %156 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %156, align 8, !pc !54 + %157 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %157, align 8, !pc !54 + %158 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %158, align 8, !pc !54 + %159 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %159, align 8, !pc !54 + %160 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %160, align 8, !pc !54 + %161 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 18, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %161, align 8, !pc !54 + %162 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %162, align 8, !pc !54 + %163 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %163, align 8, !pc !54 + %164 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %164, align 8, !pc !54 + %165 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %165, align 8, !pc !54 + %166 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %166, align 8, !pc !54 + %167 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, 
i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %167, align 8, !pc !54 + %168 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %168, align 8, !pc !54 + %169 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 19, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %169, align 8, !pc !54 + %170 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %170, align 8, !pc !54 + %171 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %171, align 8, !pc !54 + %172 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %172, align 8, !pc !54 + %173 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %173, align 8, !pc !54 + %174 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %174, align 8, !pc !54 + %175 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %175, align 8, !pc !54 + %176 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %176, align 8, !pc !54 + %177 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 20, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %177, align 8, !pc !54 + %178 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %178, align 8, !pc !54 + %179 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %179, align 8, !pc !54 + %180 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %180, align 8, !pc !54 + %181 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %181, align 8, !pc !54 + %182 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %182, align 8, !pc !54 + %183 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %183, align 8, !pc !54 + %184 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %184, align 8, !pc !54 + %185 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 21, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %185, align 8, !pc !54 + %186 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %186, align 8, !pc !54 + %187 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %187, align 8, !pc !54 + %188 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %188, align 8, !pc !54 + %189 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 
22, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %189, align 8, !pc !54 + %190 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %190, align 8, !pc !54 + %191 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %191, align 8, !pc !54 + %192 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %192, align 8, !pc !54 + %193 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 22, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %193, align 8, !pc !54 + %194 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %194, align 8, !pc !54 + %195 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %195, align 8, !pc !54 + %196 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %196, align 8, !pc !54 + %197 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %197, align 8, !pc !54 + %198 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %198, align 8, !pc !54 + %199 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %199, align 8, !pc !54 + %200 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %200, align 8, !pc !54 + %201 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 23, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %201, align 8, !pc !54 + %202 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %202, align 8, !pc !54 + %203 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %203, align 8, !pc !54 + %204 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %204, align 8, !pc !54 + %205 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %205, align 8, !pc !54 + %206 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %206, align 8, !pc !54 + %207 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %207, align 8, !pc !54 + %208 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %208, align 8, !pc !54 + %209 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 24, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %209, align 8, !pc !54 + %210 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %210, align 8, !pc !54 + %211 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, 
i32 1, i64 25, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %211, align 8, !pc !54 + %212 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %212, align 8, !pc !54 + %213 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %213, align 8, !pc !54 + %214 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %214, align 8, !pc !54 + %215 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %215, align 8, !pc !54 + %216 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %216, align 8, !pc !54 + %217 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 25, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %217, align 8, !pc !54 + %218 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %218, align 8, !pc !54 + %219 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %219, align 8, !pc !54 + %220 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %220, align 8, !pc !54 + %221 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %221, align 8, !pc !54 + %222 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %222, align 8, !pc !54 + %223 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %223, align 8, !pc !54 + %224 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %224, align 8, !pc !54 + %225 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 26, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %225, align 8, !pc !54 + %226 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %226, align 8, !pc !54 + %227 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %227, align 8, !pc !54 + %228 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %228, align 8, !pc !54 + %229 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %229, align 8, !pc !54 + %230 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %230, align 8, !pc !54 + %231 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %231, align 8, !pc !54 + %232 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %232, align 8, !pc !54 + %233 = getelementptr inbounds %struct.State, ptr %7, i64 
0, i32 0, i32 1, i64 27, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %233, align 8, !pc !54 + %234 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %234, align 8, !pc !54 + %235 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %235, align 8, !pc !54 + %236 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %236, align 8, !pc !54 + %237 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %237, align 8, !pc !54 + %238 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %238, align 8, !pc !54 + %239 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %239, align 8, !pc !54 + %240 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %240, align 8, !pc !54 + %241 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 28, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %241, align 8, !pc !54 + %242 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %242, align 8, !pc !54 + %243 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %243, align 8, !pc !54 + %244 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %244, align 8, !pc !54 + %245 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %245, align 8, !pc !54 + %246 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %246, align 8, !pc !54 + %247 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %247, align 8, !pc !54 + %248 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %248, align 8, !pc !54 + %249 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 29, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %249, align 8, !pc !54 + %250 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %250, align 8, !pc !54 + %251 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %251, align 8, !pc !54 + %252 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %252, align 8, !pc !54 + %253 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %253, align 8, !pc !54 + %254 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %254, align 8, !pc !54 + %255 = getelementptr inbounds %struct.State, 
ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %255, align 8, !pc !54 + %256 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %256, align 8, !pc !54 + %257 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 30, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %257, align 8, !pc !54 + %258 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 0, !pc !54 + store i64 0, ptr %258, align 8, !pc !54 + %259 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 1, !pc !54 + store i64 0, ptr %259, align 8, !pc !54 + %260 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 2, !pc !54 + store i64 0, ptr %260, align 8, !pc !54 + %261 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 3, !pc !54 + store i64 0, ptr %261, align 8, !pc !54 + %262 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 4, !pc !54 + store i64 0, ptr %262, align 8, !pc !54 + %263 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 5, !pc !54 + store i64 0, ptr %263, align 8, !pc !54 + %264 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 6, !pc !54 + store i64 0, ptr %264, align 8, !pc !54 + %265 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 1, i64 31, i32 0, i32 0, i32 0, i64 7, !pc !54 + store i64 0, ptr %265, align 8, !pc !54 + %266 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 0, !pc !54 + store i8 0, ptr %266, align 8, !pc !54 + %267 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 1, !pc !54 + store i8 0, ptr %267, align 1, !pc !54 + %268 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 2, !pc !54 + store i8 0, ptr %268, align 2, !pc !54 + %269 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 3, !pc !54 + store i8 0, ptr %269, align 1, !pc !54 + %270 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 4, !pc !54 + store i8 0, ptr %270, align 4, !pc !54 + %271 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 5, !pc !54 + store i8 0, ptr %271, align 1, !pc !54 + %272 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 6, !pc !54 + store i8 0, ptr %272, align 2, !pc !54 + %273 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 7, !pc !54 + store i8 0, ptr %273, align 1, !pc !54 + %274 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 8, !pc !54 + store i8 0, ptr %274, align 8, !pc !54 + %275 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 9, !pc !54 + store i8 0, ptr %275, align 1, !pc !54 + %276 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 10, !pc !54 + store i8 0, ptr %276, align 2, !pc !54 + %277 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 11, !pc !54 + store i8 0, ptr %277, align 1, !pc !54 + %278 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 12, !pc !54 + store i8 0, ptr %278, align 4, !pc !54 + %279 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 13, !pc !54 + store 
i8 0, ptr %279, align 1, !pc !54 + %280 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 14, !pc !54 + store i8 0, ptr %280, align 2, !pc !54 + %281 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 2, i32 15, !pc !54 + store i8 0, ptr %281, align 1, !pc !54 + %282 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 3, i32 0, !pc !54 + store i64 0, ptr %282, align 8, !pc !54 + %283 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 0, !pc !54 + store i16 0, ptr %283, align 8, !pc !54 + %284 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 1, i32 0, !pc !54 + store i16 0, ptr %284, align 2, !pc !54 + %285 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 2, !pc !54 + store i16 0, ptr %285, align 4, !pc !54 + %286 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 3, i32 0, !pc !54 + store i16 0, ptr %286, align 2, !pc !54 + %287 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 4, !pc !54 + store i16 0, ptr %287, align 8, !pc !54 + %288 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 5, i32 0, !pc !54 + store i16 0, ptr %288, align 2, !pc !54 + %289 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 6, !pc !54 + store i16 0, ptr %289, align 4, !pc !54 + %290 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 7, i32 0, !pc !54 + store i16 0, ptr %290, align 2, !pc !54 + %291 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 8, !pc !54 + store i16 0, ptr %291, align 8, !pc !54 + %292 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 9, i32 0, !pc !54 + store i16 0, ptr %292, align 2, !pc !54 + %293 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 10, !pc !54 + store i16 0, ptr %293, align 4, !pc !54 + %294 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 4, i32 11, i32 0, !pc !54 + store i16 0, ptr %294, align 2, !pc !54 + %295 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 0, !pc !54 + store i64 0, ptr %295, align 8, !pc !54 + %296 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 1, i32 0, i32 0, !pc !54 + store i64 0, ptr %296, align 8, !pc !54 + %297 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 2, !pc !54 + store i64 0, ptr %297, align 8, !pc !54 + %298 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 3, i32 0, i32 0, !pc !54 + store i64 0, ptr %298, align 8, !pc !54 + %299 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 4, !pc !54 + store i64 0, ptr %299, align 8, !pc !54 + %300 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 5, i32 0, i32 0, !pc !54 + store i64 0, ptr %300, align 8, !pc !54 + %301 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 6, !pc !54 + store i64 0, ptr %301, align 8, !pc !54 + %302 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 7, i32 0, i32 0, !pc !54 + store i64 0, ptr %302, align 8, !pc !54 + %303 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 8, !pc !54 + store i64 0, ptr %303, align 8, !pc !54 + %304 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 9, i32 0, i32 0, !pc !54 + store i64 0, ptr %304, align 8, !pc !54 + %305 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 
10, !pc !54 + store i64 0, ptr %305, align 8, !pc !54 + %306 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 5, i32 11, i32 0, i32 0, !pc !54 + store i64 0, ptr %306, align 8, !pc !54 + %307 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 0, !pc !54 + store i64 0, ptr %307, align 8, !pc !54 + %308 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 1, i32 0, i32 0, !pc !54 + store i64 0, ptr %308, align 8, !pc !54 + %309 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 2, !pc !54 + store i64 0, ptr %309, align 8, !pc !54 + %310 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 3, i32 0, i32 0, !pc !54 + store i64 0, ptr %310, align 8, !pc !54 + %311 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 4, !pc !54 + store i64 0, ptr %311, align 8, !pc !54 + %312 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 5, i32 0, i32 0, !pc !54 + store i64 0, ptr %312, align 8, !pc !54 + %313 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 6, !pc !54 + store i64 0, ptr %313, align 8, !pc !54 + %314 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 7, i32 0, i32 0, !pc !54 + store i64 0, ptr %314, align 8, !pc !54 + %315 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 8, !pc !54 + store i64 0, ptr %315, align 8, !pc !54 + %316 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 9, i32 0, i32 0, !pc !54 + store i64 0, ptr %316, align 8, !pc !54 + %317 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 10, !pc !54 + store i64 0, ptr %317, align 8, !pc !54 + %318 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 11, i32 0, i32 0, !pc !54 + store i64 0, ptr %318, align 8, !pc !54 + %319 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 12, !pc !54 + store i64 0, ptr %319, align 8, !pc !54 + %320 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 13, i32 0, i32 0, !pc !54 + store i64 0, ptr %320, align 8, !pc !54 + %321 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 14, !pc !54 + store i64 0, ptr %321, align 8, !pc !54 + %322 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 15, i32 0, i32 0, !pc !54 + store i64 0, ptr %322, align 8, !pc !54 + %323 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 16, !pc !54 + store i64 0, ptr %323, align 8, !pc !54 + %324 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 17, i32 0, i32 0, !pc !54 + store i64 0, ptr %324, align 8, !pc !54 + %325 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 18, !pc !54 + store i64 0, ptr %325, align 8, !pc !54 + %326 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 19, i32 0, i32 0, !pc !54 + store i64 0, ptr %326, align 8, !pc !54 + %327 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 20, !pc !54 + store i64 0, ptr %327, align 8, !pc !54 + %328 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 21, i32 0, i32 0, !pc !54 + store i64 0, ptr %328, align 8, !pc !54 + %329 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 22, !pc !54 + store i64 0, ptr %329, align 8, !pc !54 + %330 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 23, i32 0, i32 0, !pc !54 + store i64 0, ptr 
%330, align 8, !pc !54 + %331 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 24, !pc !54 + store i64 0, ptr %331, align 8, !pc !54 + %332 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 25, i32 0, i32 0, !pc !54 + store i64 0, ptr %332, align 8, !pc !54 + %333 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 26, !pc !54 + store i64 0, ptr %333, align 8, !pc !54 + %334 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 27, i32 0, i32 0, !pc !54 + store i64 0, ptr %334, align 8, !pc !54 + %335 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 28, !pc !54 + store i64 0, ptr %335, align 8, !pc !54 + %336 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 29, i32 0, i32 0, !pc !54 + store i64 0, ptr %336, align 8, !pc !54 + %337 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 30, !pc !54 + store i64 0, ptr %337, align 8, !pc !54 + %338 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 31, i32 0, i32 0, !pc !54 + store i64 0, ptr %338, align 8, !pc !54 + %339 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 32, !pc !54 + store i64 0, ptr %339, align 8, !pc !54 + %340 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 6, i32 33, i32 0, i32 0, !pc !54 + store i64 0, ptr %340, align 8, !pc !54 + %341 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 0, i64 0, !pc !54 + store i8 0, ptr %341, align 8, !pc !54 + %342 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 0, i64 1, !pc !54 + store i8 0, ptr %342, align 1, !pc !54 + %343 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 0, i64 2, !pc !54 + store i8 0, ptr %343, align 2, !pc !54 + %344 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 0, i64 3, !pc !54 + store i8 0, ptr %344, align 1, !pc !54 + %345 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 0, i64 4, !pc !54 + store i8 0, ptr %345, align 4, !pc !54 + %346 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 0, i64 5, !pc !54 + store i8 0, ptr %346, align 1, !pc !54 + %347 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 0, !pc !54 + store i8 0, ptr %347, align 2, !pc !54 + %348 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 1, !pc !54 + store i8 0, ptr %348, align 1, !pc !54 + %349 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 2, !pc !54 + store i8 0, ptr %349, align 8, !pc !54 + %350 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 3, !pc !54 + store i8 0, ptr %350, align 1, !pc !54 + %351 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 4, !pc !54 + store i8 0, ptr %351, align 2, !pc !54 + %352 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 5, !pc !54 + store i8 0, ptr %352, align 1, !pc !54 + %353 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 6, !pc !54 + store i8 0, ptr %353, align 4, !pc !54 + %354 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, 
i64 0, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %354, align 1, !pc !54
+  %355 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %355, align 2, !pc !54
+  %356 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 0, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %356, align 1, !pc !54
+  %357 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %357, align 8, !pc !54
+  %358 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %358, align 1, !pc !54
+  %359 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %359, align 2, !pc !54
+  %360 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %360, align 1, !pc !54
+  %361 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %361, align 4, !pc !54
+  %362 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %362, align 1, !pc !54
+  %363 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %363, align 2, !pc !54
+  %364 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %364, align 1, !pc !54
+  %365 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %365, align 8, !pc !54
+  %366 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %366, align 1, !pc !54
+  %367 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %367, align 2, !pc !54
+  %368 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %368, align 1, !pc !54
+  %369 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %369, align 4, !pc !54
+  %370 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %370, align 1, !pc !54
+  %371 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %371, align 2, !pc !54
+  %372 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 1, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %372, align 1, !pc !54
+  %373 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 0, i64 0, !pc !54
+  store i8 0, ptr %373, align 8, !pc !54
+  %374 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 0, i64 1, !pc !54
+  store i8 0, ptr %374, align 1, !pc !54
+  %375 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 0, i64 2, !pc !54
+  store i8 0, ptr %375, align 2, !pc !54
+  %376 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 0, i64 3, !pc !54
+  store i8 0, ptr %376, align 1, !pc !54
+  %377 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 0, i64 4, !pc !54
+  store i8 0, ptr %377, align 4, !pc !54
+  %378 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 0, i64 5, !pc !54
+  store i8 0, ptr %378, align 1, !pc !54
+  %379 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %379, align 2, !pc !54
+  %380 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %380, align 1, !pc !54
+  %381 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %381, align 8, !pc !54
+  %382 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %382, align 1, !pc !54
+  %383 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %383, align 2, !pc !54
+  %384 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %384, align 1, !pc !54
+  %385 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %385, align 4, !pc !54
+  %386 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %386, align 1, !pc !54
+  %387 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %387, align 2, !pc !54
+  %388 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 2, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %388, align 1, !pc !54
+  %389 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 0, i64 0, !pc !54
+  store i8 0, ptr %389, align 8, !pc !54
+  %390 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 0, i64 1, !pc !54
+  store i8 0, ptr %390, align 1, !pc !54
+  %391 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 0, i64 2, !pc !54
+  store i8 0, ptr %391, align 2, !pc !54
+  %392 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 0, i64 3, !pc !54
+  store i8 0, ptr %392, align 1, !pc !54
+  %393 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 0, i64 4, !pc !54
+  store i8 0, ptr %393, align 4, !pc !54
+  %394 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 0, i64 5, !pc !54
+  store i8 0, ptr %394, align 1, !pc !54
+  %395 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %395, align 2, !pc !54
+  %396 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %396, align 1, !pc !54
+  %397 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %397, align 8, !pc !54
+  %398 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %398, align 1, !pc !54
+  %399 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %399, align 2, !pc !54
+  %400 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %400, align 1, !pc !54
+  %401 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %401, align 4, !pc !54
+  %402 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %402, align 1, !pc !54
+  %403 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %403, align 2, !pc !54
+  %404 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 3, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %404, align 1, !pc !54
+  %405 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 0, i64 0, !pc !54
+  store i8 0, ptr %405, align 8, !pc !54
+  %406 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 0, i64 1, !pc !54
+  store i8 0, ptr %406, align 1, !pc !54
+  %407 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 0, i64 2, !pc !54
+  store i8 0, ptr %407, align 2, !pc !54
+  %408 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 0, i64 3, !pc !54
+  store i8 0, ptr %408, align 1, !pc !54
+  %409 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 0, i64 4, !pc !54
+  store i8 0, ptr %409, align 4, !pc !54
+  %410 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 0, i64 5, !pc !54
+  store i8 0, ptr %410, align 1, !pc !54
+  %411 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %411, align 2, !pc !54
+  %412 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %412, align 1, !pc !54
+  %413 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %413, align 8, !pc !54
+  %414 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %414, align 1, !pc !54
+  %415 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %415, align 2, !pc !54
+  %416 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %416, align 1, !pc !54
+  %417 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %417, align 4, !pc !54
+  %418 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %418, align 1, !pc !54
+  %419 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %419, align 2, !pc !54
+  %420 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 4, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %420, align 1, !pc !54
+  %421 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 0, i64 0, !pc !54
+  store i8 0, ptr %421, align 8, !pc !54
+  %422 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 0, i64 1, !pc !54
+  store i8 0, ptr %422, align 1, !pc !54
+  %423 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 0, i64 2, !pc !54
+  store i8 0, ptr %423, align 2, !pc !54
+  %424 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 0, i64 3, !pc !54
+  store i8 0, ptr %424, align 1, !pc !54
+  %425 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 0, i64 4, !pc !54
+  store i8 0, ptr %425, align 4, !pc !54
+  %426 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 0, i64 5, !pc !54
+  store i8 0, ptr %426, align 1, !pc !54
+  %427 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %427, align 2, !pc !54
+  %428 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %428, align 1, !pc !54
+  %429 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %429, align 8, !pc !54
+  %430 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %430, align 1, !pc !54
+  %431 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %431, align 2, !pc !54
+  %432 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %432, align 1, !pc !54
+  %433 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %433, align 4, !pc !54
+  %434 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %434, align 1, !pc !54
+  %435 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %435, align 2, !pc !54
+  %436 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 5, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %436, align 1, !pc !54
+  %437 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 0, i64 0, !pc !54
+  store i8 0, ptr %437, align 8, !pc !54
+  %438 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 0, i64 1, !pc !54
+  store i8 0, ptr %438, align 1, !pc !54
+  %439 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 0, i64 2, !pc !54
+  store i8 0, ptr %439, align 2, !pc !54
+  %440 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 0, i64 3, !pc !54
+  store i8 0, ptr %440, align 1, !pc !54
+  %441 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 0, i64 4, !pc !54
+  store i8 0, ptr %441, align 4, !pc !54
+  %442 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 0, i64 5, !pc !54
+  store i8 0, ptr %442, align 1, !pc !54
+  %443 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %443, align 2, !pc !54
+  %444 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %444, align 1, !pc !54
+  %445 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %445, align 8, !pc !54
+  %446 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %446, align 1, !pc !54
+  %447 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %447, align 2, !pc !54
+  %448 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %448, align 1, !pc !54
+  %449 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %449, align 4, !pc !54
+  %450 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %450, align 1, !pc !54
+  %451 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %451, align 2, !pc !54
+  %452 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 6, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %452, align 1, !pc !54
+  %453 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 0, i64 0, !pc !54
+  store i8 0, ptr %453, align 8, !pc !54
+  %454 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 0, i64 1, !pc !54
+  store i8 0, ptr %454, align 1, !pc !54
+  %455 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 0, i64 2, !pc !54
+  store i8 0, ptr %455, align 2, !pc !54
+  %456 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 0, i64 3, !pc !54
+  store i8 0, ptr %456, align 1, !pc !54
+  %457 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 0, i64 4, !pc !54
+  store i8 0, ptr %457, align 4, !pc !54
+  %458 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 0, i64 5, !pc !54
+  store i8 0, ptr %458, align 1, !pc !54
+  %459 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 0, !pc !54
+  store i8 0, ptr %459, align 2, !pc !54
+  %460 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 1, !pc !54
+  store i8 0, ptr %460, align 1, !pc !54
+  %461 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 2, !pc !54
+  store i8 0, ptr %461, align 8, !pc !54
+  %462 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 3, !pc !54
+  store i8 0, ptr %462, align 1, !pc !54
+  %463 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 4, !pc !54
+  store i8 0, ptr %463, align 2, !pc !54
+  %464 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 5, !pc !54
+  store i8 0, ptr %464, align 1, !pc !54
+  %465 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 6, !pc !54
+  store i8 0, ptr %465, align 4, !pc !54
+  %466 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 7, !pc !54
+  store i8 0, ptr %466, align 1, !pc !54
+  %467 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 8, !pc !54
+  store i8 0, ptr %467, align 2, !pc !54
+  %468 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 7, i32 0, i64 7, i32 1, i32 0, i64 9, !pc !54
+  store i8 0, ptr %468, align 1, !pc !54
+  %469 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 0, i32 0, !pc !54
+  store i64 0, ptr %469, align 8, !pc !54
+  %470 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 0, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %470, align 8, !pc !54
+  %471 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 1, i32 0, !pc !54
+  store i64 0, ptr %471, align 8, !pc !54
+  %472 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 1, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %472, align 8, !pc !54
+  %473 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 2, i32 0, !pc !54
+  store i64 0, ptr %473, align 8, !pc !54
+  %474 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 2, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %474, align 8, !pc !54
+  %475 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 3, i32 0, !pc !54
+  store i64 0, ptr %475, align 8, !pc !54
+  %476 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 3, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %476, align 8, !pc !54
+  %477 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 4, i32 0, !pc !54
+  store i64 0, ptr %477, align 8, !pc !54
+  %478 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 4, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %478, align 8, !pc !54
+  %479 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 5, i32 0, !pc !54
+  store i64 0, ptr %479, align 8, !pc !54
+  %480 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 5, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %480, align 8, !pc !54
+  %481 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 6, i32 0, !pc !54
+  store i64 0, ptr %481, align 8, !pc !54
+  %482 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 6, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %482, align 8, !pc !54
+  %483 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 7, i32 0, !pc !54
+  store i64 0, ptr %483, align 8, !pc !54
+  %484 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 8, i32 0, i64 7, i32 1, i32 0, i32 0, i64 0, !pc !54
+  store i64 0, ptr %484, align 8, !pc !54
+  %485 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 0, !pc !54
+  store i8 0, ptr %485, align 8, !pc !54
+  %486 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 1, !pc !54
+  store i8 0, ptr %486, align 1, !pc !54
+  %487 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 2, !pc !54
+  store i8 0, ptr %487, align 2, !pc !54
+  %488 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 3, !pc !54
+  store i8 0, ptr %488, align 1, !pc !54
+  %489 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 4, !pc !54
+  store i8 0, ptr %489, align 4, !pc !54
+  %490 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 5, !pc !54
+  store i8 0, ptr %490, align 1, !pc !54
+  %491 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 6, !pc !54
+  store i8 0, ptr %491, align 2, !pc !54
+  %492 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 7, !pc !54
+  store i8 0, ptr %492, align 1, !pc !54
+  %493 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 8, !pc !54
+  store i8 0, ptr %493, align 8, !pc !54
+  %494 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 9, !pc !54
+  store i8 0, ptr %494, align 1, !pc !54
+  %495 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 10, !pc !54
+  store i8 0, ptr %495, align 2, !pc !54
+  %496 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 11, !pc !54
+  store i8 0, ptr %496, align 1, !pc !54
+  %497 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 12, !pc !54
+  store i8 0, ptr %497, align 4, !pc !54
+  %498 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 13, !pc !54
+  store i8 0, ptr %498, align 1, !pc !54
+  %499 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 14, !pc !54
+  store i8 0, ptr %499, align 2, !pc !54
+  %500 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 15, !pc !54
+  store i8 0, ptr %500, align 1, !pc !54
+  %501 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 16, !pc !54
+  store i8 0, ptr %501, align 8, !pc !54
+  %502 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 17, !pc !54
+  store i8 0, ptr %502, align 1, !pc !54
+  %503 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 18, !pc !54
+  store i8 0, ptr %503, align 2, !pc !54
+  %504 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 19, !pc !54
+  store i8 0, ptr %504, align 1, !pc !54
+  %505 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 20, i64 0, !pc !54
+  store i8 0, ptr %505, align 4, !pc !54
+  %506 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 20, i64 1, !pc !54
+  store i8 0, ptr %506, align 1, !pc !54
+  %507 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 20, i64 2, !pc !54
+  store i8 0, ptr %507, align 2, !pc !54
+  %508 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 9, i32 20, i64 3, !pc !54
+  store i8 0, ptr %508, align 1, !pc !54
+  %509 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 10, i32 0, !pc !54
+  store i64 0, ptr %509, align 8, !pc !54
+  %510 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 0, i32 0, !pc !54
+  store i16 0, ptr %510, align 8, !pc !54
+  %511 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 1, i32 0, !pc !54
+  store i16 0, ptr %511, align 2, !pc !54
+  %512 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 2, i32 0, !pc !54
+  store i8 0, ptr %512, align 4, !pc !54
+  %513 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 3, !pc !54
+  store i8 0, ptr %513, align 1, !pc !54
+  %514 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 4, !pc !54
+  store i16 0, ptr %514, align 2, !pc !54
+  %515 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 5, !pc !54
+  store i32 0, ptr %515, align 8, !pc !54
+  %516 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 6, i32 0, !pc !54
+  store i16 0, ptr %516, align 4, !pc !54
+  %517 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 7, !pc !54
+  store i16 0, ptr %517, align 2, !pc !54
+  %518 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 8, !pc !54
+  store i32 0, ptr %518, align 8, !pc !54
+  %519 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 9, i32 0, !pc !54
+  store i16 0, ptr %519, align 4, !pc !54
+  %520 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 10, !pc !54
+  store i16 0, ptr %520, align 2, !pc !54
+  %521 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 11, i32 0, !pc !54
+  store i32 0, ptr %521, align 8, !pc !54
+  %522 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 12, i32 0, !pc !54
+  store i32 0, ptr %522, align 4, !pc !54
+  %523 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %523, align 8, !pc !54
+  %524 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %524, align 1, !pc !54
+  %525 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %525, align 2, !pc !54
+  %526 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %526, align 1, !pc !54
+  %527 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %527, align 4, !pc !54
+  %528 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %528, align 1, !pc !54
+  %529 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %529, align 2, !pc !54
+  %530 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %530, align 1, !pc !54
+  %531 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %531, align 8, !pc !54
+  %532 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %532, align 1, !pc !54
+  %533 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 1, i64 0, !pc !54
+  store i8 0, ptr %533, align 2, !pc !54
+  %534 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 1, i64 1, !pc !54
+  store i8 0, ptr %534, align 1, !pc !54
+  %535 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 1, i64 2, !pc !54
+  store i8 0, ptr %535, align 4, !pc !54
+  %536 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 1, i64 3, !pc !54
+  store i8 0, ptr %536, align 1, !pc !54
+  %537 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 1, i64 4, !pc !54
+  store i8 0, ptr %537, align 2, !pc !54
+  %538 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 0, i32 1, i64 5, !pc !54
+  store i8 0, ptr %538, align 1, !pc !54
+  %539 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %539, align 8, !pc !54
+  %540 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %540, align 1, !pc !54
+  %541 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %541, align 2, !pc !54
+  %542 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %542, align 1, !pc !54
+  %543 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %543, align 4, !pc !54
+  %544 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %544, align 1, !pc !54
+  %545 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %545, align 2, !pc !54
+  %546 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %546, align 1, !pc !54
+  %547 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %547, align 8, !pc !54
+  %548 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %548, align 1, !pc !54
+  %549 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 1, i64 0, !pc !54
+  store i8 0, ptr %549, align 2, !pc !54
+  %550 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 1, i64 1, !pc !54
+  store i8 0, ptr %550, align 1, !pc !54
+  %551 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 1, i64 2, !pc !54
+  store i8 0, ptr %551, align 4, !pc !54
+  %552 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 1, i64 3, !pc !54
+  store i8 0, ptr %552, align 1, !pc !54
+  %553 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 1, i64 4, !pc !54
+  store i8 0, ptr %553, align 2, !pc !54
+  %554 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 1, i32 1, i64 5, !pc !54
+  store i8 0, ptr %554, align 1, !pc !54
+  %555 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %555, align 8, !pc !54
+  %556 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %556, align 1, !pc !54
+  %557 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %557, align 2, !pc !54
+  %558 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %558, align 1, !pc !54
+  %559 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %559, align 4, !pc !54
+  %560 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %560, align 1, !pc !54
+  %561 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %561, align 2, !pc !54
+  %562 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %562, align 1, !pc !54
+  %563 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %563, align 8, !pc !54
+  %564 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %564, align 1, !pc !54
+  %565 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 1, i64 0, !pc !54
+  store i8 0, ptr %565, align 2, !pc !54
+  %566 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 1, i64 1, !pc !54
+  store i8 0, ptr %566, align 1, !pc !54
+  %567 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 1, i64 2, !pc !54
+  store i8 0, ptr %567, align 4, !pc !54
+  %568 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 1, i64 3, !pc !54
+  store i8 0, ptr %568, align 1, !pc !54
+  %569 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 1, i64 4, !pc !54
+  store i8 0, ptr %569, align 2, !pc !54
+  %570 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 2, i32 1, i64 5, !pc !54
+  store i8 0, ptr %570, align 1, !pc !54
+  %571 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %571, align 8, !pc !54
+  %572 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %572, align 1, !pc !54
+  %573 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %573, align 2, !pc !54
+  %574 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %574, align 1, !pc !54
+  %575 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %575, align 4, !pc !54
+  %576 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %576, align 1, !pc !54
+  %577 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %577, align 2, !pc !54
+  %578 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %578, align 1, !pc !54
+  %579 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %579, align 8, !pc !54
+  %580 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %580, align 1, !pc !54
+  %581 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 1, i64 0, !pc !54
+  store i8 0, ptr %581, align 2, !pc !54
+  %582 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 1, i64 1, !pc !54
+  store i8 0, ptr %582, align 1, !pc !54
+  %583 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 1, i64 2, !pc !54
+  store i8 0, ptr %583, align 4, !pc !54
+  %584 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 1, i64 3, !pc !54
+  store i8 0, ptr %584, align 1, !pc !54
+  %585 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 1, i64 4, !pc !54
+  store i8 0, ptr %585, align 2, !pc !54
+  %586 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 3, i32 1, i64 5, !pc !54
+  store i8 0, ptr %586, align 1, !pc !54
+  %587 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %587, align 8, !pc !54
+  %588 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %588, align 1, !pc !54
+  %589 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %589, align 2, !pc !54
+  %590 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %590, align 1, !pc !54
+  %591 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %591, align 4, !pc !54
+  %592 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %592, align 1, !pc !54
+  %593 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %593, align 2, !pc !54
+  %594 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %594, align 1, !pc !54
+  %595 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %595, align 8, !pc !54
+  %596 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %596, align 1, !pc !54
+  %597 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 1, i64 0, !pc !54
+  store i8 0, ptr %597, align 2, !pc !54
+  %598 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 1, i64 1, !pc !54
+  store i8 0, ptr %598, align 1, !pc !54
+  %599 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 1, i64 2, !pc !54
+  store i8 0, ptr %599, align 4, !pc !54
+  %600 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 1, i64 3, !pc !54
+  store i8 0, ptr %600, align 1, !pc !54
+  %601 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 1, i64 4, !pc !54
+  store i8 0, ptr %601, align 2, !pc !54
+  %602 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 4, i32 1, i64 5, !pc !54
+  store i8 0, ptr %602, align 1, !pc !54
+  %603 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %603, align 8, !pc !54
+  %604 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %604, align 1, !pc !54
+  %605 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %605, align 2, !pc !54
+  %606 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %606, align 1, !pc !54
+  %607 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %607, align 4, !pc !54
+  %608 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %608, align 1, !pc !54
+  %609 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %609, align 2, !pc !54
+  %610 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %610, align 1, !pc !54
+  %611 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %611, align 8, !pc !54
+  %612 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %612, align 1, !pc !54
+  %613 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 1, i64 0, !pc !54
+  store i8 0, ptr %613, align 2, !pc !54
+  %614 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 1, i64 1, !pc !54
+  store i8 0, ptr %614, align 1, !pc !54
+  %615 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 1, i64 2, !pc !54
+  store i8 0, ptr %615, align 4, !pc !54
+  %616 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 1, i64 3, !pc !54
+  store i8 0, ptr %616, align 1, !pc !54
+  %617 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 1, i64 4, !pc !54
+  store i8 0, ptr %617, align 2, !pc !54
+  %618 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 5, i32 1, i64 5, !pc !54
+  store i8 0, ptr %618, align 1, !pc !54
+  %619 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %619, align 8, !pc !54
+  %620 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %620, align 1, !pc !54
+  %621 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %621, align 2, !pc !54
+  %622 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %622, align 1, !pc !54
+  %623 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %623, align 4, !pc !54
+  %624 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %624, align 1, !pc !54
+  %625 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %625, align 2, !pc !54
+  %626 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %626, align 1, !pc !54
+  %627 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %627, align 8, !pc !54
+  %628 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %628, align 1, !pc !54
+  %629 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 1, i64 0, !pc !54
+  store i8 0, ptr %629, align 2, !pc !54
+  %630 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 1, i64 1, !pc !54
+  store i8 0, ptr %630, align 1, !pc !54
+  %631 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 1, i64 2, !pc !54
+  store i8 0, ptr %631, align 4, !pc !54
+  %632 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 1, i64 3, !pc !54
+  store i8 0, ptr %632, align 1, !pc !54
+  %633 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 1, i64 4, !pc !54
+  store i8 0, ptr %633, align 2, !pc !54
+  %634 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 6, i32 1, i64 5, !pc !54
+  store i8 0, ptr %634, align 1, !pc !54
+  %635 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 0, !pc !54
+  store i8 0, ptr %635, align 8, !pc !54
+  %636 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 1, !pc !54
+  store i8 0, ptr %636, align 1, !pc !54
+  %637 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 2, !pc !54
+  store i8 0, ptr %637, align 2, !pc !54
+  %638 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 3, !pc !54
+  store i8 0, ptr %638, align 1, !pc !54
+  %639 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 4, !pc !54
+  store i8 0, ptr %639, align 4, !pc !54
+  %640 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 5, !pc !54
+  store i8 0, ptr %640, align 1, !pc !54
+  %641 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 6, !pc !54
+  store i8 0, ptr %641, align 2, !pc !54
+  %642 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 7, !pc !54
+  store i8 0, ptr %642, align 1, !pc !54
+  %643 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 8, !pc !54
+  store i8 0, ptr %643, align 8, !pc !54
+  %644 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 0, i32 0, i32 0, i64 9, !pc !54
+  store i8 0, ptr %644, align 1, !pc !54
+  %645 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 1, i64 0, !pc !54
+  store i8 0, ptr %645, align 2, !pc !54
+  %646 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 1, i64 1, !pc !54
+  store i8 0, ptr %646, align 1, !pc !54
+  %647 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 1, i64 2, !pc !54
+  store i8 0, ptr %647, align 4, !pc !54
+  %648 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 1, i64 3, !pc !54
+  store i8 0, ptr %648, align 1, !pc !54
+  %649 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 1, i64 4, !pc !54
+  store i8 0, ptr %649, align 2, !pc !54
+  %650 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 13, i64 7, i32 1, i64 5, !pc !54
+  store i8 0, ptr %650, align 1, !pc !54
+  %651 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 0, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %651, align 8, !pc !54
+  %652 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 1, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %652, align 8, !pc !54
+  %653 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 2, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %653, align 8, !pc !54
+  %654 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 3, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %654, align 8, !pc !54
+  %655 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 4, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %655, align 8, !pc !54
+  %656 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 5, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %656, align 8, !pc !54
+  %657 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 6, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %657, align 8, !pc !54
+  %658 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 7, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %658, align 8, !pc !54
+  %659 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 8, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %659, align 8, !pc !54
+  %660 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 9, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %660, align 8, !pc !54
+  %661 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 10, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %661, align 8, !pc !54
+  %662 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 11, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %662, align 8, !pc !54
+  %663 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 12, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %663, align 8, !pc !54
+  %664 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 13, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %664, align 8, !pc !54
+  %665 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 14, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %665, align 8, !pc !54
+  %666 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 0, i32 14, i64 15, i32 0, i32 0, i64 0, !pc !54
+  store i128 0, ptr %666, align 8, !pc !54
+  %667 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 0, !pc !54
+  store i8 0, ptr %667, align 8, !pc !54
+  %668 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 1, !pc !54
+  store i8 0, ptr %668, align 1, !pc !54
+  %669 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 2, !pc !54
+  store i8 0, ptr %669, align 2, !pc !54
+  %670 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 3, !pc !54
+  store i8 0, ptr %670, align 1, !pc !54
+  %671 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 4, !pc !54
+  store i8 0, ptr %671, align 4, !pc !54
+  %672 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 5, !pc !54
+  store i8 0, ptr %672, align 1, !pc !54
+  %673 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 6, !pc !54
+  store i8 0, ptr %673, align 2, !pc !54
+  %674 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 7, !pc !54
+  store i8 0, ptr %674, align 1, !pc !54
+  %675 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 8, !pc !54
+  store i8 0, ptr %675, align 8, !pc !54
+  %676 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 9, !pc !54
+  store i8 0, ptr %676, align 1, !pc !54
+  %677 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 10, !pc !54
+  store i8 0, ptr %677, align 2, !pc !54
+  %678 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 11, !pc !54
+  store i8 0, ptr %678, align 1, !pc !54
+  %679 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 12, !pc !54
+  store i8 0, ptr %679, align 4, !pc !54
+  %680 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 13, !pc !54
+  store i8 0, ptr %680, align 1, !pc !54
+  %681 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 14, !pc !54
+  store i8 0, ptr %681, align 2, !pc !54
+  %682 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 15, !pc !54
+  store i8 0, ptr %682, align 1, !pc !54
+  %683 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 16, !pc !54
+  store i8 0, ptr %683, align 8, !pc !54
+  %684 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 17, !pc !54
+  store i8 0, ptr %684, align 1, !pc !54
+  %685 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 18, !pc !54
+  store i8 0, ptr %685, align 2, !pc !54
+  %686 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 19, !pc !54
+  store i8 0, ptr %686, align 1, !pc !54
+  %687 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 20, !pc !54
+  store i8 0, ptr %687, align 4, !pc !54
+  %688 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 21, !pc !54
+  store i8 0, ptr %688, align 1, !pc !54
+  %689 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 22, !pc !54
+  store i8 0, ptr %689, align 2, !pc !54
+  %690 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 23, !pc !54
+  store i8 0, ptr %690, align 1, !pc !54
+  %691 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 24, !pc !54
+  store i8 0, ptr %691, align 8, !pc !54
+  %692 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 25, !pc !54
+  store i8 0, ptr %692, align 1, !pc !54
+  %693 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 26, !pc !54
+  store i8 0, ptr %693, align 2, !pc !54
+  %694 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 27, !pc !54
+  store i8 0, ptr %694, align 1, !pc !54
+  %695 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 28, !pc !54
+  store i8 0, ptr %695, align 4, !pc !54
+  %696 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 29, !pc !54
+  store i8 0, ptr %696, align 1, !pc !54
+  %697 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 30, !pc !54
+  store i8 0, ptr %697, align 2, !pc !54
+  %698 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 31, !pc !54
+  store i8 0, ptr %698, align 1, !pc !54
+  %699 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 32, !pc !54
+  store i8 0, ptr %699, align 8, !pc !54
+  %700 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 33, !pc !54
+  store i8 0, ptr %700, align 1, !pc !54
+  %701 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 34, !pc !54
+  store i8 0, ptr %701, align 2, !pc !54
+  %702 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 35, !pc !54
+  store i8 0, ptr %702, align 1, !pc !54
+  %703 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 36, !pc !54
+  store i8 0, ptr %703, align 4, !pc !54
+  %704 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 37, !pc !54
+  store i8 0, ptr %704, align 1, !pc !54
+  %705 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 38, !pc !54
+  store i8 0, ptr %705, align 2, !pc !54
+  %706 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 39, !pc !54
+  store i8 0, ptr %706, align 1, !pc !54
+  %707 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 40, !pc !54
+  store i8 0, ptr %707, align 8, !pc !54
+  %708 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 41, !pc !54
+  store i8 0, ptr %708, align 1, !pc !54
+  %709 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 42, !pc !54
+  store i8 0, ptr %709, align 2, !pc !54
+  %710 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 43, !pc !54
+  store i8 0, ptr %710, align 1, !pc !54
+  %711 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 44, !pc !54
+  store i8 0, ptr %711, align 4, !pc !54
+  %712 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 45, !pc !54
+  store i8 0, ptr %712, align 1, !pc !54
+  %713 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 46, !pc !54
+  store i8 0, ptr %713, align 2, !pc !54
+  %714 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 47, !pc !54
+  store i8 0, ptr %714, align 1, !pc !54
+  %715 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 48, !pc !54
+  store i8 0, ptr %715, align 8, !pc !54
+  %716 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 49, !pc !54
+  store i8 0, ptr %716, align 1, !pc !54
+  %717 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 50, !pc !54
+  store i8 0, ptr %717, align 2, !pc !54
+  %718 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 51, !pc !54
+  store i8 0, ptr %718, align 1, !pc !54
+  %719 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 52, !pc !54
+  store i8 0, ptr %719, align 4, !pc !54
+  %720 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 53, !pc !54
+  store i8 0, ptr %720, align 1, !pc !54
+  %721 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 54, !pc !54
+  store i8 0, ptr %721, align 2, !pc !54
+  %722 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 55, !pc !54
+  store i8 0, ptr %722, align 1, !pc !54
+  %723 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 56, !pc !54
+  store i8 0, ptr %723, align 8, !pc !54
+  %724 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 57, !pc !54
+  store i8 0, ptr %724, align 1, !pc !54
+  %725 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 58, !pc !54
+  store i8 0, ptr %725, align 2, !pc !54
+  %726 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 59, !pc !54
+  store i8 0, ptr %726, align 1, !pc !54
+  %727 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 60, !pc !54
+  store i8 0, ptr %727, align 4, !pc !54
+  %728 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 61, !pc !54
+  store i8 0, ptr %728, align 1, !pc !54
+  %729 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 62, !pc !54
+  store i8 0, ptr %729, align 2, !pc !54
+  %730 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 63, !pc !54
+  store i8 0, ptr %730, align 1, !pc !54
+  %731 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 64, !pc !54
+  store i8 0, ptr %731, align 8, !pc !54
+  %732 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 65, !pc !54
+  store i8 0, ptr %732, align 1, !pc !54
+  %733 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 66, !pc !54
+  store i8 0, ptr %733, align 2, !pc !54
+  %734 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 67, !pc !54
+  store i8 0, ptr %734, align 1, !pc !54
+  %735 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 68, !pc !54
+  store i8 0, ptr %735, align 4, !pc !54
+  %736 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 69, !pc !54
+  store i8 0, ptr %736, align 1, !pc !54
+  %737 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 70, !pc !54
+  store i8 0, ptr %737, align 2, !pc !54
+  %738 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 71, !pc !54
+  store i8 0, ptr %738, align 1, !pc !54
+  %739 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 72, !pc !54
+  store i8 0, ptr %739, align 8, !pc !54
+  %740 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 73, !pc !54
+  store i8 0, ptr %740, align 1, !pc !54
+  %741 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 74, !pc !54
+  store i8 0, ptr %741, align 2, !pc !54
+  %742 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 75, !pc !54
+  store i8 0, ptr %742, align 1, !pc !54
+  %743 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 76, !pc !54
+  store i8 0, ptr %743, align 4, !pc !54
+  %744 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 77, !pc !54
+  store i8 0, ptr %744, align 1, !pc !54
+  %745 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 78, !pc !54
+  store i8 0, ptr %745, align 2, !pc !54
+  %746 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 79, !pc !54
+  store i8 0, ptr %746, align 1, !pc !54
+  %747 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 80, !pc !54
+  store i8 0, ptr %747, align 8, !pc !54
+  %748 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 81, !pc !54
+  store i8 0, ptr %748, align 1, !pc !54
+  %749 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 82, !pc !54
+  store i8 0, ptr %749, align 2, !pc !54
+  %750 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 83, !pc !54
+  store i8 0, ptr %750, align 1, !pc !54
+  %751 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 84, !pc !54
+  store i8 0, ptr %751, align 4, !pc !54
+  %752 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 85, !pc !54
+  store i8 0, ptr %752, align 1, !pc !54
+  %753 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 86, !pc !54
+  store i8 0, ptr %753, align 2, !pc !54
+  %754 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 87, !pc !54
+  store i8 0, ptr %754, align 1, !pc !54
+  %755 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 88, !pc !54
+  store i8 0, ptr %755, align 8, !pc !54
+  %756 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 89, !pc !54
+  store i8 0, ptr %756, align 1, !pc !54
+  %757 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 90, !pc !54
+  store i8 0, ptr %757, align 2, !pc !54
+  %758 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 91, !pc !54
+  store i8 0, ptr %758, align 1, !pc !54
+  %759 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 92, !pc !54
+  store i8 0, ptr %759, align 4, !pc !54
+  %760 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 93, !pc !54
+  store i8 0, ptr %760, align 1, !pc !54
+  %761 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 94, !pc !54
+  store i8 0, ptr %761, align 2, !pc !54
+  %762 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 11, i32 0, i32 1, i64 95, !pc !54
+  store i8 0, ptr %762, align 1, !pc !54
+  %763 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 0, i32 0, i32 0, !pc !54
+  store i64 0, ptr %763, align 8, !pc !54
+  %764 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 0, i32 1, !pc !54
+  store i32 0, ptr %764, align 8, !pc !54
+  %765 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 0, i32 2, !pc !54
+  store i32 0, ptr %765, align 4, !pc !54
+  %766 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 1, i32 0, i32 0, !pc !54
+  store i64 0, ptr %766, align 8, !pc !54
+  %767 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 1, i32 1, !pc !54
+  store i32 0, ptr %767, align 8, !pc !54
+  %768 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 1, i32 2, !pc !54
+  store i32 0, ptr %768, align 4, !pc !54
+  %769 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 2, i32 0, i32 0, !pc !54
+  store i64 0, ptr %769, align 8, !pc !54
+  %770 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 2, i32 1, !pc !54
+  store i32 0, ptr %770, align 8, !pc !54
+  %771 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 2, i32 2, !pc !54
+  store i32 0, ptr %771, align 4, !pc !54
+  %772 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 3, i32 0, i32 0, !pc !54
+  store i64 0, ptr %772, align 8, !pc !54
+  %773 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 3, i32 1, !pc !54
+  store i32 0, ptr %773, align 8, !pc !54
+  %774 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 3, i32 2, !pc !54
+  store i32 0, ptr %774, align 4, !pc !54
+  %775 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 4, i32 0, i32 0, !pc !54
+  store i64 0, ptr %775, align 8, !pc !54
+  %776 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 4, i32 1, !pc !54
+  store i32 0, ptr %776, align 8, !pc !54
+  %777 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 4, i32 2, !pc !54
+  store i32 0, ptr %777, align 4, !pc !54
+  %778 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 5, i32 0, i32 0, !pc !54
+  store i64 0, ptr %778, align 8, !pc !54
+  %779 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 5, i32 1, !pc !54
+  store i32 0, ptr %779, align 8, !pc !54
+  %780 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 12, i32 5, i32 2, !pc !54
+  store i32 0, ptr %780, align 4, !pc !54
+  %781 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 0, i32 0, !pc !54
+  store i64 0, ptr %781, align 8, !pc !54
+  %782 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 0, i32 1, !pc !54
+  store i64 0, ptr %782, align 8, !pc !54
+  %783 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 1, i32 0, !pc !54
+  store i64 0, ptr %783, align 8, !pc !54
+  %784 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 1, i32 1, !pc !54
+  store i64 0, ptr %784, align 8, !pc !54
+  %785 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 2, i32 0, !pc !54
+  store i64 0, ptr %785, align 8, !pc !54
+  %786 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 2, i32 1, !pc !54
+  store i64 0, ptr %786, align 8, !pc !54
+  %787 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 3, i32 0, !pc !54
+  store i64 0, ptr %787, align 8, !pc !54
+  %788 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 3, i32 1, !pc !54
+  store i64 0, ptr %788, align 8, !pc !54
+  %789 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 4, i32 0, !pc !54
+  store i64 0, ptr %789, align 8, !pc !54
+  %790 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 4, i32 1, !pc !54
+  store i64 0, ptr %790, align 8, !pc !54
+  %791 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 5, i32 0, !pc !54
+  store i64 0, ptr %791, align 8, !pc !54
+  %792 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 5, i32 1, !pc !54
+  store i64 0, ptr %792, align 8, !pc !54
+  %793 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 6, i32 0, !pc !54
+  store i64 0, ptr %793, align 8, !pc !54
+  %794 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 6, i32 1, !pc !54
+  store i64 0, ptr %794, align 8, !pc !54
+  %795 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 7, i32 0, !pc !54
+  store i64 0, ptr %795, align 8, !pc !54
+  %796 = getelementptr inbounds %struct.State, ptr %7, i64 0, i32 0, i32 13, i32 0, i64 7, i32 1, !pc !54
+  store i64 0, ptr %796, align 8, !pc !54
+  %797 = load i64, ptr @__anvill_reg_RAX, align 8, !pc !54
+  store i64 %797, ptr %308, align 8, !pc !54
+  %798 = load i64, ptr @__anvill_reg_RBX, align 8, !pc !54
+  store i64 %798, ptr %310, align 8, !pc !54
+  %799 = load i64, ptr @__anvill_reg_RCX, align 8, !pc !54
+  store i64 %799, ptr %312, align 8, !pc !54
+  %800 = load i64, ptr @__anvill_reg_RDX, align 8, !pc !54
+  store i64 %800, ptr %314, align 8, !pc !54
+
%801 = load i64, ptr @__anvill_reg_RDI, align 8, !pc !54 + store i64 %801, ptr %318, align 8, !pc !54 + %802 = load i64, ptr @__anvill_reg_RBP, align 8, !pc !54 + store i64 %802, ptr %322, align 8, !pc !54 + %803 = load i64, ptr @__anvill_reg_R8, align 8, !pc !54 + store i64 %803, ptr %324, align 8, !pc !54 + %804 = load i64, ptr @__anvill_reg_R9, align 8, !pc !54 + store i64 %804, ptr %326, align 8, !pc !54 + %805 = load i64, ptr @__anvill_reg_R10, align 8, !pc !54 + store i64 %805, ptr %328, align 8, !pc !54 + %806 = load i64, ptr @__anvill_reg_R11, align 8, !pc !54 + store i64 %806, ptr %330, align 8, !pc !54 + %807 = load i64, ptr @__anvill_reg_R12, align 8, !pc !54 + store i64 %807, ptr %332, align 8, !pc !54 + %808 = load i64, ptr @__anvill_reg_R13, align 8, !pc !54 + store i64 %808, ptr %334, align 8, !pc !54 + %809 = load i64, ptr @__anvill_reg_R14, align 8, !pc !54 + store i64 %809, ptr %336, align 8, !pc !54 + %810 = load i64, ptr @__anvill_reg_R15, align 8, !pc !54 + store i64 %810, ptr %338, align 8, !pc !54 + %811 = load i16, ptr @__anvill_reg_SS, align 2, !pc !54 + store i16 %811, ptr %284, align 2, !pc !54 + %812 = load i16, ptr @__anvill_reg_ES, align 2, !pc !54 + store i16 %812, ptr %286, align 2, !pc !54 + %813 = load i16, ptr @__anvill_reg_GS, align 2, !pc !54 + store i16 %813, ptr %288, align 2, !pc !54 + %814 = load i16, ptr @__anvill_reg_FS, align 2, !pc !54 + store i16 %814, ptr %290, align 2, !pc !54 + %815 = load i16, ptr @__anvill_reg_DS, align 2, !pc !54 + store i16 %815, ptr %292, align 2, !pc !54 + %816 = load i16, ptr @__anvill_reg_CS, align 2, !pc !54 + store i16 %816, ptr %294, align 2, !pc !54 + %817 = load i8, ptr @__anvill_reg_XMM0, align 1, !pc !54 + %818 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 1), align 1, !pc !54 + %819 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 2), align 1, !pc !54 + %820 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 3), align 1, !pc !54 + %821 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 4), align 1, !pc !54 + %822 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 5), align 1, !pc !54 + %823 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 6), align 1, !pc !54 + %824 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 7), align 1, !pc !54 + %825 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 8), align 1, !pc !54 + %826 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 9), align 1, !pc !54 + %827 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 10), align 1, !pc !54 + %828 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 11), align 1, !pc !54 + %829 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 12), align 1, !pc !54 + %830 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 13), align 1, !pc !54 + %831 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 14), align 1, !pc !54 + %832 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM0, i64 0, i64 15), align 1, !pc !54 + store i8 %817, ptr %10, align 8, !pc !54 + %833 = getelementptr inbounds i8, ptr %7, i64 17, !pc !54 + store i8 %818, ptr %833, align 1, 
!pc !54 + %834 = getelementptr inbounds i8, ptr %7, i64 18, !pc !54 + store i8 %819, ptr %834, align 2, !pc !54 + %835 = getelementptr inbounds i8, ptr %7, i64 19, !pc !54 + store i8 %820, ptr %835, align 1, !pc !54 + %836 = getelementptr inbounds i8, ptr %7, i64 20, !pc !54 + store i8 %821, ptr %836, align 4, !pc !54 + %837 = getelementptr inbounds i8, ptr %7, i64 21, !pc !54 + store i8 %822, ptr %837, align 1, !pc !54 + %838 = getelementptr inbounds i8, ptr %7, i64 22, !pc !54 + store i8 %823, ptr %838, align 2, !pc !54 + %839 = getelementptr inbounds i8, ptr %7, i64 23, !pc !54 + store i8 %824, ptr %839, align 1, !pc !54 + store i8 %825, ptr %11, align 8, !pc !54 + %840 = getelementptr inbounds i8, ptr %7, i64 25, !pc !54 + store i8 %826, ptr %840, align 1, !pc !54 + %841 = getelementptr inbounds i8, ptr %7, i64 26, !pc !54 + store i8 %827, ptr %841, align 2, !pc !54 + %842 = getelementptr inbounds i8, ptr %7, i64 27, !pc !54 + store i8 %828, ptr %842, align 1, !pc !54 + %843 = getelementptr inbounds i8, ptr %7, i64 28, !pc !54 + store i8 %829, ptr %843, align 4, !pc !54 + %844 = getelementptr inbounds i8, ptr %7, i64 29, !pc !54 + store i8 %830, ptr %844, align 1, !pc !54 + %845 = getelementptr inbounds i8, ptr %7, i64 30, !pc !54 + store i8 %831, ptr %845, align 2, !pc !54 + %846 = getelementptr inbounds i8, ptr %7, i64 31, !pc !54 + store i8 %832, ptr %846, align 1, !pc !54 + %847 = load i8, ptr @__anvill_reg_XMM1, align 1, !pc !54 + %848 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 1), align 1, !pc !54 + %849 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 2), align 1, !pc !54 + %850 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 3), align 1, !pc !54 + %851 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 4), align 1, !pc !54 + %852 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 5), align 1, !pc !54 + %853 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 6), align 1, !pc !54 + %854 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 7), align 1, !pc !54 + %855 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 8), align 1, !pc !54 + %856 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 9), align 1, !pc !54 + %857 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 10), align 1, !pc !54 + %858 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 11), align 1, !pc !54 + %859 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 12), align 1, !pc !54 + %860 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 13), align 1, !pc !54 + %861 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 14), align 1, !pc !54 + %862 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM1, i64 0, i64 15), align 1, !pc !54 + store i8 %847, ptr %18, align 8, !pc !54 + %863 = getelementptr inbounds i8, ptr %7, i64 81, !pc !54 + store i8 %848, ptr %863, align 1, !pc !54 + %864 = getelementptr inbounds i8, ptr %7, i64 82, !pc !54 + store i8 %849, ptr %864, align 2, !pc !54 + %865 = getelementptr inbounds i8, ptr %7, i64 83, !pc !54 + store i8 %850, ptr %865, align 1, !pc !54 + %866 = getelementptr inbounds i8, 
ptr %7, i64 84, !pc !54 + store i8 %851, ptr %866, align 4, !pc !54 + %867 = getelementptr inbounds i8, ptr %7, i64 85, !pc !54 + store i8 %852, ptr %867, align 1, !pc !54 + %868 = getelementptr inbounds i8, ptr %7, i64 86, !pc !54 + store i8 %853, ptr %868, align 2, !pc !54 + %869 = getelementptr inbounds i8, ptr %7, i64 87, !pc !54 + store i8 %854, ptr %869, align 1, !pc !54 + store i8 %855, ptr %19, align 8, !pc !54 + %870 = getelementptr inbounds i8, ptr %7, i64 89, !pc !54 + store i8 %856, ptr %870, align 1, !pc !54 + %871 = getelementptr inbounds i8, ptr %7, i64 90, !pc !54 + store i8 %857, ptr %871, align 2, !pc !54 + %872 = getelementptr inbounds i8, ptr %7, i64 91, !pc !54 + store i8 %858, ptr %872, align 1, !pc !54 + %873 = getelementptr inbounds i8, ptr %7, i64 92, !pc !54 + store i8 %859, ptr %873, align 4, !pc !54 + %874 = getelementptr inbounds i8, ptr %7, i64 93, !pc !54 + store i8 %860, ptr %874, align 1, !pc !54 + %875 = getelementptr inbounds i8, ptr %7, i64 94, !pc !54 + store i8 %861, ptr %875, align 2, !pc !54 + %876 = getelementptr inbounds i8, ptr %7, i64 95, !pc !54 + store i8 %862, ptr %876, align 1, !pc !54 + %877 = load i8, ptr @__anvill_reg_XMM2, align 1, !pc !54 + %878 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 1), align 1, !pc !54 + %879 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 2), align 1, !pc !54 + %880 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 3), align 1, !pc !54 + %881 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 4), align 1, !pc !54 + %882 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 5), align 1, !pc !54 + %883 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 6), align 1, !pc !54 + %884 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 7), align 1, !pc !54 + %885 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 8), align 1, !pc !54 + %886 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 9), align 1, !pc !54 + %887 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 10), align 1, !pc !54 + %888 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 11), align 1, !pc !54 + %889 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 12), align 1, !pc !54 + %890 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 13), align 1, !pc !54 + %891 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 14), align 1, !pc !54 + %892 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM2, i64 0, i64 15), align 1, !pc !54 + store i8 %877, ptr %26, align 8, !pc !54 + %893 = getelementptr inbounds i8, ptr %7, i64 145, !pc !54 + store i8 %878, ptr %893, align 1, !pc !54 + %894 = getelementptr inbounds i8, ptr %7, i64 146, !pc !54 + store i8 %879, ptr %894, align 2, !pc !54 + %895 = getelementptr inbounds i8, ptr %7, i64 147, !pc !54 + store i8 %880, ptr %895, align 1, !pc !54 + %896 = getelementptr inbounds i8, ptr %7, i64 148, !pc !54 + store i8 %881, ptr %896, align 4, !pc !54 + %897 = getelementptr inbounds i8, ptr %7, i64 149, !pc !54 + store i8 %882, ptr %897, align 1, !pc !54 + %898 = getelementptr inbounds i8, ptr %7, i64 150, !pc !54 + store i8 
%883, ptr %898, align 2, !pc !54 + %899 = getelementptr inbounds i8, ptr %7, i64 151, !pc !54 + store i8 %884, ptr %899, align 1, !pc !54 + store i8 %885, ptr %27, align 8, !pc !54 + %900 = getelementptr inbounds i8, ptr %7, i64 153, !pc !54 + store i8 %886, ptr %900, align 1, !pc !54 + %901 = getelementptr inbounds i8, ptr %7, i64 154, !pc !54 + store i8 %887, ptr %901, align 2, !pc !54 + %902 = getelementptr inbounds i8, ptr %7, i64 155, !pc !54 + store i8 %888, ptr %902, align 1, !pc !54 + %903 = getelementptr inbounds i8, ptr %7, i64 156, !pc !54 + store i8 %889, ptr %903, align 4, !pc !54 + %904 = getelementptr inbounds i8, ptr %7, i64 157, !pc !54 + store i8 %890, ptr %904, align 1, !pc !54 + %905 = getelementptr inbounds i8, ptr %7, i64 158, !pc !54 + store i8 %891, ptr %905, align 2, !pc !54 + %906 = getelementptr inbounds i8, ptr %7, i64 159, !pc !54 + store i8 %892, ptr %906, align 1, !pc !54 + %907 = load i8, ptr @__anvill_reg_XMM3, align 1, !pc !54 + %908 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 1), align 1, !pc !54 + %909 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 2), align 1, !pc !54 + %910 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 3), align 1, !pc !54 + %911 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 4), align 1, !pc !54 + %912 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 5), align 1, !pc !54 + %913 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 6), align 1, !pc !54 + %914 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 7), align 1, !pc !54 + %915 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 8), align 1, !pc !54 + %916 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 9), align 1, !pc !54 + %917 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 10), align 1, !pc !54 + %918 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 11), align 1, !pc !54 + %919 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 12), align 1, !pc !54 + %920 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 13), align 1, !pc !54 + %921 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 14), align 1, !pc !54 + %922 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM3, i64 0, i64 15), align 1, !pc !54 + store i8 %907, ptr %34, align 8, !pc !54 + %923 = getelementptr inbounds i8, ptr %7, i64 209, !pc !54 + store i8 %908, ptr %923, align 1, !pc !54 + %924 = getelementptr inbounds i8, ptr %7, i64 210, !pc !54 + store i8 %909, ptr %924, align 2, !pc !54 + %925 = getelementptr inbounds i8, ptr %7, i64 211, !pc !54 + store i8 %910, ptr %925, align 1, !pc !54 + %926 = getelementptr inbounds i8, ptr %7, i64 212, !pc !54 + store i8 %911, ptr %926, align 4, !pc !54 + %927 = getelementptr inbounds i8, ptr %7, i64 213, !pc !54 + store i8 %912, ptr %927, align 1, !pc !54 + %928 = getelementptr inbounds i8, ptr %7, i64 214, !pc !54 + store i8 %913, ptr %928, align 2, !pc !54 + %929 = getelementptr inbounds i8, ptr %7, i64 215, !pc !54 + store i8 %914, ptr %929, align 1, !pc !54 + store i8 %915, ptr %35, align 8, !pc !54 + %930 = getelementptr inbounds i8, ptr %7, i64 217, !pc 
!54 + store i8 %916, ptr %930, align 1, !pc !54 + %931 = getelementptr inbounds i8, ptr %7, i64 218, !pc !54 + store i8 %917, ptr %931, align 2, !pc !54 + %932 = getelementptr inbounds i8, ptr %7, i64 219, !pc !54 + store i8 %918, ptr %932, align 1, !pc !54 + %933 = getelementptr inbounds i8, ptr %7, i64 220, !pc !54 + store i8 %919, ptr %933, align 4, !pc !54 + %934 = getelementptr inbounds i8, ptr %7, i64 221, !pc !54 + store i8 %920, ptr %934, align 1, !pc !54 + %935 = getelementptr inbounds i8, ptr %7, i64 222, !pc !54 + store i8 %921, ptr %935, align 2, !pc !54 + %936 = getelementptr inbounds i8, ptr %7, i64 223, !pc !54 + store i8 %922, ptr %936, align 1, !pc !54 + %937 = load i8, ptr @__anvill_reg_XMM4, align 1, !pc !54 + %938 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 1), align 1, !pc !54 + %939 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 2), align 1, !pc !54 + %940 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 3), align 1, !pc !54 + %941 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 4), align 1, !pc !54 + %942 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 5), align 1, !pc !54 + %943 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 6), align 1, !pc !54 + %944 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 7), align 1, !pc !54 + %945 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 8), align 1, !pc !54 + %946 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 9), align 1, !pc !54 + %947 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 10), align 1, !pc !54 + %948 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 11), align 1, !pc !54 + %949 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 12), align 1, !pc !54 + %950 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 13), align 1, !pc !54 + %951 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 14), align 1, !pc !54 + %952 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM4, i64 0, i64 15), align 1, !pc !54 + store i8 %937, ptr %42, align 8, !pc !54 + %953 = getelementptr inbounds i8, ptr %7, i64 273, !pc !54 + store i8 %938, ptr %953, align 1, !pc !54 + %954 = getelementptr inbounds i8, ptr %7, i64 274, !pc !54 + store i8 %939, ptr %954, align 2, !pc !54 + %955 = getelementptr inbounds i8, ptr %7, i64 275, !pc !54 + store i8 %940, ptr %955, align 1, !pc !54 + %956 = getelementptr inbounds i8, ptr %7, i64 276, !pc !54 + store i8 %941, ptr %956, align 4, !pc !54 + %957 = getelementptr inbounds i8, ptr %7, i64 277, !pc !54 + store i8 %942, ptr %957, align 1, !pc !54 + %958 = getelementptr inbounds i8, ptr %7, i64 278, !pc !54 + store i8 %943, ptr %958, align 2, !pc !54 + %959 = getelementptr inbounds i8, ptr %7, i64 279, !pc !54 + store i8 %944, ptr %959, align 1, !pc !54 + store i8 %945, ptr %43, align 8, !pc !54 + %960 = getelementptr inbounds i8, ptr %7, i64 281, !pc !54 + store i8 %946, ptr %960, align 1, !pc !54 + %961 = getelementptr inbounds i8, ptr %7, i64 282, !pc !54 + store i8 %947, ptr %961, align 2, !pc !54 + %962 = getelementptr inbounds i8, ptr %7, i64 283, !pc !54 + store i8 %948, ptr 
%962, align 1, !pc !54 + %963 = getelementptr inbounds i8, ptr %7, i64 284, !pc !54 + store i8 %949, ptr %963, align 4, !pc !54 + %964 = getelementptr inbounds i8, ptr %7, i64 285, !pc !54 + store i8 %950, ptr %964, align 1, !pc !54 + %965 = getelementptr inbounds i8, ptr %7, i64 286, !pc !54 + store i8 %951, ptr %965, align 2, !pc !54 + %966 = getelementptr inbounds i8, ptr %7, i64 287, !pc !54 + store i8 %952, ptr %966, align 1, !pc !54 + %967 = load i8, ptr @__anvill_reg_XMM5, align 1, !pc !54 + %968 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 1), align 1, !pc !54 + %969 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 2), align 1, !pc !54 + %970 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 3), align 1, !pc !54 + %971 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 4), align 1, !pc !54 + %972 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 5), align 1, !pc !54 + %973 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 6), align 1, !pc !54 + %974 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 7), align 1, !pc !54 + %975 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 8), align 1, !pc !54 + %976 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 9), align 1, !pc !54 + %977 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 10), align 1, !pc !54 + %978 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 11), align 1, !pc !54 + %979 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 12), align 1, !pc !54 + %980 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 13), align 1, !pc !54 + %981 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 14), align 1, !pc !54 + %982 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM5, i64 0, i64 15), align 1, !pc !54 + store i8 %967, ptr %50, align 8, !pc !54 + %983 = getelementptr inbounds i8, ptr %7, i64 337, !pc !54 + store i8 %968, ptr %983, align 1, !pc !54 + %984 = getelementptr inbounds i8, ptr %7, i64 338, !pc !54 + store i8 %969, ptr %984, align 2, !pc !54 + %985 = getelementptr inbounds i8, ptr %7, i64 339, !pc !54 + store i8 %970, ptr %985, align 1, !pc !54 + %986 = getelementptr inbounds i8, ptr %7, i64 340, !pc !54 + store i8 %971, ptr %986, align 4, !pc !54 + %987 = getelementptr inbounds i8, ptr %7, i64 341, !pc !54 + store i8 %972, ptr %987, align 1, !pc !54 + %988 = getelementptr inbounds i8, ptr %7, i64 342, !pc !54 + store i8 %973, ptr %988, align 2, !pc !54 + %989 = getelementptr inbounds i8, ptr %7, i64 343, !pc !54 + store i8 %974, ptr %989, align 1, !pc !54 + store i8 %975, ptr %51, align 8, !pc !54 + %990 = getelementptr inbounds i8, ptr %7, i64 345, !pc !54 + store i8 %976, ptr %990, align 1, !pc !54 + %991 = getelementptr inbounds i8, ptr %7, i64 346, !pc !54 + store i8 %977, ptr %991, align 2, !pc !54 + %992 = getelementptr inbounds i8, ptr %7, i64 347, !pc !54 + store i8 %978, ptr %992, align 1, !pc !54 + %993 = getelementptr inbounds i8, ptr %7, i64 348, !pc !54 + store i8 %979, ptr %993, align 4, !pc !54 + %994 = getelementptr inbounds i8, ptr %7, i64 349, !pc !54 + store i8 %980, ptr %994, align 1, !pc !54 + 
%995 = getelementptr inbounds i8, ptr %7, i64 350, !pc !54 + store i8 %981, ptr %995, align 2, !pc !54 + %996 = getelementptr inbounds i8, ptr %7, i64 351, !pc !54 + store i8 %982, ptr %996, align 1, !pc !54 + %997 = load i8, ptr @__anvill_reg_XMM6, align 1, !pc !54 + %998 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 1), align 1, !pc !54 + %999 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 2), align 1, !pc !54 + %1000 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 3), align 1, !pc !54 + %1001 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 4), align 1, !pc !54 + %1002 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 5), align 1, !pc !54 + %1003 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 6), align 1, !pc !54 + %1004 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 7), align 1, !pc !54 + %1005 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 8), align 1, !pc !54 + %1006 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 9), align 1, !pc !54 + %1007 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 10), align 1, !pc !54 + %1008 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 11), align 1, !pc !54 + %1009 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 12), align 1, !pc !54 + %1010 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 13), align 1, !pc !54 + %1011 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 14), align 1, !pc !54 + %1012 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM6, i64 0, i64 15), align 1, !pc !54 + store i8 %997, ptr %58, align 8, !pc !54 + %1013 = getelementptr inbounds i8, ptr %7, i64 401, !pc !54 + store i8 %998, ptr %1013, align 1, !pc !54 + %1014 = getelementptr inbounds i8, ptr %7, i64 402, !pc !54 + store i8 %999, ptr %1014, align 2, !pc !54 + %1015 = getelementptr inbounds i8, ptr %7, i64 403, !pc !54 + store i8 %1000, ptr %1015, align 1, !pc !54 + %1016 = getelementptr inbounds i8, ptr %7, i64 404, !pc !54 + store i8 %1001, ptr %1016, align 4, !pc !54 + %1017 = getelementptr inbounds i8, ptr %7, i64 405, !pc !54 + store i8 %1002, ptr %1017, align 1, !pc !54 + %1018 = getelementptr inbounds i8, ptr %7, i64 406, !pc !54 + store i8 %1003, ptr %1018, align 2, !pc !54 + %1019 = getelementptr inbounds i8, ptr %7, i64 407, !pc !54 + store i8 %1004, ptr %1019, align 1, !pc !54 + store i8 %1005, ptr %59, align 8, !pc !54 + %1020 = getelementptr inbounds i8, ptr %7, i64 409, !pc !54 + store i8 %1006, ptr %1020, align 1, !pc !54 + %1021 = getelementptr inbounds i8, ptr %7, i64 410, !pc !54 + store i8 %1007, ptr %1021, align 2, !pc !54 + %1022 = getelementptr inbounds i8, ptr %7, i64 411, !pc !54 + store i8 %1008, ptr %1022, align 1, !pc !54 + %1023 = getelementptr inbounds i8, ptr %7, i64 412, !pc !54 + store i8 %1009, ptr %1023, align 4, !pc !54 + %1024 = getelementptr inbounds i8, ptr %7, i64 413, !pc !54 + store i8 %1010, ptr %1024, align 1, !pc !54 + %1025 = getelementptr inbounds i8, ptr %7, i64 414, !pc !54 + store i8 %1011, ptr %1025, align 2, !pc !54 + %1026 = getelementptr inbounds i8, ptr %7, i64 415, !pc !54 + store i8 %1012, ptr 
%1026, align 1, !pc !54 + %1027 = load i8, ptr @__anvill_reg_XMM7, align 1, !pc !54 + %1028 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 1), align 1, !pc !54 + %1029 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 2), align 1, !pc !54 + %1030 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 3), align 1, !pc !54 + %1031 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 4), align 1, !pc !54 + %1032 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 5), align 1, !pc !54 + %1033 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 6), align 1, !pc !54 + %1034 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 7), align 1, !pc !54 + %1035 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 8), align 1, !pc !54 + %1036 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 9), align 1, !pc !54 + %1037 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 10), align 1, !pc !54 + %1038 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 11), align 1, !pc !54 + %1039 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 12), align 1, !pc !54 + %1040 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 13), align 1, !pc !54 + %1041 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 14), align 1, !pc !54 + %1042 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM7, i64 0, i64 15), align 1, !pc !54 + store i8 %1027, ptr %66, align 8, !pc !54 + %1043 = getelementptr inbounds i8, ptr %7, i64 465, !pc !54 + store i8 %1028, ptr %1043, align 1, !pc !54 + %1044 = getelementptr inbounds i8, ptr %7, i64 466, !pc !54 + store i8 %1029, ptr %1044, align 2, !pc !54 + %1045 = getelementptr inbounds i8, ptr %7, i64 467, !pc !54 + store i8 %1030, ptr %1045, align 1, !pc !54 + %1046 = getelementptr inbounds i8, ptr %7, i64 468, !pc !54 + store i8 %1031, ptr %1046, align 4, !pc !54 + %1047 = getelementptr inbounds i8, ptr %7, i64 469, !pc !54 + store i8 %1032, ptr %1047, align 1, !pc !54 + %1048 = getelementptr inbounds i8, ptr %7, i64 470, !pc !54 + store i8 %1033, ptr %1048, align 2, !pc !54 + %1049 = getelementptr inbounds i8, ptr %7, i64 471, !pc !54 + store i8 %1034, ptr %1049, align 1, !pc !54 + store i8 %1035, ptr %67, align 8, !pc !54 + %1050 = getelementptr inbounds i8, ptr %7, i64 473, !pc !54 + store i8 %1036, ptr %1050, align 1, !pc !54 + %1051 = getelementptr inbounds i8, ptr %7, i64 474, !pc !54 + store i8 %1037, ptr %1051, align 2, !pc !54 + %1052 = getelementptr inbounds i8, ptr %7, i64 475, !pc !54 + store i8 %1038, ptr %1052, align 1, !pc !54 + %1053 = getelementptr inbounds i8, ptr %7, i64 476, !pc !54 + store i8 %1039, ptr %1053, align 4, !pc !54 + %1054 = getelementptr inbounds i8, ptr %7, i64 477, !pc !54 + store i8 %1040, ptr %1054, align 1, !pc !54 + %1055 = getelementptr inbounds i8, ptr %7, i64 478, !pc !54 + store i8 %1041, ptr %1055, align 2, !pc !54 + %1056 = getelementptr inbounds i8, ptr %7, i64 479, !pc !54 + store i8 %1042, ptr %1056, align 1, !pc !54 + %1057 = load i8, ptr @__anvill_reg_XMM8, align 1, !pc !54 + %1058 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 
1), align 1, !pc !54 + %1059 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 2), align 1, !pc !54 + %1060 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 3), align 1, !pc !54 + %1061 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 4), align 1, !pc !54 + %1062 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 5), align 1, !pc !54 + %1063 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 6), align 1, !pc !54 + %1064 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 7), align 1, !pc !54 + %1065 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 8), align 1, !pc !54 + %1066 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 9), align 1, !pc !54 + %1067 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 10), align 1, !pc !54 + %1068 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 11), align 1, !pc !54 + %1069 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 12), align 1, !pc !54 + %1070 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 13), align 1, !pc !54 + %1071 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 14), align 1, !pc !54 + %1072 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM8, i64 0, i64 15), align 1, !pc !54 + store i8 %1057, ptr %74, align 8, !pc !54 + %1073 = getelementptr inbounds i8, ptr %7, i64 529, !pc !54 + store i8 %1058, ptr %1073, align 1, !pc !54 + %1074 = getelementptr inbounds i8, ptr %7, i64 530, !pc !54 + store i8 %1059, ptr %1074, align 2, !pc !54 + %1075 = getelementptr inbounds i8, ptr %7, i64 531, !pc !54 + store i8 %1060, ptr %1075, align 1, !pc !54 + %1076 = getelementptr inbounds i8, ptr %7, i64 532, !pc !54 + store i8 %1061, ptr %1076, align 4, !pc !54 + %1077 = getelementptr inbounds i8, ptr %7, i64 533, !pc !54 + store i8 %1062, ptr %1077, align 1, !pc !54 + %1078 = getelementptr inbounds i8, ptr %7, i64 534, !pc !54 + store i8 %1063, ptr %1078, align 2, !pc !54 + %1079 = getelementptr inbounds i8, ptr %7, i64 535, !pc !54 + store i8 %1064, ptr %1079, align 1, !pc !54 + store i8 %1065, ptr %75, align 8, !pc !54 + %1080 = getelementptr inbounds i8, ptr %7, i64 537, !pc !54 + store i8 %1066, ptr %1080, align 1, !pc !54 + %1081 = getelementptr inbounds i8, ptr %7, i64 538, !pc !54 + store i8 %1067, ptr %1081, align 2, !pc !54 + %1082 = getelementptr inbounds i8, ptr %7, i64 539, !pc !54 + store i8 %1068, ptr %1082, align 1, !pc !54 + %1083 = getelementptr inbounds i8, ptr %7, i64 540, !pc !54 + store i8 %1069, ptr %1083, align 4, !pc !54 + %1084 = getelementptr inbounds i8, ptr %7, i64 541, !pc !54 + store i8 %1070, ptr %1084, align 1, !pc !54 + %1085 = getelementptr inbounds i8, ptr %7, i64 542, !pc !54 + store i8 %1071, ptr %1085, align 2, !pc !54 + %1086 = getelementptr inbounds i8, ptr %7, i64 543, !pc !54 + store i8 %1072, ptr %1086, align 1, !pc !54 + %1087 = load i8, ptr @__anvill_reg_XMM9, align 1, !pc !54 + %1088 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 1), align 1, !pc !54 + %1089 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 2), align 1, !pc !54 + %1090 = load i8, ptr getelementptr 
inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 3), align 1, !pc !54 + %1091 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 4), align 1, !pc !54 + %1092 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 5), align 1, !pc !54 + %1093 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 6), align 1, !pc !54 + %1094 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 7), align 1, !pc !54 + %1095 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 8), align 1, !pc !54 + %1096 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 9), align 1, !pc !54 + %1097 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 10), align 1, !pc !54 + %1098 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 11), align 1, !pc !54 + %1099 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 12), align 1, !pc !54 + %1100 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 13), align 1, !pc !54 + %1101 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 14), align 1, !pc !54 + %1102 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM9, i64 0, i64 15), align 1, !pc !54 + store i8 %1087, ptr %82, align 8, !pc !54 + %1103 = getelementptr inbounds i8, ptr %7, i64 593, !pc !54 + store i8 %1088, ptr %1103, align 1, !pc !54 + %1104 = getelementptr inbounds i8, ptr %7, i64 594, !pc !54 + store i8 %1089, ptr %1104, align 2, !pc !54 + %1105 = getelementptr inbounds i8, ptr %7, i64 595, !pc !54 + store i8 %1090, ptr %1105, align 1, !pc !54 + %1106 = getelementptr inbounds i8, ptr %7, i64 596, !pc !54 + store i8 %1091, ptr %1106, align 4, !pc !54 + %1107 = getelementptr inbounds i8, ptr %7, i64 597, !pc !54 + store i8 %1092, ptr %1107, align 1, !pc !54 + %1108 = getelementptr inbounds i8, ptr %7, i64 598, !pc !54 + store i8 %1093, ptr %1108, align 2, !pc !54 + %1109 = getelementptr inbounds i8, ptr %7, i64 599, !pc !54 + store i8 %1094, ptr %1109, align 1, !pc !54 + store i8 %1095, ptr %83, align 8, !pc !54 + %1110 = getelementptr inbounds i8, ptr %7, i64 601, !pc !54 + store i8 %1096, ptr %1110, align 1, !pc !54 + %1111 = getelementptr inbounds i8, ptr %7, i64 602, !pc !54 + store i8 %1097, ptr %1111, align 2, !pc !54 + %1112 = getelementptr inbounds i8, ptr %7, i64 603, !pc !54 + store i8 %1098, ptr %1112, align 1, !pc !54 + %1113 = getelementptr inbounds i8, ptr %7, i64 604, !pc !54 + store i8 %1099, ptr %1113, align 4, !pc !54 + %1114 = getelementptr inbounds i8, ptr %7, i64 605, !pc !54 + store i8 %1100, ptr %1114, align 1, !pc !54 + %1115 = getelementptr inbounds i8, ptr %7, i64 606, !pc !54 + store i8 %1101, ptr %1115, align 2, !pc !54 + %1116 = getelementptr inbounds i8, ptr %7, i64 607, !pc !54 + store i8 %1102, ptr %1116, align 1, !pc !54 + %1117 = load i8, ptr @__anvill_reg_XMM10, align 1, !pc !54 + %1118 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 1), align 1, !pc !54 + %1119 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 2), align 1, !pc !54 + %1120 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 3), align 1, !pc !54 + %1121 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 4), 
align 1, !pc !54 + %1122 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 5), align 1, !pc !54 + %1123 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 6), align 1, !pc !54 + %1124 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 7), align 1, !pc !54 + %1125 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 8), align 1, !pc !54 + %1126 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 9), align 1, !pc !54 + %1127 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 10), align 1, !pc !54 + %1128 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 11), align 1, !pc !54 + %1129 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 12), align 1, !pc !54 + %1130 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 13), align 1, !pc !54 + %1131 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 14), align 1, !pc !54 + %1132 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM10, i64 0, i64 15), align 1, !pc !54 + store i8 %1117, ptr %90, align 8, !pc !54 + %1133 = getelementptr inbounds i8, ptr %7, i64 657, !pc !54 + store i8 %1118, ptr %1133, align 1, !pc !54 + %1134 = getelementptr inbounds i8, ptr %7, i64 658, !pc !54 + store i8 %1119, ptr %1134, align 2, !pc !54 + %1135 = getelementptr inbounds i8, ptr %7, i64 659, !pc !54 + store i8 %1120, ptr %1135, align 1, !pc !54 + %1136 = getelementptr inbounds i8, ptr %7, i64 660, !pc !54 + store i8 %1121, ptr %1136, align 4, !pc !54 + %1137 = getelementptr inbounds i8, ptr %7, i64 661, !pc !54 + store i8 %1122, ptr %1137, align 1, !pc !54 + %1138 = getelementptr inbounds i8, ptr %7, i64 662, !pc !54 + store i8 %1123, ptr %1138, align 2, !pc !54 + %1139 = getelementptr inbounds i8, ptr %7, i64 663, !pc !54 + store i8 %1124, ptr %1139, align 1, !pc !54 + store i8 %1125, ptr %91, align 8, !pc !54 + %1140 = getelementptr inbounds i8, ptr %7, i64 665, !pc !54 + store i8 %1126, ptr %1140, align 1, !pc !54 + %1141 = getelementptr inbounds i8, ptr %7, i64 666, !pc !54 + store i8 %1127, ptr %1141, align 2, !pc !54 + %1142 = getelementptr inbounds i8, ptr %7, i64 667, !pc !54 + store i8 %1128, ptr %1142, align 1, !pc !54 + %1143 = getelementptr inbounds i8, ptr %7, i64 668, !pc !54 + store i8 %1129, ptr %1143, align 4, !pc !54 + %1144 = getelementptr inbounds i8, ptr %7, i64 669, !pc !54 + store i8 %1130, ptr %1144, align 1, !pc !54 + %1145 = getelementptr inbounds i8, ptr %7, i64 670, !pc !54 + store i8 %1131, ptr %1145, align 2, !pc !54 + %1146 = getelementptr inbounds i8, ptr %7, i64 671, !pc !54 + store i8 %1132, ptr %1146, align 1, !pc !54 + %1147 = load i8, ptr @__anvill_reg_XMM11, align 1, !pc !54 + %1148 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 1), align 1, !pc !54 + %1149 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 2), align 1, !pc !54 + %1150 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 3), align 1, !pc !54 + %1151 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 4), align 1, !pc !54 + %1152 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 5), align 1, !pc !54 + %1153 = load i8, ptr 
getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 6), align 1, !pc !54 + %1154 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 7), align 1, !pc !54 + %1155 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 8), align 1, !pc !54 + %1156 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 9), align 1, !pc !54 + %1157 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 10), align 1, !pc !54 + %1158 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 11), align 1, !pc !54 + %1159 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 12), align 1, !pc !54 + %1160 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 13), align 1, !pc !54 + %1161 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 14), align 1, !pc !54 + %1162 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM11, i64 0, i64 15), align 1, !pc !54 + store i8 %1147, ptr %98, align 8, !pc !54 + %1163 = getelementptr inbounds i8, ptr %7, i64 721, !pc !54 + store i8 %1148, ptr %1163, align 1, !pc !54 + %1164 = getelementptr inbounds i8, ptr %7, i64 722, !pc !54 + store i8 %1149, ptr %1164, align 2, !pc !54 + %1165 = getelementptr inbounds i8, ptr %7, i64 723, !pc !54 + store i8 %1150, ptr %1165, align 1, !pc !54 + %1166 = getelementptr inbounds i8, ptr %7, i64 724, !pc !54 + store i8 %1151, ptr %1166, align 4, !pc !54 + %1167 = getelementptr inbounds i8, ptr %7, i64 725, !pc !54 + store i8 %1152, ptr %1167, align 1, !pc !54 + %1168 = getelementptr inbounds i8, ptr %7, i64 726, !pc !54 + store i8 %1153, ptr %1168, align 2, !pc !54 + %1169 = getelementptr inbounds i8, ptr %7, i64 727, !pc !54 + store i8 %1154, ptr %1169, align 1, !pc !54 + store i8 %1155, ptr %99, align 8, !pc !54 + %1170 = getelementptr inbounds i8, ptr %7, i64 729, !pc !54 + store i8 %1156, ptr %1170, align 1, !pc !54 + %1171 = getelementptr inbounds i8, ptr %7, i64 730, !pc !54 + store i8 %1157, ptr %1171, align 2, !pc !54 + %1172 = getelementptr inbounds i8, ptr %7, i64 731, !pc !54 + store i8 %1158, ptr %1172, align 1, !pc !54 + %1173 = getelementptr inbounds i8, ptr %7, i64 732, !pc !54 + store i8 %1159, ptr %1173, align 4, !pc !54 + %1174 = getelementptr inbounds i8, ptr %7, i64 733, !pc !54 + store i8 %1160, ptr %1174, align 1, !pc !54 + %1175 = getelementptr inbounds i8, ptr %7, i64 734, !pc !54 + store i8 %1161, ptr %1175, align 2, !pc !54 + %1176 = getelementptr inbounds i8, ptr %7, i64 735, !pc !54 + store i8 %1162, ptr %1176, align 1, !pc !54 + %1177 = load i8, ptr @__anvill_reg_XMM12, align 1, !pc !54 + %1178 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 1), align 1, !pc !54 + %1179 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 2), align 1, !pc !54 + %1180 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 3), align 1, !pc !54 + %1181 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 4), align 1, !pc !54 + %1182 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 5), align 1, !pc !54 + %1183 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 6), align 1, !pc !54 + %1184 = load i8, ptr getelementptr inbounds ([16 x i8], ptr 
@__anvill_reg_XMM12, i64 0, i64 7), align 1, !pc !54 + %1185 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 8), align 1, !pc !54 + %1186 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 9), align 1, !pc !54 + %1187 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 10), align 1, !pc !54 + %1188 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 11), align 1, !pc !54 + %1189 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 12), align 1, !pc !54 + %1190 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 13), align 1, !pc !54 + %1191 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 14), align 1, !pc !54 + %1192 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM12, i64 0, i64 15), align 1, !pc !54 + store i8 %1177, ptr %106, align 8, !pc !54 + %1193 = getelementptr inbounds i8, ptr %7, i64 785, !pc !54 + store i8 %1178, ptr %1193, align 1, !pc !54 + %1194 = getelementptr inbounds i8, ptr %7, i64 786, !pc !54 + store i8 %1179, ptr %1194, align 2, !pc !54 + %1195 = getelementptr inbounds i8, ptr %7, i64 787, !pc !54 + store i8 %1180, ptr %1195, align 1, !pc !54 + %1196 = getelementptr inbounds i8, ptr %7, i64 788, !pc !54 + store i8 %1181, ptr %1196, align 4, !pc !54 + %1197 = getelementptr inbounds i8, ptr %7, i64 789, !pc !54 + store i8 %1182, ptr %1197, align 1, !pc !54 + %1198 = getelementptr inbounds i8, ptr %7, i64 790, !pc !54 + store i8 %1183, ptr %1198, align 2, !pc !54 + %1199 = getelementptr inbounds i8, ptr %7, i64 791, !pc !54 + store i8 %1184, ptr %1199, align 1, !pc !54 + store i8 %1185, ptr %107, align 8, !pc !54 + %1200 = getelementptr inbounds i8, ptr %7, i64 793, !pc !54 + store i8 %1186, ptr %1200, align 1, !pc !54 + %1201 = getelementptr inbounds i8, ptr %7, i64 794, !pc !54 + store i8 %1187, ptr %1201, align 2, !pc !54 + %1202 = getelementptr inbounds i8, ptr %7, i64 795, !pc !54 + store i8 %1188, ptr %1202, align 1, !pc !54 + %1203 = getelementptr inbounds i8, ptr %7, i64 796, !pc !54 + store i8 %1189, ptr %1203, align 4, !pc !54 + %1204 = getelementptr inbounds i8, ptr %7, i64 797, !pc !54 + store i8 %1190, ptr %1204, align 1, !pc !54 + %1205 = getelementptr inbounds i8, ptr %7, i64 798, !pc !54 + store i8 %1191, ptr %1205, align 2, !pc !54 + %1206 = getelementptr inbounds i8, ptr %7, i64 799, !pc !54 + store i8 %1192, ptr %1206, align 1, !pc !54 + %1207 = load i8, ptr @__anvill_reg_XMM13, align 1, !pc !54 + %1208 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 1), align 1, !pc !54 + %1209 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 2), align 1, !pc !54 + %1210 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 3), align 1, !pc !54 + %1211 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 4), align 1, !pc !54 + %1212 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 5), align 1, !pc !54 + %1213 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 6), align 1, !pc !54 + %1214 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 7), align 1, !pc !54 + %1215 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 8), align 1, 
!pc !54 + %1216 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 9), align 1, !pc !54 + %1217 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 10), align 1, !pc !54 + %1218 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 11), align 1, !pc !54 + %1219 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 12), align 1, !pc !54 + %1220 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 13), align 1, !pc !54 + %1221 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 14), align 1, !pc !54 + %1222 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM13, i64 0, i64 15), align 1, !pc !54 + store i8 %1207, ptr %114, align 8, !pc !54 + %1223 = getelementptr inbounds i8, ptr %7, i64 849, !pc !54 + store i8 %1208, ptr %1223, align 1, !pc !54 + %1224 = getelementptr inbounds i8, ptr %7, i64 850, !pc !54 + store i8 %1209, ptr %1224, align 2, !pc !54 + %1225 = getelementptr inbounds i8, ptr %7, i64 851, !pc !54 + store i8 %1210, ptr %1225, align 1, !pc !54 + %1226 = getelementptr inbounds i8, ptr %7, i64 852, !pc !54 + store i8 %1211, ptr %1226, align 4, !pc !54 + %1227 = getelementptr inbounds i8, ptr %7, i64 853, !pc !54 + store i8 %1212, ptr %1227, align 1, !pc !54 + %1228 = getelementptr inbounds i8, ptr %7, i64 854, !pc !54 + store i8 %1213, ptr %1228, align 2, !pc !54 + %1229 = getelementptr inbounds i8, ptr %7, i64 855, !pc !54 + store i8 %1214, ptr %1229, align 1, !pc !54 + store i8 %1215, ptr %115, align 8, !pc !54 + %1230 = getelementptr inbounds i8, ptr %7, i64 857, !pc !54 + store i8 %1216, ptr %1230, align 1, !pc !54 + %1231 = getelementptr inbounds i8, ptr %7, i64 858, !pc !54 + store i8 %1217, ptr %1231, align 2, !pc !54 + %1232 = getelementptr inbounds i8, ptr %7, i64 859, !pc !54 + store i8 %1218, ptr %1232, align 1, !pc !54 + %1233 = getelementptr inbounds i8, ptr %7, i64 860, !pc !54 + store i8 %1219, ptr %1233, align 4, !pc !54 + %1234 = getelementptr inbounds i8, ptr %7, i64 861, !pc !54 + store i8 %1220, ptr %1234, align 1, !pc !54 + %1235 = getelementptr inbounds i8, ptr %7, i64 862, !pc !54 + store i8 %1221, ptr %1235, align 2, !pc !54 + %1236 = getelementptr inbounds i8, ptr %7, i64 863, !pc !54 + store i8 %1222, ptr %1236, align 1, !pc !54 + %1237 = load i8, ptr @__anvill_reg_XMM14, align 1, !pc !54 + %1238 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 1), align 1, !pc !54 + %1239 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 2), align 1, !pc !54 + %1240 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 3), align 1, !pc !54 + %1241 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 4), align 1, !pc !54 + %1242 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 5), align 1, !pc !54 + %1243 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 6), align 1, !pc !54 + %1244 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 7), align 1, !pc !54 + %1245 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 8), align 1, !pc !54 + %1246 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 9), align 1, !pc !54 + %1247 = load i8, ptr getelementptr 
inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 10), align 1, !pc !54 + %1248 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 11), align 1, !pc !54 + %1249 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 12), align 1, !pc !54 + %1250 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 13), align 1, !pc !54 + %1251 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 14), align 1, !pc !54 + %1252 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM14, i64 0, i64 15), align 1, !pc !54 + store i8 %1237, ptr %122, align 8, !pc !54 + %1253 = getelementptr inbounds i8, ptr %7, i64 913, !pc !54 + store i8 %1238, ptr %1253, align 1, !pc !54 + %1254 = getelementptr inbounds i8, ptr %7, i64 914, !pc !54 + store i8 %1239, ptr %1254, align 2, !pc !54 + %1255 = getelementptr inbounds i8, ptr %7, i64 915, !pc !54 + store i8 %1240, ptr %1255, align 1, !pc !54 + %1256 = getelementptr inbounds i8, ptr %7, i64 916, !pc !54 + store i8 %1241, ptr %1256, align 4, !pc !54 + %1257 = getelementptr inbounds i8, ptr %7, i64 917, !pc !54 + store i8 %1242, ptr %1257, align 1, !pc !54 + %1258 = getelementptr inbounds i8, ptr %7, i64 918, !pc !54 + store i8 %1243, ptr %1258, align 2, !pc !54 + %1259 = getelementptr inbounds i8, ptr %7, i64 919, !pc !54 + store i8 %1244, ptr %1259, align 1, !pc !54 + store i8 %1245, ptr %123, align 8, !pc !54 + %1260 = getelementptr inbounds i8, ptr %7, i64 921, !pc !54 + store i8 %1246, ptr %1260, align 1, !pc !54 + %1261 = getelementptr inbounds i8, ptr %7, i64 922, !pc !54 + store i8 %1247, ptr %1261, align 2, !pc !54 + %1262 = getelementptr inbounds i8, ptr %7, i64 923, !pc !54 + store i8 %1248, ptr %1262, align 1, !pc !54 + %1263 = getelementptr inbounds i8, ptr %7, i64 924, !pc !54 + store i8 %1249, ptr %1263, align 4, !pc !54 + %1264 = getelementptr inbounds i8, ptr %7, i64 925, !pc !54 + store i8 %1250, ptr %1264, align 1, !pc !54 + %1265 = getelementptr inbounds i8, ptr %7, i64 926, !pc !54 + store i8 %1251, ptr %1265, align 2, !pc !54 + %1266 = getelementptr inbounds i8, ptr %7, i64 927, !pc !54 + store i8 %1252, ptr %1266, align 1, !pc !54 + %1267 = load i8, ptr @__anvill_reg_XMM15, align 1, !pc !54 + %1268 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 1), align 1, !pc !54 + %1269 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 2), align 1, !pc !54 + %1270 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 3), align 1, !pc !54 + %1271 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 4), align 1, !pc !54 + %1272 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 5), align 1, !pc !54 + %1273 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 6), align 1, !pc !54 + %1274 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 7), align 1, !pc !54 + %1275 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 8), align 1, !pc !54 + %1276 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 9), align 1, !pc !54 + %1277 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 10), align 1, !pc !54 + %1278 = load i8, ptr getelementptr inbounds ([16 x i8], ptr 
@__anvill_reg_XMM15, i64 0, i64 11), align 1, !pc !54 + %1279 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 12), align 1, !pc !54 + %1280 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 13), align 1, !pc !54 + %1281 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 14), align 1, !pc !54 + %1282 = load i8, ptr getelementptr inbounds ([16 x i8], ptr @__anvill_reg_XMM15, i64 0, i64 15), align 1, !pc !54 + store i8 %1267, ptr %130, align 8, !pc !54 + %1283 = getelementptr inbounds i8, ptr %7, i64 977, !pc !54 + store i8 %1268, ptr %1283, align 1, !pc !54 + %1284 = getelementptr inbounds i8, ptr %7, i64 978, !pc !54 + store i8 %1269, ptr %1284, align 2, !pc !54 + %1285 = getelementptr inbounds i8, ptr %7, i64 979, !pc !54 + store i8 %1270, ptr %1285, align 1, !pc !54 + %1286 = getelementptr inbounds i8, ptr %7, i64 980, !pc !54 + store i8 %1271, ptr %1286, align 4, !pc !54 + %1287 = getelementptr inbounds i8, ptr %7, i64 981, !pc !54 + store i8 %1272, ptr %1287, align 1, !pc !54 + %1288 = getelementptr inbounds i8, ptr %7, i64 982, !pc !54 + store i8 %1273, ptr %1288, align 2, !pc !54 + %1289 = getelementptr inbounds i8, ptr %7, i64 983, !pc !54 + store i8 %1274, ptr %1289, align 1, !pc !54 + store i8 %1275, ptr %131, align 8, !pc !54 + %1290 = getelementptr inbounds i8, ptr %7, i64 985, !pc !54 + store i8 %1276, ptr %1290, align 1, !pc !54 + %1291 = getelementptr inbounds i8, ptr %7, i64 986, !pc !54 + store i8 %1277, ptr %1291, align 2, !pc !54 + %1292 = getelementptr inbounds i8, ptr %7, i64 987, !pc !54 + store i8 %1278, ptr %1292, align 1, !pc !54 + %1293 = getelementptr inbounds i8, ptr %7, i64 988, !pc !54 + store i8 %1279, ptr %1293, align 4, !pc !54 + %1294 = getelementptr inbounds i8, ptr %7, i64 989, !pc !54 + store i8 %1280, ptr %1294, align 1, !pc !54 + %1295 = getelementptr inbounds i8, ptr %7, i64 990, !pc !54 + store i8 %1281, ptr %1295, align 2, !pc !54 + %1296 = getelementptr inbounds i8, ptr %7, i64 991, !pc !54 + store i8 %1282, ptr %1296, align 1, !pc !54 + %1297 = load x86_fp80, ptr @__anvill_reg_ST0, align 16, !pc !54 + store x86_fp80 %1297, ptr %347, align 16, !pc !54 + %1298 = load x86_fp80, ptr @__anvill_reg_ST1, align 16, !pc !54 + store x86_fp80 %1298, ptr %363, align 16, !pc !54 + %1299 = load x86_fp80, ptr @__anvill_reg_ST2, align 16, !pc !54 + store x86_fp80 %1299, ptr %379, align 16, !pc !54 + %1300 = load x86_fp80, ptr @__anvill_reg_ST3, align 16, !pc !54 + store x86_fp80 %1300, ptr %395, align 16, !pc !54 + %1301 = load x86_fp80, ptr @__anvill_reg_ST4, align 16, !pc !54 + store x86_fp80 %1301, ptr %411, align 16, !pc !54 + %1302 = load x86_fp80, ptr @__anvill_reg_ST5, align 16, !pc !54 + store x86_fp80 %1302, ptr %427, align 16, !pc !54 + %1303 = load x86_fp80, ptr @__anvill_reg_ST6, align 16, !pc !54 + store x86_fp80 %1303, ptr %443, align 16, !pc !54 + %1304 = load x86_fp80, ptr @__anvill_reg_ST7, align 16, !pc !54 + store x86_fp80 %1304, ptr %459, align 16, !pc !54 + %1305 = load i64, ptr @__anvill_reg_MM0, align 8, !pc !54 + store i64 %1305, ptr %470, align 8, !pc !54 + %1306 = load i64, ptr @__anvill_reg_MM1, align 8, !pc !54 + store i64 %1306, ptr %472, align 8, !pc !54 + %1307 = load i64, ptr @__anvill_reg_MM2, align 8, !pc !54 + store i64 %1307, ptr %474, align 8, !pc !54 + %1308 = load i64, ptr @__anvill_reg_MM3, align 8, !pc !54 + store i64 %1308, ptr %476, align 8, !pc !54 + %1309 = load i64, ptr @__anvill_reg_MM4, align 8, !pc !54 + 
store i64 %1309, ptr %478, align 8, !pc !54 + %1310 = load i64, ptr @__anvill_reg_MM5, align 8, !pc !54 + store i64 %1310, ptr %480, align 8, !pc !54 + %1311 = load i64, ptr @__anvill_reg_MM6, align 8, !pc !54 + store i64 %1311, ptr %482, align 8, !pc !54 + %1312 = load i64, ptr @__anvill_reg_MM7, align 8, !pc !54 + store i64 %1312, ptr %484, align 8, !pc !54 + %1313 = load i8, ptr @__anvill_reg_AF, align 1, !pc !54 + store i8 %1313, ptr %271, align 1, !pc !54 + %1314 = load i8, ptr @__anvill_reg_CF, align 1, !pc !54 + store i8 %1314, ptr %267, align 1, !pc !54 + %1315 = load i8, ptr @__anvill_reg_DF, align 1, !pc !54 + store i8 %1315, ptr %277, align 1, !pc !54 + %1316 = load i8, ptr @__anvill_reg_OF, align 1, !pc !54 + store i8 %1316, ptr %279, align 1, !pc !54 + %1317 = load i8, ptr @__anvill_reg_PF, align 1, !pc !54 + store i8 %1317, ptr %269, align 1, !pc !54 + %1318 = load i8, ptr @__anvill_reg_SF, align 1, !pc !54 + store i8 %1318, ptr %275, align 1, !pc !54 + %1319 = load i8, ptr @__anvill_reg_ZF, align 1, !pc !54 + store i8 %1319, ptr %273, align 1, !pc !54 + store i64 ptrtoint (ptr addrspacecast (ptr addrspace(256) null to ptr) to i64), ptr %300, align 8, !pc !54 + store i64 ptrtoint (ptr addrspacecast (ptr addrspace(257) null to ptr) to i64), ptr %302, align 8, !pc !54 + store i64 %4, ptr %320, align 8, !pc !54 + store i64 %3, ptr %return_address_loc, align 8, !pc !54 + store i32 %0, ptr %318, align 8, !pc !54 + %1320 = ptrtoint ptr %1 to i64, !pc !54 + store i64 %1320, ptr %316, align 8, !pc !54 + call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %6), !pc !54 + store i64 ptrtoint (ptr @sub_401270__AI_SI_B_64 to i64), ptr %6, align 8, !pc !54 + store i64 ptrtoint (ptr @sub_401270__AI_SI_B_64 to i64), ptr %340, align 8, !pc !54 + %1321 = call fastcc ptr @basic_block_func4199024(ptr nonnull %7, i64 ptrtoint (ptr @sub_401270__AI_SI_B_64 to i64), ptr null, ptr nonnull %6) #7, !pc !54 + %1322 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1322, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199049, label %inst_401289.i + i64 4199074, label %inst_4012a2.i + ], !pc !54 + +inst_401289.i: ; preds = %2 + %1323 = call fastcc ptr @basic_block_func4199049(ptr nonnull %7, i64 4199049, ptr %1321, ptr nonnull %6) #7, !pc !54 + %1324 = load i64, ptr %6, align 8, !pc !54 + %1325 = icmp eq i64 %1324, 4199922, !pc !54 + call void @llvm.assume(i1 %1325), !pc !54 + br label %inst_4015f2.i, !pc !54 + +inst_4015f2.i: ; preds = %inst_401289.i, %inst_4015ee.i + %1326 = phi ptr [ %1347, %inst_4015ee.i ], [ %1323, %inst_401289.i ], !pc !54 + %1327 = call fastcc ptr @basic_block_func4199922(ptr nonnull %7, i64 4199922, ptr %1326, ptr nonnull %6) #7, !pc !54 + unreachable, !pc !54 + +inst_401306.i: ; preds = %inst_4012a2.i + %1328 = call fastcc ptr @basic_block_func4199174(ptr nonnull %7, i64 4199174, ptr %1369, ptr nonnull %6) #7, !pc !54 + %1329 = load i64, ptr %6, align 8, !pc !54 + %1330 = icmp eq i64 %1329, 4199184, !pc !54 + call void @llvm.assume(i1 %1330), !pc !54 + br label %inst_401310.i, !pc !54 + +inst_401310.i: ; preds = %inst_401306.i, %inst_401310.i + %1331 = phi ptr [ %1328, %inst_401306.i ], [ %1332, %inst_401310.i ], !pc !54 + %1332 = call fastcc ptr @basic_block_func4199184(ptr nonnull %7, i64 4199184, ptr %1331, ptr nonnull %6) #7, !pc !54 + %1333 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1333, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199219, label %inst_401333.i + i64 4199184, label %inst_401310.i + ], !pc !54 + +inst_4014f9.i: ; preds = %inst_4014f1.i + 
%1334 = call fastcc ptr @basic_block_func4199673(ptr nonnull %7, i64 4199673, ptr %1352, ptr nonnull %6) #7, !pc !54 + %1335 = load i64, ptr %6, align 8, !pc !54 + %1336 = icmp eq i64 %1335, 4199701, !pc !54 + call void @llvm.assume(i1 %1336), !pc !54 + br label %inst_401515.i, !pc !54 + +inst_401515.i: ; preds = %inst_4014f9.i, %inst_401508.i, %inst_4014f1.i, %inst_4013e0.i + %1337 = phi ptr [ %1344, %inst_4013e0.i ], [ %1354, %inst_401508.i ], [ %1352, %inst_4014f1.i ], [ %1334, %inst_4014f9.i ], !pc !54 + %1338 = call fastcc ptr @basic_block_func4199701(ptr nonnull %7, i64 4199701, ptr %1337, ptr nonnull %6) #7, !pc !54 + %1339 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1339, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199888, label %inst_4015d0.i + i64 4199392, label %inst_4013e0.i + ], !pc !54 + +inst_4015d0.i: ; preds = %inst_401515.i + %1340 = call fastcc ptr @basic_block_func4199888(ptr nonnull %7, i64 4199888, ptr %1338, ptr nonnull %6) #7, !pc !54 + %1341 = load i64, ptr %6, align 8, !pc !54 + %1342 = icmp eq i64 %1341, 4199918, !pc !54 + call void @llvm.assume(i1 %1342), !pc !54 + br label %inst_4015ee.i, !pc !54 + +inst_4013e0.i: ; preds = %inst_401381.i, %inst_4015d2.i, %inst_401515.i + %1343 = phi ptr [ %1357, %inst_401381.i ], [ %1338, %inst_401515.i ], [ %1371, %inst_4015d2.i ], !pc !54 + %1344 = call fastcc ptr @basic_block_func4199392(ptr nonnull %7, i64 4199392, ptr %1343, ptr nonnull %6) #7, !pc !54 + %1345 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1345, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199470, label %inst_40142e.i + i64 4199701, label %inst_401515.i + ], !pc !54 + +inst_4015ee.i: ; preds = %inst_4015d0.i, %inst_4015d2.i + %1346 = phi ptr [ %1340, %inst_4015d0.i ], [ %1371, %inst_4015d2.i ], !pc !54 + %1347 = call fastcc ptr @basic_block_func4199918(ptr nonnull %7, i64 4199918, ptr %1346, ptr nonnull %6) #7, !pc !54 + %1348 = load i64, ptr %6, align 8, !pc !54 + %1349 = icmp eq i64 %1348, 4199922, !pc !54 + call void @llvm.assume(i1 %1349), !pc !54 + br label %inst_4015f2.i, !pc !54 + +inst_401449.i: ; preds = %inst_40142e.i + %1350 = call fastcc ptr @basic_block_func4199497(ptr nonnull %7, i64 4199497, ptr %1367, ptr nonnull %6) #7, !pc !54 + %1351 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1351, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199665, label %inst_4014f1.i + i64 4199688, label %inst_401508.i + ], !pc !54 + +inst_4014f1.i: ; preds = %inst_401449.i + %1352 = call fastcc ptr @basic_block_func4199665(ptr nonnull %7, i64 4199665, ptr %1350, ptr nonnull %6) #7, !pc !54 + %1353 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1353, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199673, label %inst_4014f9.i + i64 4199701, label %inst_401515.i + ], !pc !54 + +inst_401508.i: ; preds = %inst_401449.i + %1354 = call fastcc ptr @basic_block_func4199688(ptr nonnull %7, i64 4199688, ptr %1350, ptr nonnull %6) #7, !pc !54 + %1355 = load i64, ptr %6, align 8, !pc !54 + %1356 = icmp eq i64 %1355, 4199701, !pc !54 + call void @llvm.assume(i1 %1356), !pc !54 + br label %inst_401515.i, !pc !54 + +inst_401381.i: ; preds = %inst_401350.i + %1357 = call fastcc ptr @basic_block_func4199297(ptr nonnull %7, i64 4199297, ptr %1365, ptr nonnull %6) #7, !pc !54 + %1358 = load i64, ptr %6, align 8, !pc !54 + %1359 = icmp eq i64 %1358, 4199392, !pc !54 + call void @llvm.assume(i1 %1359), !pc !54 + br label %inst_4013e0.i, !pc !54 + +inst_401333.i: ; preds = %inst_4012a2.i, %inst_401310.i + %1360 = phi ptr [ %1369, 
%inst_4012a2.i ], [ %1332, %inst_401310.i ], !pc !54 + %1361 = call fastcc ptr @basic_block_func4199219(ptr nonnull %7, i64 4199219, ptr %1360, ptr nonnull %6) #7, !pc !54 + %1362 = load i64, ptr %6, align 8, !pc !54 + %1363 = icmp eq i64 %1362, 4199248, !pc !54 + call void @llvm.assume(i1 %1363), !pc !54 + br label %inst_401350.i, !pc !54 + +inst_401350.i: ; preds = %inst_401333.i, %inst_401350.i + %1364 = phi ptr [ %1361, %inst_401333.i ], [ %1365, %inst_401350.i ], !pc !54 + %1365 = call fastcc ptr @basic_block_func4199248(ptr nonnull %7, i64 4199248, ptr %1364, ptr nonnull %6) #7, !pc !54 + %1366 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1366, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199297, label %inst_401381.i + i64 4199248, label %inst_401350.i + ], !pc !54 + +inst_40142e.i: ; preds = %inst_4013e0.i + %1367 = call fastcc ptr @basic_block_func4199470(ptr nonnull %7, i64 4199470, ptr %1344, ptr nonnull %6) #7, !pc !54 + %1368 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1368, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199497, label %inst_401449.i + i64 4199890, label %inst_4015d2.i + ], !pc !54 + +inst_4012a2.i: ; preds = %2 + %1369 = call fastcc ptr @basic_block_func4199074(ptr nonnull %7, i64 4199074, ptr %1321, ptr nonnull %6) #7, !pc !54 + %1370 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1370, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199174, label %inst_401306.i + i64 4199219, label %inst_401333.i + ], !pc !54 + +inst_4015d2.i: ; preds = %inst_40142e.i + %1371 = call fastcc ptr @basic_block_func4199890(ptr nonnull %7, i64 4199890, ptr %1367, ptr nonnull %6) #7, !pc !54 + %1372 = load i64, ptr %6, align 8, !pc !54 + switch i64 %1372, label %sub_401270__AI_SI_B_64.lifted.exit [ + i64 4199918, label %inst_4015ee.i + i64 4199392, label %inst_4013e0.i + ], !pc !54 + +sub_401270__AI_SI_B_64.lifted.exit: ; preds = %inst_4015d2.i, %inst_4012a2.i, %inst_40142e.i, %inst_401350.i, %inst_4014f1.i, %inst_401449.i, %inst_4013e0.i, %inst_401515.i, %inst_401310.i, %2 + unreachable, !pc !54 +} + +; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4 + +; Function Attrs: inaccessiblememonly nocallback nofree nosync nounwind willreturn +declare void @llvm.assume(i1 noundef) #5 + +; Function Attrs: nocallback nofree nosync nounwind readnone willreturn +declare ptr @llvm.returnaddress(i32 immarg) #6 + +attributes #0 = { noinline } +attributes #1 = { nocallback nofree nosync nounwind readnone speculatable willreturn } +attributes #2 = { mustprogress noduplicate nofree noinline nosync nounwind optnone readnone willreturn "frame-pointer"="all" "no-builtins" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "tune-cpu"="generic" } +attributes #3 = { noduplicate noinline nounwind optnone "frame-pointer"="all" "no-builtins" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "tune-cpu"="generic" } +attributes #4 = { argmemonly nocallback nofree nosync nounwind willreturn } +attributes #5 = { inaccessiblememonly nocallback nofree nosync nounwind willreturn } +attributes #6 = { nocallback nofree nosync nounwind readnone willreturn } +attributes #7 = { nounwind } + +!0 = !{[4 x i8] c"EAX\00"} +!1 = !{[4 x i8] c"RDI\00"} +!2 = !{[4 x i8] c"RBX\00"} +!3 = !{[4 x i8] c"RSI\00"} +!4 = !{[3 x i8] c"PC\00"} +!5 = !{!6, !6, i64 0} +!6 = !{!"long long", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C++ TBAA"} +!9 = !{!10, !7, i64 2065} +!10 = 
!{!"_ZTS8X86State", !11, i64 0, !7, i64 16, !14, i64 2064, !7, i64 2080, !15, i64 2088, !17, i64 2112, !19, i64 2208, !20, i64 2480, !21, i64 2608, !22, i64 2736, !7, i64 2760, !7, i64 2768, !23, i64 3280, !25, i64 3376} +!11 = !{!"_ZTS9ArchState", !12, i64 0, !13, i64 4, !7, i64 8} +!12 = !{!"_ZTSN14AsyncHyperCall4NameE", !7, i64 0} +!13 = !{!"int", !7, i64 0} +!14 = !{!"_ZTS10ArithFlags", !7, i64 0, !7, i64 1, !7, i64 2, !7, i64 3, !7, i64 4, !7, i64 5, !7, i64 6, !7, i64 7, !7, i64 8, !7, i64 9, !7, i64 10, !7, i64 11, !7, i64 12, !7, i64 13, !7, i64 14, !7, i64 15} +!15 = !{!"_ZTS8Segments", !16, i64 0, !7, i64 2, !16, i64 4, !7, i64 6, !16, i64 8, !7, i64 10, !16, i64 12, !7, i64 14, !16, i64 16, !7, i64 18, !16, i64 20, !7, i64 22} +!16 = !{!"short", !7, i64 0} +!17 = !{!"_ZTS12AddressSpace", !6, i64 0, !18, i64 8, !6, i64 16, !18, i64 24, !6, i64 32, !18, i64 40, !6, i64 48, !18, i64 56, !6, i64 64, !18, i64 72, !6, i64 80, !18, i64 88} +!18 = !{!"_ZTS3Reg", !7, i64 0} +!19 = !{!"_ZTS3GPR", !6, i64 0, !18, i64 8, !6, i64 16, !18, i64 24, !6, i64 32, !18, i64 40, !6, i64 48, !18, i64 56, !6, i64 64, !18, i64 72, !6, i64 80, !18, i64 88, !6, i64 96, !18, i64 104, !6, i64 112, !18, i64 120, !6, i64 128, !18, i64 136, !6, i64 144, !18, i64 152, !6, i64 160, !18, i64 168, !6, i64 176, !18, i64 184, !6, i64 192, !18, i64 200, !6, i64 208, !18, i64 216, !6, i64 224, !18, i64 232, !6, i64 240, !18, i64 248, !6, i64 256, !18, i64 264} +!20 = !{!"_ZTS8X87Stack", !7, i64 0} +!21 = !{!"_ZTS3MMX", !7, i64 0} +!22 = !{!"_ZTS14FPUStatusFlags", !7, i64 0, !7, i64 1, !7, i64 2, !7, i64 3, !7, i64 4, !7, i64 5, !7, i64 6, !7, i64 7, !7, i64 8, !7, i64 9, !7, i64 10, !7, i64 11, !7, i64 12, !7, i64 13, !7, i64 14, !7, i64 15, !7, i64 16, !7, i64 17, !7, i64 18, !7, i64 19, !7, i64 20} +!23 = !{!"_ZTS13SegmentCaches", !24, i64 0, !24, i64 16, !24, i64 32, !24, i64 48, !24, i64 64, !24, i64 80} +!24 = !{!"_ZTS13SegmentShadow", !7, i64 0, !13, i64 8, !13, i64 12} +!25 = !{!"_ZTS5K_REG", !7, i64 0} +!26 = !{i8 0, i8 9} +!27 = !{!10, !7, i64 2067} +!28 = !{!10, !7, i64 2071} +!29 = !{!10, !7, i64 2073} +!30 = !{!10, !7, i64 2077} +!31 = !{!10, !7, i64 2069} +!32 = !{!7, !7, i64 0} +!33 = !{[4 x i8] c"RBP\00"} +!34 = !{[4 x i8] c"RSP\00"} +!35 = !{[4 x i8] c"R14\00"} +!36 = !{[4 x i8] c"R15\00"} +!37 = !{[4 x i8] c"R13\00"} +!38 = !{[4 x i8] c"R12\00"} +!39 = !{[3 x i8] c"AL\00"} +!40 = !{[3 x i8] c"R8\00"} +!41 = !{[4 x i8] c"RCX\00"} +!42 = !{[4 x i8] c"RDX\00"} +!43 = !{[4 x i8] c"EBP\00"} +!44 = !{[4 x i8] c"RAX\00"} +!45 = !{[4 x i8] c"R11\00"} +!46 = !{[4 x i8] c"R10\00"} +!47 = !{[3 x i8] c"R9\00"} +!48 = !{[5 x i8] c"R12D\00"} +!49 = !{[4 x i8] c"ECX\00"} +!50 = !{[4 x i8] c"EDX\00"} +!51 = !{[4 x i8] c"ESI\00"} +!52 = !{!10, !7, i64 2075} +!53 = !{[4 x i8] c"EDI\00"} +!54 = !{i64 4199024} +!55 = !{i64 0} diff --git a/tests/anvill_passes/data/VectorRewriteSmall.ll b/tests/anvill_passes/data/VectorRewriteSmall.ll new file mode 100644 index 000000000..5f15cdcb2 --- /dev/null +++ b/tests/anvill_passes/data/VectorRewriteSmall.ll @@ -0,0 +1,8 @@ +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx-macho" + +define <2 x float> @f(<4 x i8> %v1 , <4 x i8> %v2) { + %.sroa.23.28.vecblend = shufflevector <4 x i8> %v1, <4 x i8> %v2, <8 x i32> + %casted = bitcast <8 x i8> %.sroa.23.28.vecblend to <2 x float> + ret <2 x float> %casted +} \ No newline at end of file diff --git a/tests/anvill_passes/data/VectorToRewrite.ll b/tests/anvill_passes/data/VectorToRewrite.ll 
new file mode 100644 index 000000000..6da1d9798 --- /dev/null +++ b/tests/anvill_passes/data/VectorToRewrite.ll @@ -0,0 +1,10 @@ +target datalayout = "E-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx-macho" + +define <2 x float> @f(<5 x i8> %v1 , <4 x i8> %v2) { + %.sroa.23.24.vec.expand = shufflevector <5 x i8> %v1, <5 x i8> poison, <8 x i32> + %.sroa.23.28.vec.expand = shufflevector <4 x i8> %v2, <4 x i8> poison, <8 x i32> + %.sroa.23.28.vecblend = shufflevector <8 x i8> %.sroa.23.24.vec.expand, <8 x i8> %.sroa.23.28.vec.expand, <8 x i32> + %casted = bitcast <8 x i8> %.sroa.23.28.vecblend to <2 x float> + ret <2 x float> %casted +} \ No newline at end of file diff --git a/tests/anvill_passes/src/BranchRecoveryPass.cpp b/tests/anvill_passes/src/BranchRecoveryPass.cpp index 5f9c31751..6da3ccc40 100644 --- a/tests/anvill_passes/src/BranchRecoveryPass.cpp +++ b/tests/anvill_passes/src/BranchRecoveryPass.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -10,7 +11,6 @@ #include -#include #include "Utils.h" namespace anvill { @@ -28,8 +28,8 @@ static llvm::Function *FindFunction(llvm::Module *module, std::string name) { TEST_SUITE("BranchRecoveryPass") { TEST_CASE("Run analysis on aarch64") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "BranchRecoveryAarch64.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "BranchRecoveryAarch64.ll"); auto target_function = FindFunction(mod.get(), "slice"); CHECK(target_function != nullptr); llvm::FunctionPassManager fpm; @@ -109,8 +109,8 @@ TEST_SUITE("BranchRecoveryPass") { } TEST_CASE("Run analysis sliced function sub") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "RecoverSubBranch.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "RecoverSubBranch.ll"); auto target_function = FindFunction(mod.get(), "slice"); CHECK(target_function != nullptr); llvm::FunctionPassManager fpm; @@ -191,8 +191,8 @@ TEST_SUITE("BranchRecoveryPass") { TEST_CASE("Run on sliced function sub") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "UnrecoverableBranch.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "UnrecoverableBranch.ll"); auto target_function = FindFunction(mod.get(), "slice"); CHECK(target_function != nullptr); llvm::FunctionPassManager fpm; diff --git a/tests/anvill_passes/src/BrightenPointers.cpp b/tests/anvill_passes/src/BrightenPointers.cpp index 185ab0949..4378c3e65 100644 --- a/tests/anvill_passes/src/BrightenPointers.cpp +++ b/tests/anvill_passes/src/BrightenPointers.cpp @@ -8,8 +8,6 @@ #include #include -#include -#include #include #include #include @@ -94,32 +92,32 @@ TEST_SUITE("BrightenPointers") { TEST_CASE("Run the whole pass on a well-formed function") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "gep_add.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "gep_add.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("multiple_bitcast") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "multiple_bitcast.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "multiple_bitcast.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("don't crash on loops") { - auto context = 
anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "loop_test.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "loop_test.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("challenge 1") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "rx_message.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "rx_message.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); @@ -130,8 +128,8 @@ TEST_SUITE("BrightenPointers") { TEST_CASE("challenge 2") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "chall2.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "chall2.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); @@ -141,121 +139,121 @@ TEST_SUITE("BrightenPointers") { } TEST_CASE("ret0") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "ret0.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "ret0.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("jmp0") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "jmp0.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "jmp0.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_array_swap") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_array_swap_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_array_swap_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_binja_var_none_type") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_binja_var_none_type_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_binja_var_none_type_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); mod->print(llvm::errs(), nullptr); CHECK(checkMod(*mod)); } TEST_CASE("test_bitops") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_bitops_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_bitops_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_binops") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_binops_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_binops_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_cast") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_cast_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_cast_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_init_list_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_init_list_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_init_list_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_inttoptr_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = 
LoadTestData(*context, "test_inttoptr_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_inttoptr_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_nullptr_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_nullptr_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_nullptr_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_ret0_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_ret0_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_ret0_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_struct_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_struct_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_struct_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_struct_swap_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_struct_swap_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_struct_swap_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_trunc_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_trunc_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_trunc_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_zeroinit.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_zeroinit_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_zeroinit_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_zext_rt.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_zext_rt.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_zext_rt.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); } TEST_CASE("test_rx.ll") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "test_rx.ll"); + llvm::LLVMContext context; + auto mod = LoadTestData(context, "test_rx.ll"); REQUIRE(mod != nullptr); CHECK(RunFunctionPass(*mod)); CHECK(checkMod(*mod)); diff --git a/tests/anvill_passes/src/InstructionFolderPass.cpp b/tests/anvill_passes/src/InstructionFolderPass.cpp index d25610c2e..0d7b6f5e7 100644 --- a/tests/anvill_passes/src/InstructionFolderPass.cpp +++ b/tests/anvill_passes/src/InstructionFolderPass.cpp @@ -7,7 +7,6 @@ */ #include - #include #include #include @@ -22,12 +21,12 @@ namespace anvill { TEST_SUITE("InstructionFolderPass") { TEST_CASE("Run the whole pass on a well-formed function") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto module = LoadTestData(*context, "InstructionFolderPass.ll"); + llvm::LLVMContext context; + auto module = LoadTestData(context, "InstructionFolderPass.ll"); REQUIRE(module != nullptr); - auto arch = remill::Arch::Build(context.get(), remill::GetOSName("linux"), + auto arch = 
remill::Arch::Build(&context, remill::GetOSName("linux"), remill::GetArchName("amd64")); REQUIRE(arch != nullptr); diff --git a/tests/anvill_passes/src/RecoverEntityUses.cpp b/tests/anvill_passes/src/RecoverEntityUses.cpp index e770ff7c0..3980fc167 100644 --- a/tests/anvill_passes/src/RecoverEntityUses.cpp +++ b/tests/anvill_passes/src/RecoverEntityUses.cpp @@ -1,58 +1,56 @@ +#include #include +#include +#include #include #include #include #include #include #include -#include -#include + #include -#include -#include #include "Utils.h" namespace anvill { - TEST_SUITE("RecoverEntityUses") { - TEST_CASE("Regression test for unresolved anvill_pc") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); - auto module = LoadTestData(*llvm_context, "TestingUnresolvedEntity.ll"); +TEST_SUITE("RecoverEntityUses") { + TEST_CASE("Regression test for unresolved anvill_pc") { + llvm::LLVMContext llvm_context; + auto module = LoadTestData(llvm_context, "TestingUnresolvedEntity.ll"); - auto arch = - remill::Arch::Build(llvm_context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); - REQUIRE(arch != nullptr); + auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"), + remill::GetArchName("x86")); + REQUIRE(arch != nullptr); - auto ctrl_flow_provider = anvill::NullControlFlowProvider(); - TypeDictionary tyDict(*llvm_context); + auto ctrl_flow_provider = anvill::NullControlFlowProvider(); + TypeDictionary tyDict(llvm_context); - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider mem_prov; - anvill::LifterOptions lift_options(arch.get(), *module, ty_prov, - std::move(ctrl_flow_provider), - mem_prov); + NullTypeProvider ty_prov(tyDict); + NullMemoryProvider mem_prov; + anvill::LifterOptions lift_options(arch.get(), *module, ty_prov, + std::move(ctrl_flow_provider), mem_prov); - anvill::LifterOptions options(arch.get(), *module, ty_prov, - std::move(ctrl_flow_provider), mem_prov); + anvill::LifterOptions options(arch.get(), *module, ty_prov, + std::move(ctrl_flow_provider), mem_prov); - // memory and types will not get used and create lifter with null - anvill::EntityLifter lifter(options); + // memory and types will not get used and create lifter with null + anvill::EntityLifter lifter(options); - EntityCrossReferenceResolver xref(lifter); + EntityCrossReferenceResolver xref(lifter); - ConvertAddressesToEntityUses conv(xref); + ConvertAddressesToEntityUses conv(xref); - auto func = module->getFunction("sub_12b30__A_SBI_B_0.6"); + auto func = module->getFunction("sub_12b30__A_SBI_B_0.6"); - REQUIRE(func != nullptr); + REQUIRE(func != nullptr); - llvm::FunctionAnalysisManager fam; + llvm::FunctionAnalysisManager fam; - conv.run(*func, fam); - func->dump(); - } - } + conv.run(*func, fam); + func->dump(); + } } +} // namespace anvill diff --git a/tests/anvill_passes/src/RecoverStackFrameInformation.cpp b/tests/anvill_passes/src/RecoverStackFrameInformation.cpp deleted file mode 100644 index 0a7bd53da..000000000 --- a/tests/anvill_passes/src/RecoverStackFrameInformation.cpp +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright (c) 2019-present, Trail of Bits, Inc. - * All rights reserved. - * - * This source code is licensed in accordance with the terms specified in - * the LICENSE file found in the root directory of this source tree. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "RecoverBasicStackFrame.cpp" -#include "Utils.h" - -namespace anvill { - -TEST_SUITE("RecoverBasicStackFrame") { - TEST_CASE("Run the whole pass on a well-formed function") { - static const StackFrameStructureInitializationProcedure - kInitStackSettings[] = { - StackFrameStructureInitializationProcedure::kNone, - StackFrameStructureInitializationProcedure::kZeroes, - StackFrameStructureInitializationProcedure::kUndef, - StackFrameStructureInitializationProcedure::kSymbolic}; - - static const std::size_t kTestPaddingSettings[] = {0, 32, 64}; - - for (const auto &platform : GetSupportedPlatforms()) { - for (auto init_strategy : kInitStackSettings) { - for (auto padding_bytes : kTestPaddingSettings) { - auto context = anvill::CreateContextWithOpaquePointers(); - auto module = - LoadTestData(*context, "RecoverStackFrameInformation.ll"); - - REQUIRE(module != nullptr); - - auto arch = - remill::Arch::Build(context.get(), remill::GetOSName(platform.os), - remill::GetArchName(platform.arch)); - - REQUIRE(arch != nullptr); - - auto ctrl_flow_provider = - anvill::NullControlFlowProvider(); - - TypeDictionary tyDict(*context); - - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider mem_prov; - anvill::LifterOptions lift_options( - arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov); - - lift_options.stack_frame_recovery_options.stack_frame_struct_init_procedure = init_strategy; - lift_options.stack_frame_recovery_options.stack_frame_lower_padding = - lift_options.stack_frame_recovery_options.stack_frame_higher_padding = padding_bytes / 2U; - - CHECK(RunFunctionPass( - module.get(), RecoverBasicStackFrame(lift_options.stack_frame_recovery_options))); - - } - } - } - } - - - SCENARIO("Function analysis can recreate a simple, byte-array frame type") { - - GIVEN("a lifted function without stack information") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto module = LoadTestData(*context, "RecoverStackFrameInformation.ll"); - REQUIRE(module != nullptr); - - - auto arch = remill::Arch::Build(context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); - REQUIRE(arch != nullptr); - - auto ctrl_flow_provider = - anvill::NullControlFlowProvider(); - - TypeDictionary tyDict(*context); - - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider mem_prov; - anvill::LifterOptions lift_options( - arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov); - - - auto &function_list = module->getFunctionList(); - auto function_it = - std::find_if(function_list.begin(), function_list.end(), - - [](const llvm::Function &function) -> bool { - return !function.empty(); - }); - - REQUIRE(function_it != function_list.end()); - auto &function = *function_it; - WHEN("enumerating stack pointer usages") { - auto stack_ptr_usages = - EnumerateStackPointerUsages(function); - - THEN( - "all the uses for the instruction operands referencing the __anvill_sp symbol are returned") { - - // From the test data, you can see we have 12 instructions referencing - // the `__anvill_sp` symbol. Two of these, are `store` instructions - // that have the symbol on both operands. 
- CHECK(stack_ptr_usages.size() == 14U); - } - } - - WHEN("analyzing the stack frame") { - auto stack_frame_analysis = - AnalyzeStackFrame(function, lift_options.stack_frame_recovery_options); - - THEN("lowest and highest relative offsets are returned") { - - // From the test data, you can see we have 12 instructions referencing - // the `__anvill_sp` symbol - // - // The boundaries we should find are: - // __anvill_sp - 16 - 12 = -28 - // __anvill_sp + 12 = 12 - // - // The high boundary however is not 12, because we are writing a 32-bit - // integer; we have to add sizeof(i32) to it, so it becomes 16 - // - // low = -28 - // high = 16 - // size = 44 - - CHECK(stack_frame_analysis.lowest_offset == -28); - CHECK(stack_frame_analysis.highest_offset == 16); - CHECK(stack_frame_analysis.size == 44U); - - // Usages of the `__anvill_sp` symbol is 14, because two of the 12 - // instructions we have are referencing the stack from both - // operands - CHECK(stack_frame_analysis.instruction_uses.size() == 14U); - } - } - - WHEN("creating a new stack frame with no padding bytes") { - auto stack_frame_analysis = AnalyzeStackFrame( - function, lift_options.stack_frame_recovery_options); - auto stack_frame_word_type = lift_options.arch->AddressType(); - auto stack_frame_type = GenerateStackFrameType( - function, lift_options.stack_frame_recovery_options, - stack_frame_analysis, 0, stack_frame_word_type); - - THEN("a StructType containing a word array is returned") { - REQUIRE(stack_frame_type->getNumElements() == 1U); - - auto function_name = function.getName().str(); - auto expected_frame_type_name = - function_name + kStackFrameTypeNameSuffix; - REQUIRE(stack_frame_type->getName().str() == - expected_frame_type_name); - - auto first_elem_type = stack_frame_type->getElementType(0U); - REQUIRE(first_elem_type->isArrayTy()); - - auto byte_array_type = - llvm::dyn_cast(first_elem_type); - REQUIRE(byte_array_type != nullptr); - - CHECK(stack_frame_analysis.size == 44u); - - auto word_array_size = byte_array_type->getNumElements(); - std::cout << word_array_size << std::endl; - // type is always address size - CHECK(word_array_size == (48u / (lift_options.arch->address_size / 8))); - - auto module = function.getParent(); - auto data_layout = module->getDataLayout(); - auto frame_type_size = data_layout.getTypeAllocSize(stack_frame_type); - CHECK(frame_type_size == 48U); - } - } - - WHEN("creating a new stack frame with additional padding bytes") { - auto stack_frame_analysis = AnalyzeStackFrame( - function, lift_options.stack_frame_recovery_options); - auto stack_frame_word_type = lift_options.arch->AddressType(); - auto stack_frame_type = GenerateStackFrameType( - function, lift_options.stack_frame_recovery_options, - stack_frame_analysis, 128U, stack_frame_word_type); - - THEN( - "a StructType containing a word array along with the padding is returned") { - REQUIRE(stack_frame_type->getNumElements() == 1U); - - auto function_name = function.getName().str(); - auto expected_frame_type_name = - function_name + kStackFrameTypeNameSuffix; - REQUIRE(stack_frame_type->getName().str() == - expected_frame_type_name); - - auto first_elem_type = stack_frame_type->getElementType(0U); - REQUIRE(first_elem_type->isArrayTy()); - - auto byte_array_type = - llvm::dyn_cast(first_elem_type); - REQUIRE(byte_array_type != nullptr); - - CHECK(stack_frame_analysis.size == 44u); - - auto word_array_size = byte_array_type->getNumElements(); - CHECK(word_array_size == (176u / (lift_options.arch->address_size / 8))); - - auto 
module = function.getParent(); - auto data_layout = module->getDataLayout(); - auto frame_type_size = data_layout.getTypeAllocSize(stack_frame_type); - CHECK(frame_type_size == 176U); - } - } - } - } - - SCENARIO("Applying stack frame recovery") { - GIVEN("a well formed function") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto module = LoadTestData(*context, "RecoverStackFrameInformation.ll"); - REQUIRE(module != nullptr); - - auto &function_list = module->getFunctionList(); - auto function_it = - std::find_if(function_list.begin(), function_list.end(), - - [](const llvm::Function &function) -> bool { - return !function.empty(); - }); - - REQUIRE(function_it != function_list.end()); - auto &function = *function_it; - - - auto arch = remill::Arch::Build(context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); - REQUIRE(arch != nullptr); - - auto ctrl_flow_provider = - anvill::NullControlFlowProvider(); - - TypeDictionary tyDict(*context); - - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider mem_prov; - anvill::LifterOptions lift_options( - arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov); - - WHEN("recovering the stack frame") { - auto stack_frame_analysis = AnalyzeStackFrame(function, lift_options.stack_frame_recovery_options); - auto arch = remill::Arch::Build(context.get(), remill::kOSLinux, - remill::kArchAMD64); - - lift_options.stack_frame_recovery_options.stack_frame_struct_init_procedure = - StackFrameStructureInitializationProcedure::kZeroes; - UpdateFunction( - function, lift_options.stack_frame_recovery_options, stack_frame_analysis); - - THEN("the function is updated to use the new stack frame structure") { - auto &entry_block = function.getEntryBlock(); - - // Find the `alloca` instruction that should appear - // as the first instruction in the entry block - llvm::AllocaInst *alloca_inst{nullptr}; - - { - auto first_instr_it = entry_block.begin(); - REQUIRE(first_instr_it != entry_block.end()); - - auto first_instr = &(*first_instr_it); - - alloca_inst = llvm::dyn_cast(first_instr); - } - - CHECK(alloca_inst != nullptr); - - // We have 12 instructions referencing the `__anvill_sp` symbol; however, two - // of those are `store` operations that have 2 references each. 
- // - // We should then have 14 GEP instructions - std::size_t frame_gep_count{0U}; - for (const auto &instr : entry_block) { - auto gep_instr = llvm::dyn_cast(&instr); - if (gep_instr == nullptr) { - continue; - } - - ++frame_gep_count; - } - - CHECK(frame_gep_count == 14U); - - // If we run a second stack analysis, we should no longer find any - // stack frame operation to recover - stack_frame_analysis = AnalyzeStackFrame(function, lift_options.stack_frame_recovery_options); - - CHECK(stack_frame_analysis.instruction_uses.empty()); - } - } - } - } -} - -} // namespace anvill diff --git a/tests/anvill_passes/src/RemoveStackPointerCExprs.cpp b/tests/anvill_passes/src/RemoveStackPointerCExprs.cpp index 60ed2f350..1fd958285 100644 --- a/tests/anvill_passes/src/RemoveStackPointerCExprs.cpp +++ b/tests/anvill_passes/src/RemoveStackPointerCExprs.cpp @@ -1,5 +1,5 @@ +#include #include - #include #include #include @@ -10,7 +10,7 @@ #include #include #include -#include + #include #include "Utils.h" @@ -27,8 +27,8 @@ static llvm::Function *FindFunction(llvm::Module *module, std::string name) { TEST_SUITE("RemoveStackPointerCExprs") { TEST_CASE("RegressionRecoverStack.ll") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*llvm_context, "RegressionRecoverStack.ll"); + llvm::LLVMContext llvm_context; + auto mod = LoadTestData(llvm_context, "RegressionRecoverStack.ll"); auto target_function = FindFunction(mod.get(), "slice"); CHECK(target_function != nullptr); llvm::FunctionPassManager fpm; diff --git a/tests/anvill_passes/src/SinkSelectionsIntoBranchTargets.cpp b/tests/anvill_passes/src/SinkSelectionsIntoBranchTargets.cpp index 6352dd5f6..b027a8a87 100644 --- a/tests/anvill_passes/src/SinkSelectionsIntoBranchTargets.cpp +++ b/tests/anvill_passes/src/SinkSelectionsIntoBranchTargets.cpp @@ -7,32 +7,32 @@ */ #include -#include #include #include +#include #include -#include "Utils.h" + #include +#include "Utils.h" + namespace anvill { TEST_SUITE("SinkSelectionsIntoBranchTargets") { TEST_CASE("Run the whole pass on a well-formed function") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); + llvm::LLVMContext llvm_context; auto module = - LoadTestData(*llvm_context, "SinkSelectionsIntoBranchTargets.ll"); + LoadTestData(llvm_context, "SinkSelectionsIntoBranchTargets.ll"); REQUIRE(module.get() != nullptr); - CHECK(RunFunctionPass( - module.get(), SinkSelectionsIntoBranchTargets())); - + CHECK(RunFunctionPass(module.get(), SinkSelectionsIntoBranchTargets())); } TEST_CASE("SimpleCase") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); + llvm::LLVMContext llvm_context; auto module = - LoadTestData(*llvm_context, "SinkSelectionsIntoBranchTargets.ll"); + LoadTestData(llvm_context, "SinkSelectionsIntoBranchTargets.ll"); REQUIRE(module.get() != nullptr); @@ -44,16 +44,17 @@ TEST_SUITE("SinkSelectionsIntoBranchTargets") { auto dt_res = dt.run(*function, fam); - auto analysis = SinkSelectionsIntoBranchTargets::AnalyzeFunction(dt_res, *function); + auto analysis = + SinkSelectionsIntoBranchTargets::AnalyzeFunction(dt_res, *function); CHECK(analysis.replacement_list.size() == 2U); CHECK(analysis.disposable_instruction_list.size() == 1U); } TEST_CASE("MultipleSelects") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); + llvm::LLVMContext llvm_context; auto module = - LoadTestData(*llvm_context, "SinkSelectionsIntoBranchTargets.ll"); + LoadTestData(llvm_context, "SinkSelectionsIntoBranchTargets.ll"); REQUIRE(module.get() != 
nullptr); @@ -65,16 +66,17 @@ TEST_SUITE("SinkSelectionsIntoBranchTargets") { auto dt_res = dt.run(*function, fam); - auto analysis = SinkSelectionsIntoBranchTargets::AnalyzeFunction(dt_res, *function); + auto analysis = + SinkSelectionsIntoBranchTargets::AnalyzeFunction(dt_res, *function); CHECK(analysis.replacement_list.size() == 6U); CHECK(analysis.disposable_instruction_list.size() == 3U); } TEST_CASE("MultipleSelectUsages") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); + llvm::LLVMContext llvm_context; auto module = - LoadTestData(*llvm_context, "SinkSelectionsIntoBranchTargets.ll"); + LoadTestData(llvm_context, "SinkSelectionsIntoBranchTargets.ll"); REQUIRE(module.get() != nullptr); @@ -86,7 +88,8 @@ TEST_SUITE("SinkSelectionsIntoBranchTargets") { auto dt_res = dt.run(*function, fam); - auto analysis = SinkSelectionsIntoBranchTargets::AnalyzeFunction(dt_res, *function); + auto analysis = + SinkSelectionsIntoBranchTargets::AnalyzeFunction(dt_res, *function); CHECK(analysis.replacement_list.size() == 6U); CHECK(analysis.disposable_instruction_list.size() == 1U); diff --git a/tests/anvill_passes/src/SplitStackFrameAtReturnAddress.cpp b/tests/anvill_passes/src/SplitStackFrameAtReturnAddress.cpp index f8779d108..56242cafd 100644 --- a/tests/anvill_passes/src/SplitStackFrameAtReturnAddress.cpp +++ b/tests/anvill_passes/src/SplitStackFrameAtReturnAddress.cpp @@ -6,8 +6,8 @@ * the LICENSE file found in the root directory of this source tree. */ -#include #include +#include #include #include #include @@ -21,15 +21,13 @@ namespace anvill { TEST_SUITE("SplitStackFrameAtReturnAddress") { TEST_CASE("Run the whole pass on a well-formed function") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); + llvm::LLVMContext llvm_context; auto module = - LoadTestData(*llvm_context, "SplitStackFrameAtReturnAddress.ll"); + LoadTestData(llvm_context, "SplitStackFrameAtReturnAddress.ll"); REQUIRE(module != nullptr); StackFrameRecoveryOptions opt; - CHECK(RunFunctionPass( - module.get(), SplitStackFrameAtReturnAddress(opt))); - + CHECK(RunFunctionPass(module.get(), SplitStackFrameAtReturnAddress(opt))); } } diff --git a/tests/anvill_passes/src/SwitchLoweringPass.cpp b/tests/anvill_passes/src/SwitchLoweringPass.cpp deleted file mode 100644 index b8e087c52..000000000 --- a/tests/anvill_passes/src/SwitchLoweringPass.cpp +++ /dev/null @@ -1,319 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "Utils.h" -namespace anvill { - - -class MockMemProv : public MemoryProvider { - private: - std::map memmap; - const llvm::DataLayout &dl; - uint64_t curr_base; - - public: - MockMemProv(const llvm::DataLayout &dl) : dl(dl), curr_base(0) {} - - std::tuple - Query(uint64_t address) const { - if (this->memmap.find(address) != this->memmap.end()) { - auto val = this->memmap.find(address)->second; - return std::make_tuple(val, ByteAvailability::kAvailable, - BytePermission::kReadable); - } - std::cout << "missed address: " << address << std::endl; - return std::make_tuple(0, ByteAvailability::kUnavailable, - BytePermission::kReadable); - } - - - void SetCurrJumpTableBase(uint64_t baseAddress) { - this->curr_base = baseAddress; - } - - void AddJumpTableOffset(uint32_t offset) { - std::vector data(sizeof(uint32_t)); - if (dl.isLittleEndian()) { - llvm::support::endian::write32le(data.data(), offset); - } else { - 
llvm::support::endian::write32be(data.data(), offset); - } - - for (uint64_t i = 0; i < data.size(); i++) { - this->memmap.insert({this->curr_base + i, data[i]}); - } - - this->curr_base += data.size(); - } -}; - - -namespace { - -static llvm::Function *FindFunction(llvm::Module *module, std::string name) { - for (auto &function : *module) { - if (function.getName().equals(name)) { - return &function; - } - } - return nullptr; -} -} // namespace - -TEST_SUITE("SwitchLowerLargeFunction") { - TEST_CASE("Run on large function") { - auto context = anvill::CreateContextWithOpaquePointers(); - - auto mod = LoadTestData(*context, "SwitchLoweringLarge.ll"); - auto target_function = - FindFunction(mod.get(), "sub_8240110__A_Sbi_Sbii_B_0"); - CHECK(target_function != nullptr); - llvm::FunctionPassManager fpm; - llvm::FunctionAnalysisManager fam; - llvm::ModuleAnalysisManager mam; - llvm::LoopAnalysisManager lam; - llvm::CGSCCAnalysisManager cgam; - - llvm::PassBuilder pb; - - pb.registerFunctionAnalyses(fam); - pb.registerModuleAnalyses(mam); - pb.registerCGSCCAnalyses(cgam); - pb.registerLoopAnalyses(lam); - - pb.crossRegisterProxies(lam, fam, cgam, mam); - - auto arch = remill::Arch::Build(context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); - auto ctrl_flow_provider = anvill::NullControlFlowProvider(); - TypeDictionary tyDict(*context); - - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider null_mem_prov; - anvill::LifterOptions lift_options(arch.get(), *mod, ty_prov, - std::move(ctrl_flow_provider), - null_mem_prov); - EntityLifter lifter(lift_options); - fam.registerPass([&] { return JumpTableAnalysis(lifter); }); - - fpm.addPass(llvm::InstCombinePass()); - auto mem_prov = std::make_shared(mod->getDataLayout()); - - - // this jump table has 30 entries with these possible offsets - // -3209123, -1153321, -1153312, -1153303, -1153287, -1153278 - // the offset for the default lable %41 is -3209123 - // Since there are 30 entries in the table this test assumes the 5 offsets are in order bookending a bunch of default cases - - - mem_prov->SetCurrJumpTableBase(136968824); - mem_prov->AddJumpTableOffset(-1153321); - mem_prov->AddJumpTableOffset(-1153312); - for (int i = 0; i < 25; i++) { - mem_prov->AddJumpTableOffset(-3209123); - } - - mem_prov->AddJumpTableOffset(-1153303); - mem_prov->AddJumpTableOffset(-1153287); - mem_prov->AddJumpTableOffset(-1153278); - - fpm.addPass(LowerSwitchIntrinsics(*mem_prov.get())); - - - const auto &analysis_results = - fam.getResult(*target_function); - - REQUIRE(analysis_results.size() == - 3); // check that we resolve all the switches - - - llvm::BasicBlock *target_block = nullptr; - for (const auto &jumpres : analysis_results) { - // unfortunately values are no longer identifiable by labels because the pass requires the instruction combiner which will now run again so identify switch by first non default pc value. 
- llvm::Value *v = jumpres.first->getArgOperand(2); - const JumpTableResult &res = jumpres.second; - auto interp = res.interp.getInterp(); - REQUIRE(llvm::isa(v)); - auto pc1 = llvm::cast(v); - switch (pc1->getValue().getLimitedValue()) { - case 136577416: - CHECK(res.bounds.lower.getLimitedValue() == 3); - CHECK(res.bounds.upper.getLimitedValue() == 241); - CHECK(!res.bounds.isSigned); - CHECK(res.indexRel.apply(interp, llvm::APInt(8, 5)) == 136967792); - break; - case 136578775: - CHECK(res.bounds.lower.getLimitedValue() == 6); - CHECK(res.bounds.upper.getLimitedValue() == 35); - CHECK(!res.bounds.isSigned); - CHECK(res.indexRel.apply(interp, llvm::APInt(8, 35)) == 136968940); - target_block = jumpres.first->getParent(); - break; - case 136578559: - CHECK(res.bounds.lower.getLimitedValue() == 26); - CHECK(res.bounds.upper.getLimitedValue() == 46); - CHECK(!res.bounds.isSigned); - CHECK(res.indexRel.apply(interp, llvm::APInt(8, 32)) == 136968764); - break; - default: CHECK(false); - } - } - - fpm.run(*target_function, fam); - - REQUIRE(target_block != nullptr); - llvm::SwitchInst *lowered_switch = - llvm::cast(target_block->getTerminator()); - - CHECK(lowered_switch->getNumCases() == 5); - - - llvm::SmallSet allowed_indices; - allowed_indices.insert(6); - allowed_indices.insert(7); - allowed_indices.insert(33); - allowed_indices.insert(34); - allowed_indices.insert(35); - - for (auto c : lowered_switch->cases()) { - CHECK(allowed_indices.contains( - c.getCaseValue()->getValue().getLimitedValue())); - } - - - lam.clear(); - fam.clear(); - mam.clear(); - cgam.clear(); - } - - TEST_CASE("Try negative Index") { - auto context = anvill::CreateContextWithOpaquePointers(); - auto mod = LoadTestData(*context, "SwitchLoweringNeg.ll"); - auto target_function = FindFunction(mod.get(), "_start"); - CHECK(target_function != nullptr); - llvm::FunctionPassManager fpm; - llvm::FunctionAnalysisManager fam; - llvm::ModuleAnalysisManager mam; - llvm::LoopAnalysisManager lam; - llvm::CGSCCAnalysisManager cgam; - - llvm::PassBuilder pb; - - pb.registerFunctionAnalyses(fam); - pb.registerModuleAnalyses(mam); - pb.registerCGSCCAnalyses(cgam); - pb.registerLoopAnalyses(lam); - - pb.crossRegisterProxies(lam, fam, cgam, mam); - - auto arch = remill::Arch::Build(context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); - auto ctrl_flow_provider = anvill::NullControlFlowProvider(); - TypeDictionary tyDict(*context); - - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider null_mem_prov; - anvill::LifterOptions lift_options(arch.get(), *mod, ty_prov, - std::move(ctrl_flow_provider), - null_mem_prov); - EntityLifter lifter(lift_options); - - fam.registerPass([&] { return JumpTableAnalysis(lifter); }); - - - auto mem_prov = std::make_shared(mod->getDataLayout()); - - - mem_prov->SetCurrJumpTableBase(4294983520); - mem_prov->AddJumpTableOffset(0x10); - mem_prov->AddJumpTableOffset(0x3c); - mem_prov->AddJumpTableOffset(0x3c); - mem_prov->AddJumpTableOffset(0x1c); - mem_prov->AddJumpTableOffset(0x28); - mem_prov->AddJumpTableOffset(0x3c); - mem_prov->AddJumpTableOffset(0x3c); - mem_prov->AddJumpTableOffset(0x30); - - fpm.addPass(llvm::InstCombinePass()); - fpm.addPass(LowerSwitchIntrinsics(*mem_prov)); - - - const auto &analysis_results = - fam.getResult(*target_function); - - REQUIRE(analysis_results.size() == - 1); // check that we resolve all the switches - - - for (const auto &jumpres : analysis_results) { - auto interp = jumpres.second.interp.getInterp(); - // unfortunately values are no longer 
identifiable by labels because the pass requires the instruction combiner which will now run again so identify switch by first non default pc value. - llvm::Value *v = jumpres.first->getArgOperand(2); - const JumpTableResult &res = jumpres.second; - REQUIRE(llvm::isa(v)); - auto pc1 = llvm::cast(v); - switch (pc1->getValue().getLimitedValue()) { - case 4294983464: - CHECK(res.bounds.lower == llvm::APInt(32, -4, true)); - CHECK(res.bounds.upper == llvm::APInt(32, 3, true)); - CHECK(res.bounds.isSigned); - CHECK(res.indexRel.apply(interp, llvm::APInt(32, -3, true)) - .getLimitedValue() == 4294983524); - break; - default: CHECK(false); - } - } - - fpm.run(*target_function, fam); - - - llvm::SwitchInst *lowered_switch = nullptr; - - for (auto &inst : llvm::instructions(target_function)) { - if (auto sw = llvm::dyn_cast(&inst)) { - lowered_switch = sw; - } - } - - REQUIRE(lowered_switch != nullptr); - - CHECK(lowered_switch->getNumCases() == 4); - - llvm::SmallSet allowed_indices; - allowed_indices.insert(llvm::APInt(32, -4).getLimitedValue()); - allowed_indices.insert(llvm::APInt(32, -1).getLimitedValue()); - allowed_indices.insert(llvm::APInt(32, -0).getLimitedValue()); - allowed_indices.insert(llvm::APInt(32, 3).getLimitedValue()); - - for (auto c : lowered_switch->cases()) { - CHECK(allowed_indices.contains( - c.getCaseValue()->getValue().getLimitedValue())); - } - - fam.clear(); - cgam.clear(); - lam.clear(); - mam.clear(); - } -} - -} // namespace anvill diff --git a/tests/anvill_passes/src/TestAbstractStackBB.cpp b/tests/anvill_passes/src/TestAbstractStackBB.cpp new file mode 100644 index 000000000..87245e93e --- /dev/null +++ b/tests/anvill_passes/src/TestAbstractStackBB.cpp @@ -0,0 +1,51 @@ + + +// basic_block_func4199701 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "Utils.h" + + +namespace anvill { + + +/* +Register pass plan: +1. Iterate through all available parameter decls, declaring them in the signature. +2. Call StoreNativeValue to store the argument representing each parameter into the physical location in the state +3. Apply SROA to the new clone +4. Replace all calls to the basic block function with the clone (should just be one, but handle all) +5. When calling the basic block function, we now need to call LoadLiftedValue on the parameter decl for each physical location + +Stack pass plan: +1. Add a stack parameter that’s just a byte array, created in the parent, whose size is the stack size. +2. Identify remill_reads and writes and call something that looks like the xref resolver on them. The only trick is that you need to record when you hit a register and then check whether that register is holding some stack offset, taking the register+stack_offset_in_that_register+the offset computed on the path to finding that register (i.e. the xref resolver will be calculating the total displacement along the way) +3. Then we redirect the remill_read to a load from the stack variable at the computed stack offset +4. 
This could get arbitrarily more complicated when handling expressions built up over multiple registers and array indexing with multiplication over an index register, so there is stuff to work on here (maybe propagating the abstract domain forward as a separate affine analysis) +*/ + + +TEST_SUITE("Basic Block tests") { + TEST_CASE("Convert parameters") { + llvm::LLVMContext llvm_context; + auto module = LoadTestData(llvm_context, "MainBasicBlocks.ll"); + auto bb_func = module->getFunction("basic_block_func4199701"); + bb_func->dump(); + } +} +} // namespace anvill diff --git a/tests/anvill_passes/src/TransformRemillJump.cpp b/tests/anvill_passes/src/TransformRemillJump.cpp index 860e66844..361bfab02 100644 --- a/tests/anvill_passes/src/TransformRemillJump.cpp +++ b/tests/anvill_passes/src/TransformRemillJump.cpp @@ -6,49 +6,50 @@ * the LICENSE file found in the root directory of this source tree. */ +#include #include +#include +#include #include #include #include #include #include #include -#include -#include + #include -#include -#include #include "Utils.h" namespace anvill { TEST_SUITE("TransformRemillJump_Test0") { TEST_CASE("Run the pass on function having _remill_jump as tail call") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); - auto module = LoadTestData(*llvm_context, "TransformRemillJumpData0.ll"); + llvm::LLVMContext llvm_context; + auto module = LoadTestData(llvm_context, "TransformRemillJumpData0.ll"); - auto arch = - remill::Arch::Build(llvm_context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); + auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"), + remill::GetArchName("amd64")); REQUIRE(arch != nullptr); - auto ctrl_flow_provider = - anvill::NullControlFlowProvider(); - TypeDictionary tyDict(*llvm_context); + auto ctrl_flow_provider = anvill::NullControlFlowProvider(); + TypeDictionary tyDict(llvm_context); - NullTypeProvider ty_prov(tyDict); - NullMemoryProvider mem_prov; - anvill::LifterOptions lift_options( - arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov); + NullTypeProvider ty_prov(tyDict); + NullMemoryProvider mem_prov; + anvill::LifterOptions lift_options(arch.get(), *module, ty_prov, + std::move(ctrl_flow_provider), mem_prov); - anvill::LifterOptions options(arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov); + anvill::LifterOptions options(arch.get(), *module, ty_prov, + std::move(ctrl_flow_provider), mem_prov); // memory and types will not get used and create lifter with null anvill::EntityLifter lifter(options); EntityCrossReferenceResolver xref(lifter); + module->getFunction("__remill_intrinsics")->eraseFromParent(); + CHECK(RunFunctionPass(module.get(), TransformRemillJumpIntrinsics(xref))); const auto ret_func = module->getFunction("__remill_function_return"); @@ -61,29 +62,28 @@ TEST_SUITE("TransformRemillJump_Test0") { TEST_SUITE("TransformRemillJump_Test1") { TEST_CASE("Run the pass on function having _remill_jump as tail call") { - auto llvm_context = anvill::CreateContextWithOpaquePointers(); - auto module = LoadTestData(*llvm_context, "TransformRemillJumpData1.ll"); + llvm::LLVMContext llvm_context; + auto module = LoadTestData(llvm_context, "TransformRemillJumpData1.ll"); - auto arch = - remill::Arch::Build(llvm_context.get(), remill::GetOSName("linux"), - remill::GetArchName("amd64")); + auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"), + remill::GetArchName("amd64")); REQUIRE(arch != nullptr); - auto ctrl_flow_provider = - 
-        anvill::NullControlFlowProvider();
-    TypeDictionary tyDict(*llvm_context);
+    auto ctrl_flow_provider = anvill::NullControlFlowProvider();
+    TypeDictionary tyDict(llvm_context);
 
-    NullTypeProvider ty_prov(tyDict);
-    NullMemoryProvider mem_prov;
-    anvill::LifterOptions lift_options(
-        arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov);
+    NullTypeProvider ty_prov(tyDict);
+    NullMemoryProvider mem_prov;
+    anvill::LifterOptions lift_options(arch.get(), *module, ty_prov,
+                                       std::move(ctrl_flow_provider), mem_prov);
 
-    anvill::LifterOptions options(arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov);
+    anvill::LifterOptions options(arch.get(), *module, ty_prov,
+                                  std::move(ctrl_flow_provider), mem_prov);
 
     // memory and types will not get used and create lifter with null
     anvill::EntityLifter lifter(options);
 
     EntityCrossReferenceResolver xref(lifter);
-
+    module->getFunction("__remill_intrinsics")->eraseFromParent();
 
     CHECK(RunFunctionPass(module.get(), TransformRemillJumpIntrinsics(xref)));
 
     const auto ret_func = module->getFunction("__remill_function_return");
@@ -97,30 +97,30 @@ TEST_SUITE("TransformRemillJump_Test1") {
 
 TEST_SUITE("TransformRemillJump_ARM32_0") {
   TEST_CASE("Run the pass on function having _remill_jump as tail call") {
-    auto llvm_context = anvill::CreateContextWithOpaquePointers();
+    llvm::LLVMContext llvm_context;
     auto module =
-        LoadTestData(*llvm_context, "TransformRemillJumpDataARM32_0.ll");
+        LoadTestData(llvm_context, "TransformRemillJumpDataARM32_0.ll");
 
-    auto arch =
-        remill::Arch::Build(llvm_context.get(), remill::GetOSName("linux"),
-                            remill::GetArchName("aarch32"));
+    auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"),
+                                    remill::GetArchName("aarch32"));
 
     REQUIRE(arch != nullptr);
 
     auto ctrl_flow_provider = anvill::NullControlFlowProvider();
-    TypeDictionary tyDict(*llvm_context);
+    TypeDictionary tyDict(llvm_context);
 
     NullTypeProvider ty_prov(tyDict);
     NullMemoryProvider mem_prov;
     anvill::LifterOptions lift_options(arch.get(), *module, ty_prov,
                                        std::move(ctrl_flow_provider), mem_prov);
 
-    anvill::LifterOptions options(arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov);
+    anvill::LifterOptions options(arch.get(), *module, ty_prov,
+                                  std::move(ctrl_flow_provider), mem_prov);
 
     // memory and types will not get used and create lifter with null
     anvill::EntityLifter lifter(options);
 
     EntityCrossReferenceResolver xref(lifter);
-
+    module->getFunction("__remill_intrinsics")->eraseFromParent();
 
     CHECK(RunFunctionPass(module.get(), TransformRemillJumpIntrinsics(xref)));
 
     const auto ret_func = module->getFunction("__remill_function_return");
@@ -134,30 +134,30 @@ TEST_SUITE("TransformRemillJump_ARM32_0") {
 
 TEST_SUITE("TransformRemillJump_ARM32_1") {
   TEST_CASE("Run the pass on function having _remill_jump as tail call") {
-    auto llvm_context = anvill::CreateContextWithOpaquePointers();
+    llvm::LLVMContext llvm_context;
    auto module =
-        LoadTestData(*llvm_context, "TransformRemillJumpDataARM32_1.ll");
+        LoadTestData(llvm_context, "TransformRemillJumpDataARM32_1.ll");
 
-    auto arch =
-        remill::Arch::Build(llvm_context.get(), remill::GetOSName("linux"),
-                            remill::GetArchName("aarch32"));
+    auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"),
+                                    remill::GetArchName("aarch32"));
 
     REQUIRE(arch != nullptr);
 
     auto ctrl_flow_provider = anvill::NullControlFlowProvider();
-    TypeDictionary tyDict(*llvm_context);
+    TypeDictionary tyDict(llvm_context);
 
     NullTypeProvider ty_prov(tyDict);
     NullMemoryProvider mem_prov;
     anvill::LifterOptions lift_options(arch.get(), *module, ty_prov,
                                        std::move(ctrl_flow_provider), mem_prov);
 
-    anvill::LifterOptions options(arch.get(), *module,ty_prov,std::move(ctrl_flow_provider),mem_prov);
+    anvill::LifterOptions options(arch.get(), *module, ty_prov,
+                                  std::move(ctrl_flow_provider), mem_prov);
 
     // memory and types will not get used and create lifter with null
     anvill::EntityLifter lifter(options);
 
     EntityCrossReferenceResolver xref(lifter);
-
+    module->getFunction("__remill_intrinsics")->eraseFromParent();
 
     CHECK(RunFunctionPass(module.get(), TransformRemillJumpIntrinsics(xref)));
 
     const auto ret_func = module->getFunction("__remill_function_return");
diff --git a/tests/anvill_passes/src/Utils.cpp b/tests/anvill_passes/src/Utils.cpp
index 9caeba656..51f41c9bd 100644
--- a/tests/anvill_passes/src/Utils.cpp
+++ b/tests/anvill_passes/src/Utils.cpp
@@ -80,12 +80,4 @@ const PlatformList &GetSupportedPlatforms(void) {
   return kSupportedPlatforms;
 }
 
-std::unique_ptr<llvm::LLVMContext> CreateContextWithOpaquePointers(void) {
-  auto context = std::make_unique<llvm::LLVMContext>();
-#if LLVM_VERSION_NUMBER < LLVM_VERSION(15, 0)
-  context->enableOpaquePointers();
-#endif
-  return context;
-}
-
 } // namespace anvill
diff --git a/tests/anvill_passes/src/Utils.h b/tests/anvill_passes/src/Utils.h
index 029664c33..a10921720 100644
--- a/tests/anvill_passes/src/Utils.h
+++ b/tests/anvill_passes/src/Utils.h
@@ -57,6 +57,4 @@ struct Platform final {
 using PlatformList = std::vector<Platform>;
 
 const PlatformList &GetSupportedPlatforms(void);
-std::unique_ptr<llvm::LLVMContext> CreateContextWithOpaquePointers(void);
-
 } // namespace anvill
diff --git a/tests/anvill_passes/src/VectorRW.cpp b/tests/anvill_passes/src/VectorRW.cpp
new file mode 100644
index 000000000..5d5a3d154
--- /dev/null
+++ b/tests/anvill_passes/src/VectorRW.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019-present, Trail of Bits, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed in accordance with the terms specified in
+ * the LICENSE file found in the root directory of this source tree.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "Utils.h"
+
+namespace anvill {
+
+static std::unique_ptr<llvm::Module>
+runVectorRW(llvm::LLVMContext &llvm_context, const std::string &module_name,
+            const std::string &function_name) {
+
+  auto module = LoadTestData(llvm_context, module_name);
+
+  auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"),
+                                  remill::GetArchName("amd64"));
+
+  REQUIRE(arch != nullptr);
+
+  CHECK(RunFunctionPass(module.get(), RewriteVectorOps()));
+
+  // After the rewrite, no function should still contain the vector
+  // instructions this pass is meant to eliminate.
+  for (auto &f : module->functions()) {
+    for (auto &insn : llvm::instructions(f)) {
+      CHECK(!llvm::isa(&insn));
+    }
+  }
+
+  return module;
+}
+
+
+TEST_SUITE("Devectorize") {
+  TEST_CASE("Devectorize Blend") {
+    llvm::LLVMContext llvm_context;
+    auto mod = runVectorRW(llvm_context, "VectorToRewrite.ll", "f");
+    mod->dump();
+  }
+
+  TEST_CASE("Small Vec") {
+    llvm::LLVMContext llvm_context;
+    auto mod = runVectorRW(llvm_context, "VectorRewriteSmall.ll", "f");
+    mod->dump();
+  }
+}
+
+
+} // namespace anvill
diff --git a/tests/anvill_passes/src/XorConversionPass.cpp b/tests/anvill_passes/src/XorConversionPass.cpp
index cfe9baf84..f533ff3f4 100644
--- a/tests/anvill_passes/src/XorConversionPass.cpp
+++ b/tests/anvill_passes/src/XorConversionPass.cpp
@@ -42,12 +42,11 @@ static std::tuple
 runXorRemovalPassCountXors(const std::string &module_name,
                            const std::string &function_name) {
 
-  auto llvm_context = anvill::CreateContextWithOpaquePointers();
-  auto module = LoadTestData(*llvm_context, module_name);
+  llvm::LLVMContext llvm_context;
+  auto module = LoadTestData(llvm_context, module_name);
 
-  auto arch =
-      remill::Arch::Build(llvm_context.get(), remill::GetOSName("linux"),
-                          remill::GetArchName("amd64"));
+  auto arch = remill::Arch::Build(&llvm_context, remill::GetOSName("linux"),
+                                  remill::GetArchName("amd64"));
 
   REQUIRE(arch != nullptr);
diff --git a/tests/tools/src/TypeSpecification.cpp b/tests/tools/src/TypeSpecification.cpp
index 1b9f71aa3..e2b2e2d86 100644
--- a/tests/tools/src/TypeSpecification.cpp
+++ b/tests/tools/src/TypeSpecification.cpp
@@ -7,7 +7,6 @@
  */
 
 #include
-
 #include
 #include
 #include
@@ -40,9 +39,6 @@ TEST_SUITE("TypeSpecifier") {
     for (const auto &test_entry : kTestEntryList) {
       llvm::LLVMContext llvm_context;
-#if LLVM_VERSION_NUMBER < LLVM_VERSION(15, 0)
-      llvm_context.enableOpaquePointers();
-#endif
       llvm::DataLayout dl("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
 
       anvill::TypeDictionary type_dict(llvm_context);
@@ -75,9 +71,6 @@ TEST_SUITE("TypeSpecifier") {
     };
 
     llvm::LLVMContext llvm_context;
-#if LLVM_VERSION_NUMBER < LLVM_VERSION(15, 0)
-    llvm_context.enableOpaquePointers();
-#endif
 
     llvm::Module module("TypeSpecifierTests", llvm_context);
     const auto &data_layout = module.getDataLayout();
@@ -228,9 +221,6 @@ TEST_SUITE("TypeSpecifier") {
     };
 
     llvm::LLVMContext llvm_context;
-#if LLVM_VERSION_NUMBER < LLVM_VERSION(15, 0)
-    llvm_context.enableOpaquePointers();
-#endif
 
     llvm::Module module("TypeSpecifierTests", llvm_context);
     const auto &data_layout = module.getDataLayout();
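
A rough C++ sketch of the "Register pass plan" from TestAbstractStackBB.cpp above, covering steps 1 and 2 only. Everything here is illustrative, not anvill's implementation: ToyParamDecl, the flat i64 state-array model, and SpillArgsIntoState are stand-ins for anvill's ParameterDecl and StoreNativeValue machinery, and all parameters are assumed to be i64.

#include <llvm/IR/Function.h>
#include <llvm/IR/IRBuilder.h>

#include <vector>

// Hypothetical stand-in for anvill::ParameterDecl: the parameter's physical
// location is slot `state_slot` of a flat i64 "state" array.
struct ToyParamDecl {
  unsigned state_slot;
};

// Steps 1-2 of the plan: the basic-block function already has one LLVM
// argument per parameter decl; on entry, spill each argument into its
// physical slot so that SROA and the later call-site rewrite see plain
// loads and stores. (The real pass would call StoreNativeValue here.)
static void SpillArgsIntoState(llvm::Function &bb_func,
                               llvm::Value *state_array,
                               const std::vector<ToyParamDecl> &decls) {
  llvm::BasicBlock &entry = bb_func.getEntryBlock();
  llvm::IRBuilder<> ir(&entry, entry.begin());
  llvm::Type *i64 = ir.getInt64Ty();
  for (unsigned i = 0; i < decls.size(); ++i) {
    // slot = &state_array[decls[i].state_slot]; assumes i64 parameters.
    llvm::Value *slot =
        ir.CreateGEP(i64, state_array, ir.getInt64(decls[i].state_slot));
    ir.CreateStore(bb_func.getArg(i), slot);
  }
}

The inverse direction (step 5, LoadLiftedValue at each call site) is symmetric: load from the same slots and pass the results as call arguments.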
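Likewise, a minimal sketch of steps 2-3 of the "Stack pass plan": fold the address expression into stack + constant displacement, then redirect the read to a load from the stack byte array. The constant add-chain walk below stands in for the xref-resolver-style displacement analysis described in the comment; it assumes the address arithmetic has already been rewritten as pointer arithmetic rooted at the stack argument, and that the remill read's address is argument operand 1. MatchStackOffset and RedirectStackRead are hypothetical names, not anvill APIs.

#include <llvm/IR/Argument.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/Instructions.h>

#include <cstdint>
#include <optional>

// Try to prove addr == stack + disp by folding a chain of constant adds
// rooted at the stack argument. The real analysis would track registers
// holding stack offsets and accumulate displacement along the way.
static std::optional<int64_t> MatchStackOffset(llvm::Value *addr,
                                               llvm::Argument *stack) {
  int64_t disp = 0;
  while (addr != stack) {
    auto *add = llvm::dyn_cast<llvm::BinaryOperator>(addr);
    if (!add || add->getOpcode() != llvm::Instruction::Add) {
      return std::nullopt;  // Not expressible as stack + constant.
    }
    auto *c = llvm::dyn_cast<llvm::ConstantInt>(add->getOperand(1));
    if (!c) {
      return std::nullopt;
    }
    disp += c->getSExtValue();
    addr = add->getOperand(0);
  }
  return disp;
}

// Step 3: a remill memory read whose address is provably stack-relative
// becomes a direct load from the stack byte array at that offset.
static void RedirectStackRead(llvm::CallInst *remill_read,
                              llvm::Argument *stack) {
  auto disp = MatchStackOffset(remill_read->getArgOperand(1), stack);
  if (!disp) {
    return;  // Leave reads we cannot prove stack-relative untouched.
  }
  llvm::IRBuilder<> ir(remill_read);
  llvm::Value *slot = ir.CreateGEP(ir.getInt8Ty(), stack, ir.getInt64(*disp));
  llvm::Value *loaded = ir.CreateLoad(remill_read->getType(), slot);
  remill_read->replaceAllUsesWith(loaded);
  remill_read->eraseFromParent();
}

As the plan's step 4 notes, multi-register expressions and scaled index arithmetic would defeat this simple walk; that is where a forward affine analysis would take over.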