diff --git a/.cirrus.yml b/.cirrus.yml
index e65a565301126..2dd109af6a1b8 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -29,15 +29,15 @@ task:
   functional_test_script:
     - ./test/functional/test_runner.py --jobs 9 --ci --extended --exclude feature_dbcrash --combinedlogslen=1000 --quiet --failfast
 task:
-  name: "x86_64 Linux [GOAL: install] [bionic] [Using ./ci/ system]"
+  name: "x86_64 Linux [GOAL: install] [focal] [Using ./ci/ system]"
   container:
-    image: ubuntu:18.04
+    image: ubuntu:focal
     cpu: 8
     memory: 8G
   timeout_in: 60m
   env:
     MAKEJOBS: "-j9"
-    RUN_CI_ON_HOST: "1"
+    DANGER_RUN_CI_ON_HOST: "1"
     CCACHE_SIZE: "200M"
     CCACHE_DIR: "/tmp/ccache_dir"
   ccache_cache:
diff --git a/.fuzzbuzz.yml b/.fuzzbuzz.yml
index be9a1cd4e12e8..1f4a5a5555692 100644
--- a/.fuzzbuzz.yml
+++ b/.fuzzbuzz.yml
@@ -1,4 +1,4 @@
-base: ubuntu:16.04
+base: ubuntu:focal
 language: c++
 engine: libFuzzer
 environment:
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000000..09464471afe78
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,41 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: Bug
+assignees: ''
+
+---
+
+
+
+
+**Expected behavior**
+
+
+
+**Actual behavior**
+
+
+
+**To reproduce**
+
+
+
+**System information**
+
+
+
+
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000..2d5685185ea36
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: Feature
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+
+
+**Describe the solution you'd like**
+
+
+**Describe alternatives you've considered**
+
+
+**Additional context**
+
diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.md b/.github/ISSUE_TEMPLATE/good_first_issue.md
index 158ab21660a2e..9ee7e2e80a3d0 100644
--- a/.github/ISSUE_TEMPLATE/good_first_issue.md
+++ b/.github/ISSUE_TEMPLATE/good_first_issue.md
@@ -2,11 +2,13 @@
 name: Good first issue
 about: '(Regular devs only): Suggest a new good first issue'
 title: ''
-labels: good first issue
+labels: ''
 assignees: ''
 
 ---
 
+
+
 
 #### Useful skills:
diff --git a/.github/workflows/guix-build.yml b/.github/workflows/guix-build.yml
new file mode 100644
index 0000000000000..34a83e99fc5f2
--- /dev/null
+++ b/.github/workflows/guix-build.yml
@@ -0,0 +1,69 @@
+name: Guix Build
+
+on:
+  pull_request:
+    types: [ labeled ]
+
+jobs:
+  build:
+    runs-on: self-hosted
+    if: contains(github.event.pull_request.labels.*.name, 'guix-build')
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Commit variables
+        id: dockerfile
+        run: |
+          echo "hash=$(sha256sum ./contrib/containers/guix/Dockerfile | cut -d ' ' -f1)" >> $GITHUB_OUTPUT
+          echo "host_user_id=$(id -u)" >> $GITHUB_OUTPUT
+          echo "host_group_id=$(id -g)" >> $GITHUB_OUTPUT
+
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ steps.dockerfile.outputs.hash }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+
+      - name: Build Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: ${{ github.workspace }}
+          build-args: |
+            USER_ID=${{ steps.dockerfile.outputs.host_user_id }}
+            GROUP_ID=${{ steps.dockerfile.outputs.host_group_id }}
+          build-contexts: |
+            docker_root=${{ github.workspace }}/contrib/containers/guix
+          file: ./contrib/containers/guix/Dockerfile
+          load: true
+          tags: guix_ubuntu:latest
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
+
+      - name: Run Guix build
+        run: |
+          docker run --privileged -d --rm -t \
+            --name guix-daemon \
+            -e ADDITIONAL_GUIX_COMMON_FLAGS="--max-jobs=$(nproc --all)" \
+            -v ${{ github.workspace }}:/src/dash \
+            -w /src/dash \
+            guix_ubuntu:latest && \
+          docker exec guix-daemon bash -c '/usr/local/bin/guix-start'
+
+      - name: Ensure build passes
+        run: |
+          if [[ $? != 0 ]]; then
+            echo "Guix build failed!"
+            exit 1
+          fi
+
+      - name: Compute SHA256 checksums
+        run: |
+          ./contrib/containers/guix/scripts/guix-check ${{ github.workspace }}
diff --git a/.github/workflows/handle_potential_conflicts.py b/.github/workflows/handle_potential_conflicts.py
old mode 100644
new mode 100755
index bd4232752f7ee..c669685ac640a
--- a/.github/workflows/handle_potential_conflicts.py
+++ b/.github/workflows/handle_potential_conflicts.py
@@ -1,23 +1,37 @@
-# Copyright (c) 2022 The Dash Core developers
+#!/usr/bin/env python3
+# Copyright (c) 2022-2023 The Dash Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+
+Usage:
+    $ ./handle_potential_conflicts.py <conflicts>
+
+Where <conflicts> is a json string which looks like
+    { pull_number: 26,
+      conflictPrs:
+      [ { number: 15,
+          files: [ 'testfile1', 'testfile2' ],
+          conflicts: [ 'testfile1' ] },
+        ...
+      ]}
+"""
+
 import sys
 import requests # need to install via pip
 import hjson
 
-'''Looks like'''
-'''{ pull_number: 26,
-  conflictPrs:
-   [ { number: 25,
-       files: [ '.github/workflows/testfile' ],
-       conflicts: [ '.github/workflows/testfile' ] }
-     { number: 24,
-       files: [ '.github/workflows/testfile' ],
-       conflicts: [ '.github/workflows/testfile' ] } ] }'''
+def get_label(pr_num):
+    return requests.get(f'https://api.github.com/repos/dashpay/dash/pulls/{pr_num}').json()['head']['label']
 
 def main():
+    if len(sys.argv) != 2:
+        print(f'Usage: {sys.argv[0]} <conflicts>', file=sys.stderr)
+        sys.exit(1)
+
     input = sys.argv[1]
     print(input)
     j_input = hjson.loads(input)
@@ -58,10 +72,5 @@ def main():
         sys.exit(1)
 
-
-def get_label(pr_num):
-    return requests.get(f'https://api.github.com/repos/dashpay/dash/pulls/{pr_num}').json()['head']['label']
-
-
 if __name__ == "__main__":
     main()
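The docstring above is effectively the script's contract: argv[1] carries the whole payload. A minimal local smoke test of that contract, assuming it is run from the repository root (the PR numbers and file names below are illustrative, not real data):

#!/usr/bin/env python3
import json
import subprocess

# Payload in the shape the docstring documents. hjson is a superset of JSON,
# so a plain json.dumps() result is valid input for hjson.loads().
payload = {
    "pull_number": 26,
    "conflictPrs": [
        {
            "number": 15,
            "files": ["testfile1", "testfile2"],
            "conflicts": ["testfile1"],
        },
    ],
}

# Invoke the script the same way the predict-conflicts workflow now does:
# directly from the checkout, with the payload as the single argument.
subprocess.run(
    [".github/workflows/handle_potential_conflicts.py", json.dumps(payload)],
    check=False,
)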
diff --git a/.github/workflows/predict-conflicts.yml b/.github/workflows/predict-conflicts.yml
index 45969660b0cca..ab364d65a70a4 100644
--- a/.github/workflows/predict-conflicts.yml
+++ b/.github/workflows/predict-conflicts.yml
@@ -21,8 +21,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: check for potential conflicts
-        uses: PastaPastaPasta/potential-conflicts-checker-action@0.1.9
+        uses: PastaPastaPasta/potential-conflicts-checker-action@v0.1.10
         with:
           ghToken: "${{ secrets.GITHUB_TOKEN }}"
+      - name: Checkout
+        uses: actions/checkout@v2
       - name: validate potential conflicts
-        run: wget https://raw.githubusercontent.com/dashpay/dash/develop/.github/workflows/handle_potential_conflicts.py && pip3 install hjson && python3 handle_potential_conflicts.py "$conflicts"
+        run: pip3 install hjson && .github/workflows/handle_potential_conflicts.py "$conflicts"
diff --git a/.gitignore b/.gitignore
index 8b12484408376..dffb962e942dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -95,8 +95,6 @@ Makefile
 !depends/Makefile
 dash-qt
 Dash-Qt.app
-background.tiff*
-!/depends/Makefile
 
 # Qt Creator
 Makefile.am.user
@@ -157,12 +155,11 @@ cmake-build-debug
 
 osx_volname
 dist/
-*.background.tiff
 /guix-build-*
 
 # cppcheck cache-directory
-.cppcheck/*
+test/lint/.cppcheck/*
 
 # flake8 cache location
 .cache/*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 755d40a4c90cf..489b9606e176d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,4 +1,4 @@
-image: "ubuntu:bionic"
+image: "ubuntu:focal"
 
 variables:
   DOCKER_DRIVER: overlay2
@@ -35,16 +35,20 @@ builder-image:
 
 .build-depends-template:
   stage: build-depends
+  rules:
+    - when: on_success
+  needs:
+    - builder-image
   image: $CI_REGISTRY_IMAGE:builder-$CI_COMMIT_REF_SLUG
   variables:
     SDK_URL: https://bitcoincore.org/depends-sources/sdks
-    XCODE_VERSION: "12.1"
-    XCODE_BUILD_ID: 12A7403
+    XCODE_VERSION: "12.2"
+    XCODE_BUILD_ID: 12B45b
     MAKEJOBS: -j4
   before_script:
     - echo HOST=$HOST
     - |
-      if [ "$HOST" = "x86_64-apple-darwin19" ]; then
+      if [ "$HOST" = "x86_64-apple-darwin" ]; then
        mkdir -p depends/SDKs
        mkdir -p depends/sdk-sources
        OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers.tar.gz"
@@ -78,6 +82,8 @@ builder-image:
 
 .base-template:
   image: $CI_REGISTRY_IMAGE:builder-$CI_COMMIT_REF_SLUG
+  rules:
+    - when: on_success
  before_script:
    - export CACHE_DIR=$CI_PROJECT_DIR/cache
    - echo BUILD_TARGET=$BUILD_TARGET
@@ -157,7 +163,7 @@ builder-image:
   rules:
     - if: '$FAST_MODE == "true"'
       when: never
-    - when: always
+    - when: on_success
 
 ###
 
@@ -173,13 +179,6 @@ x86_64-w64-mingw32:
   variables:
     HOST: x86_64-w64-mingw32
 
-i686-pc-linux-gnu:
-  extends:
-    - .build-depends-template
-    - .skip-in-fast-mode-template
-  variables:
-    HOST: i686-pc-linux-gnu
-
 x86_64-pc-linux-gnu-debug:
   extends: .build-depends-template
   variables:
@@ -194,12 +193,12 @@ x86_64-pc-linux-gnu-nowallet:
     HOST: x86_64-pc-linux-gnu
     DEP_OPTS: "NO_WALLET=1"
 
-x86_64-apple-darwin19:
+x86_64-apple-darwin:
   extends:
     - .build-depends-template
     - .skip-in-fast-mode-template
   variables:
-    HOST: x86_64-apple-darwin19
+    HOST: x86_64-apple-darwin
 
 ###
 
@@ -219,24 +218,6 @@ win64-build:
   variables:
     BUILD_TARGET: win64
 
-linux32-build:
-  extends:
-    - .build-template
-    - .skip-in-fast-mode-template
-  needs:
-    - i686-pc-linux-gnu
-  variables:
-    BUILD_TARGET: linux32
-
-linux32_ubsan-build:
-  extends:
-    - .build-template
-    - .skip-in-fast-mode-template
-  needs:
-    - i686-pc-linux-gnu
-  variables:
-    BUILD_TARGET: linux32_ubsan
-
 linux64-build:
   extends: .build-template
   needs:
@@ -245,26 +226,41 @@ linux64-build:
     BUILD_TARGET: linux64
 
 linux64_cxx20-build:
-  extends: .build-template
+  extends:
+    - .build-template
+    - .skip-in-fast-mode-template
   needs:
     - x86_64-pc-linux-gnu-debug
   variables:
     BUILD_TARGET: linux64_cxx20
 
 linux64_sqlite-build:
-  extends: .build-template
+  extends:
+    - .build-template
+    - .skip-in-fast-mode-template
   needs:
     - x86_64-pc-linux-gnu-debug
   variables:
     BUILD_TARGET: linux64_sqlite
 
 linux64_fuzz-build:
-  extends: .build-template
+  extends:
+    - .build-template
+    - .skip-in-fast-mode-template
   needs:
     - x86_64-pc-linux-gnu-debug
   variables:
     BUILD_TARGET: linux64_fuzz
 
+#linux64_asan-build:
+#  extends:
+#    - .build-template
+#    - .skip-in-fast-mode-template
+#  needs:
+#    - x86_64-pc-linux-gnu-debug
+#  variables:
+#    BUILD_TARGET: linux64_asan
+
 linux64_tsan-build:
   extends:
     - .build-template
@@ -274,43 +270,43 @@ linux64_tsan-build:
   variables:
     BUILD_TARGET: linux64_tsan
 
-linux64_nowallet-build:
+linux64_ubsan-build:
   extends:
     - .build-template
     - .skip-in-fast-mode-template
   needs:
-    - x86_64-pc-linux-gnu-nowallet
+    - x86_64-pc-linux-gnu-debug
   variables:
-    BUILD_TARGET: linux64_nowallet
+    BUILD_TARGET: linux64_ubsan
 
-mac-build:
+linux64_nowallet-build:
   extends:
     - .build-template
     - .skip-in-fast-mode-template
   needs:
-    - x86_64-apple-darwin19
+    - x86_64-pc-linux-gnu-nowallet
   variables:
-    BUILD_TARGET: mac
+    BUILD_TARGET: linux64_nowallet
 
-###
+#linux64_valgrind-build:
+#  extends:
+#    - .build-template
+#    - .skip-in-fast-mode-template
+#  needs:
+#    - x86_64-pc-linux-gnu-debug
+#  variables:
+#    BUILD_TARGET: linux64_valgrind
 
-linux32-test:
+mac-build:
   extends:
-    - .test-template
+    - .build-template
     - .skip-in-fast-mode-template
   needs:
-    - linux32-build
+    - x86_64-apple-darwin
   variables:
-    BUILD_TARGET: linux32
+    BUILD_TARGET: mac
 
-linux32_ubsan-test:
-  extends:
-    - .test-template
-    - .skip-in-fast-mode-template
-  needs:
-    - linux32_ubsan-build
-  variables:
-    BUILD_TARGET: linux32_ubsan
+###
 
 linux64-test:
   extends: .test-template
@@ -320,12 +316,23 @@ linux64-test:
     BUILD_TARGET: linux64
 
 linux64_sqlite-test:
-  extends: .test-template
+  extends:
+    - .test-template
+    - .skip-in-fast-mode-template
   needs:
     - linux64_sqlite-build
   variables:
     BUILD_TARGET: linux64_sqlite
 
+#linux64_asan-test:
+#  extends:
+#    - .test-template
+#    - .skip-in-fast-mode-template
+#  needs:
+#    - linux64_asan-build
+#  variables:
+#    BUILD_TARGET: linux64_asan
+
 linux64_tsan-test:
   extends:
     - .test-template
@@ -334,3 +341,21 @@ linux64_tsan-test:
     - linux64_tsan-build
   variables:
     BUILD_TARGET: linux64_tsan
+
+linux64_ubsan-test:
+  extends:
+    - .test-template
+    - .skip-in-fast-mode-template
+  needs:
+    - linux64_ubsan-build
+  variables:
+    BUILD_TARGET: linux64_ubsan
+
+#linux64_valgrind-test:
+#  extends:
+#    - .test-template
+#    - .skip-in-fast-mode-template
+#  needs:
+#    - linux64_valgrind-build
+#  variables:
+#    BUILD_TARGET: linux64_valgrind
diff --git a/.python-version b/.python-version
index 7b59a5caab64b..eee6392d5c718 100644
--- a/.python-version
+++ b/.python-version
@@ -1 +1 @@
-3.10.2
+3.8.16
diff --git a/.travis.yml b/.travis.yml
index 242203a55721b..e135852c3958f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,6 +2,23 @@
 # - sudo/dist/group are set so as to get Blue Box VMs, necessary for [loopback]
 #   IPv6 support
 
+# The test build matrix (stage: test) is constructed to test a wide range of
+# configurations, rather than a single pass/fail. This helps to catch build
+# failures and logic errors that present on platforms other than the ones the
+# author has tested.
+#
+# Some builders use the dependency-generator in `./depends`, rather than using
+# apt-get to install build dependencies. This guarantees that the tester is
+# using the same versions as Gitian, so the build results are nearly identical
+# to what would be found in a final release.
+#
+# In order to avoid rebuilding all dependencies for each build, the binaries
+# are cached and re-used when possible. Changes in the dependency-generator
+# will trigger cache-invalidation and rebuilds as necessary.
+#
+
+version: ~> 1.0
+
 dist: xenial
 os: linux
@@ -15,9 +32,6 @@ cache:
   ccache: true
   directories:
     - $BASE_BUILD_DIR/ci/scratch/.ccache
-    # macOS
-    - $HOME/Library/Caches/Homebrew
-    - /usr/local/Homebrew
 before_cache:
   - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then brew cleanup; fi
 env:
@@ -82,9 +96,6 @@ jobs:
     - <<: *builddepends
       name: depends-win64
       env: BUILD_TARGET=win64
-    - <<: *builddepends
-      name: depends-linux32
-      env: BUILD_TARGET=linux32
     - <<: *builddepends
       name: depends-linux64
       env: BUILD_TARGET=linux64
@@ -106,9 +117,6 @@ jobs:
     - <<: *buildsrc
       name: src-win64
       env: BUILD_TARGET=win64
-    - <<: *buildsrc
-      name: src-linux32
-      env: BUILD_TARGET=linux32
     - <<: *buildsrc
       name: src-linux64
       env: BUILD_TARGET=linux64
@@ -127,9 +135,6 @@ jobs:
     - <<: *runtests
       name: tests-win64
       env: BUILD_TARGET=win64
-    - <<: *runtests
-      name: tests-linux32
-      env: BUILD_TARGET=linux32
     - <<: *runtests
       name: tests-linux64
       env: BUILD_TARGET=linux64
@@ -168,7 +173,7 @@ install:
   - ls -lah $HOST_CACHE_DIR && ls -lah $HOST_CACHE_DIR/depends && ls -lah $HOST_CACHE_DIR/ccache && ls -lah $HOST_CACHE_DIR/docker
   # Load cached builder image
   - if [ -f $HOST_CACHE_DIR/docker/dash-builder-$BUILD_TARGET.tar.gz ]; then zcat $HOST_CACHE_DIR/docker/dash-builder-$BUILD_TARGET.tar.gz | docker load || true; fi
-  - travis_retry docker pull ubuntu:bionic
+  - travis_retry docker pull ubuntu:focal
   - travis_retry docker build -t $BUILDER_IMAGE_NAME --build-arg=USER_ID=$UID --build-arg=GROUP_ID=$UID --build-arg=BUILD_TARGET=$BUILD_TARGET -f contrib/containers/ci/Dockerfile ci
 before_script:
   # Make sure stdout is in blocking mode. Otherwise builds will fail due to large writes to stdout
@@ -177,7 +182,7 @@ before_script:
   # Build docker image only for develop branch of the main repo
   - if [ "$TRAVIS_REPO_SLUG" != "dashpay/dash" -o "$TRAVIS_BRANCH" != "develop" -o "$TRAVIS_PULL_REQUEST" != "false" ]; then export DOCKER_BUILD="false"; echo DOCKER_BUILD=$DOCKER_BUILD; fi
   # TODO(ignore if don't use travis): Check keys and signed commits
-  #- if [ "$TRAVIS_REPO_SLUG" = "dashpay/dash" -a "$TRAVIS_PULL_REQUEST" = "false" ]; then while read LINE; do travis_retry gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys; fi
+  #- if [ "$TRAVIS_REPO_SLUG" = "dashpay/dash" -a "$TRAVIS_PULL_REQUEST" = "false" ]; then travis_retry gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys $(<contrib/verify-commits/trusted-keys); fi
 
-    - stage: extended-lint
-      name: 'extended lint [runtime >= 60 seconds]'
-      env:
-      cache: false
-      language: python
-      python: '3.5'
-      install:
-        - set -o errexit; source ./ci/extended_lint/04_install.sh
-      before_script:
-        - set -o errexit; source ./ci/lint/05_before_script.sh
-      script:
-        - set -o errexit; source ./ci/extended_lint/06_script.sh
-
     - stage: test
-      name: 'ARM [GOAL: install] [unit tests, functional tests]'
-      arch: arm64
+      name: 'ARM [GOAL: install] [focal] [unit tests, functional tests]'
+      arch: arm64 # Can disable QEMU_USER_CMD and run the tests natively without qemu
       env: >-
         FILE_ENV="./ci/test/00_setup_env_arm.sh"
-        QEMU_USER_CMD="" # Can run the tests natively without qemu
+        QEMU_USER_CMD=""
 
     - stage: test
-      name: 'S390x [GOAL: install] [unit tests, functional tests]'
+      name: 's390x native BE [GOAL: install] [bionic] [no depends, no GUI]'
       arch: s390x
+      dist: bionic
+      addons:
+        apt:
+          packages:
+            - bsdmainutils
+            - libboost-filesystem-dev
+            - libboost-system-dev
+            - libboost-test-dev
+            - libboost-thread-dev
+            - libdb++-dev
+            - libdb-dev
+            - libevent-dev
       env: >-
-        FILE_ENV="./ci/test/00_setup_env_s390x.sh"
+        DANGER_RUN_CI_ON_HOST=true
+        CI_USE_APT_INSTALL=no
FILE_ENV="./ci/test/00_setup_env_s390x_host.sh" + +# s390 build was disabled temporarily because of disk space issues on the Travis VM +# +# - stage: test +# name: 'S390x [GOAL: install] [focal] [unit tests, functional tests]' +# arch: s390x # Can disable QEMU_USER_CMD and run the tests natively without qemu +# env: >- +# FILE_ENV="./ci/test/00_setup_env_s390x.sh" +# QEMU_USER_CMD="" - stage: test name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]' @@ -229,17 +244,7 @@ after_success: FILE_ENV="./ci/test/00_setup_env_win64.sh" - stage: test - name: '32-bit + dash [GOAL: install] [GUI: no BIP70]' - env: >- - FILE_ENV="./ci/test/00_setup_env_i686.sh" - - - stage: test - name: 'x86_64 Linux [GOAL: install] [CentOS 7] [no depends, only system libs]' - env: >- - FILE_ENV="./ci/test/00_setup_env_native_centos.sh" - - - stage: test - name: 'x86_64 Linux [GOAL: install] [bionic] [uses qt5 dev package instead of depends Qt to speed up build and avoid timeout] [unsigned char]' + name: 'x86_64 Linux [GOAL: install] [focal] [uses qt5 dev package and some depends packages] [unsigned char]' env: >- FILE_ENV="./ci/test/00_setup_env_native_qt5.sh" # x86_64 Linux (xenial, no depends, only system libs, sanitizers: thread (TSan)) @@ -247,23 +252,25 @@ after_success: name: 'x86_64 Linux [GOAL: install] [xenial] [no depends, only system libs, sanitizers: thread (TSan), no wallet]' env: >- FILE_ENV="./ci/test/00_setup_env_native_tsan.sh" + TEST_RUNNER_EXTRA="--exclude feature_block" # Not enough memory on travis machines # x86_64 Linux (no depends, only system libs, sanitizers: address/leak (ASan + LSan) + undefined (UBSan) + integer) - stage: test - name: 'x86_64 Linux [GOAL: install] [bionic] [no depends, only system libs, sanitizers: address/leak (ASan + LSan) + undefined (UBSan) + integer]' + name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: address/leak (ASan + LSan) + undefined (UBSan) + integer]' env: >- FILE_ENV="./ci/test/00_setup_env_native_asan.sh" - stage: test - name: 'x86_64 Linux [GOAL: install] [bionic] [no wallet]' + name: 'x86_64 Linux [GOAL: install] [focal] [no wallet]' env: >- FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh" - stage: test - name: 'x86_64 Linux [GOAL: install] [bionic] [no depends, only system libs, fuzzers under valgrind]' + name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, fuzzers under valgrind]' env: >- FILE_ENV="./ci/test/00_setup_env_native_fuzz_with_valgrind.sh" - stage: test + name: 'x86_64 Linux [GOAL: install] [focal] [no wallet]' env: >- FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh" @@ -273,11 +280,31 @@ after_success: FILE_ENV="./ci/test/00_setup_env_mac.sh" - stage: test - name: 'macOS 10.14 native [GOAL: install] [GUI: BIP70 enabled] [no depends]' + name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]' os: osx # Use the most recent version: - # Xcode 11, macOS 10.14, JDK 12.0.1 + # Xcode 11.3.1, macOS 10.14, SDK 10.15 # https://docs.travis-ci.com/user/reference/osx/#macos-version - osx_image: xcode11 + osx_image: xcode11.3 + cache: + directories: + - $TRAVIS_BUILD_DIR/ci/scratch/.ccache + - $TRAVIS_BUILD_DIR/releases/$HOST + - $HOME/Library/Caches/Homebrew + - /usr/local/Homebrew + addons: + homebrew: + packages: + - libtool + - berkeley-db4 + - boost + - miniupnpc + - qt + - qrencode + - python3 + - ccache + - zeromq env: >- + DANGER_RUN_CI_ON_HOST=true + CI_USE_APT_INSTALL=no FILE_ENV="./ci/test/00_setup_env_mac_host.sh" diff --git a/.tx/config b/.tx/config index 
f20b64d17eb20..7699aee5fffe6 100644 --- a/.tx/config +++ b/.tx/config @@ -1,7 +1,7 @@ [main] host = https://www.transifex.com -[dash.qt-translation-018x] +[dash.dash_ents] file_filter = src/qt/locale/dash_.ts source_file = src/qt/locale/dash_en.xlf source_lang = en diff --git a/CMakeLists.txt b/CMakeLists.txt index 2a31b734b7c04..863806b85754a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,6 +49,8 @@ add_definitions( -DENABLE_WALLET=1 ) +# find src/ -name '*.h' -or -name '*.cpp' | sed -r 's/[^/]*.(h|cpp)$/*.\1/' | sort | uniq | grep -Ev "/(bench/data|obj)/" > list.txt + file(GLOB SOURCE_FILES src/*.cpp src/*.h @@ -69,13 +71,20 @@ file(GLOB SOURCE_FILES src/evo/*.h src/governance/*.cpp src/governance/*.h + src/index/*.cpp + src/index/*.h + src/interfaces/*.cpp + src/interfaces/*.h src/leveldb/db/*.cc src/leveldb/db/*.h src/leveldb/include/*.h src/llmq/*.cpp src/llmq/*.h + src/logging/*.h src/masternode/*.cpp src/masternode/*.h + src/node/*.cpp + src/node/*.h src/policy/*.cpp src/policy/*.h src/primitives/*.cpp @@ -89,11 +98,20 @@ file(GLOB SOURCE_FILES src/script/*.cpp src/script/*.h src/secp256k1/include/*.h + src/support/allocators/*.h + src/support/*.cpp + src/support/*.h src/test/*.cpp src/test/*.h + src/test/fuzz/*.cpp + src/test/fuzz/*.h + src/test/util/*.cpp + src/test/util/*.h src/univalue/include/*.h src/univalue/lib/*.cpp src/univalue/lib/*.h + src/util/*.h + src/util/*.cpp src/wallet/*.cpp src/wallet/*.h src/wallet/test/*.cpp diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 29d3f169b2296..3a1f9123cdd02 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -89,7 +89,7 @@ include: Examples: feat(consensus): add new opcode for BIP-XXXX OP_CHECKAWESOMESIG - feat(net): automatically create hidden service, listen on Tor + feat(net): automatically create onion service, listen on Tor feat(qt): add feed bump button fix(log): fix typo in log message feat(rpc)!: modify gettransaction parameter type diff --git a/Makefile.am b/Makefile.am index 2c4f4f9330bcf..2ea6bb1f5245c 100644 --- a/Makefile.am +++ b/Makefile.am @@ -4,8 +4,8 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Pattern rule to print variables, e.g. 
make print-top_srcdir -print-%: - @echo $* = $($*) +print-%: FORCE + @echo '$*'='$($*)' ACLOCAL_AMFLAGS = -I build-aux/m4 SUBDIRS = src @@ -36,43 +36,37 @@ OSX_APP=Dash-Qt.app OSX_VOLNAME = $(subst $(space),-,$(PACKAGE_NAME)) OSX_DMG = $(OSX_VOLNAME).dmg OSX_TEMP_ISO = $(OSX_DMG:.dmg=).temp.iso -OSX_BACKGROUND_SVG=background.svg -OSX_BACKGROUND_IMAGE=background.tiff -OSX_BACKGROUND_IMAGE_DPIS=36 72 -OSX_DSSTORE_GEN=$(top_srcdir)/contrib/macdeploy/custom_dsstore.py +OSX_BACKGROUND_IMAGE=$(top_srcdir)/contrib/macdeploy/background.tiff OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus -OSX_FANCY_PLIST=$(top_srcdir)/contrib/macdeploy/fancy.plist OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/dash.icns OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed -OSX_QT_TRANSLATIONS = ar,bg,ca,cs,da,de,es,fa,fi,fr,gd,gl,he,hu,it,ja,ko,lt,lv,pl,pt,ru,sk,sl,sv,uk,zh_CN,zh_TW DIST_CONTRIB = \ $(top_srcdir)/contrib/debian/copyright \ - $(top_srcdir)/contrib/linearize/linearize-data.py \ - $(top_srcdir)/contrib/linearize/linearize-hashes.py + $(top_srcdir)/contrib/install_db4.sh \ + $(top_srcdir)/contrib/linearize/linearize-data.py \ + $(top_srcdir)/contrib/linearize/linearize-hashes.py DIST_SHARE = \ $(top_srcdir)/share/genbuild.sh \ $(top_srcdir)/share/rpcauth BIN_CHECKS=$(top_srcdir)/contrib/devtools/symbol-check.py \ $(top_srcdir)/contrib/devtools/security-check.py \ - $(top_srcdir)/contrib/devtools/pixie.py + $(top_srcdir)/contrib/devtools/utils.py WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/dash.ico \ $(top_srcdir)/share/pixmaps/nsis-header.bmp \ $(top_srcdir)/share/pixmaps/nsis-wizard.bmp \ $(top_srcdir)/doc/README_windows.txt -OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \ - $(top_srcdir)/contrib/macdeploy/$(OSX_BACKGROUND_SVG) \ - $(OSX_DSSTORE_GEN) \ +OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_INSTALLER_ICONS) \ $(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \ $(top_srcdir)/contrib/macdeploy/detached-sig-create.sh COVERAGE_INFO = baseline.info \ test_dash_filtered.info total_coverage.info \ baseline_filtered.info functional_test.info functional_test_filtered.info \ - test_dash_coverage.info test_dash.info + test_dash_coverage.info test_dash.info fuzz.info fuzz_coverage.info dist-hook: -$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf - @@ -125,20 +119,13 @@ osx_volname: echo $(OSX_VOLNAME) >$@ if BUILD_DARWIN -$(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) $(OSX_BACKGROUND_IMAGE) - $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) -add-qt-tr $(OSX_QT_TRANSLATIONS) -translations-dir=$(QT_TRANSLATION_DIR) -dmg -fancy $(OSX_FANCY_PLIST) -verbose 2 -volname $(OSX_VOLNAME) - -$(OSX_BACKGROUND_IMAGE).png: contrib/macdeploy/$(OSX_BACKGROUND_SVG) - sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d 36 -p 36 -o $@ -$(OSX_BACKGROUND_IMAGE)@2x.png: contrib/macdeploy/$(OSX_BACKGROUND_SVG) - sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d 72 -p 72 -o $@ -$(OSX_BACKGROUND_IMAGE): $(OSX_BACKGROUND_IMAGE).png $(OSX_BACKGROUND_IMAGE)@2x.png - tiffutil -cathidpicheck $^ -out $@ +$(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) + $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -dmg deploydir: $(OSX_DMG) else !BUILD_DARWIN APP_DIST_DIR=$(top_builddir)/dist -APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/$(OSX_BACKGROUND_IMAGE) $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications 
+APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/background.tiff $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications
 
 $(APP_DIST_DIR)/Applications:
 	@rm -f $@
@@ -147,23 +134,17 @@ $(APP_DIST_DIR)/Applications:
 
 $(APP_DIST_EXTRAS): $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt
 
 $(OSX_TEMP_ISO): $(APP_DIST_EXTRAS)
-	$(XORRISOFS) -D -l -V "$(OSX_VOLNAME)" -no-pad -r -dir-mode 0755 -o $@ dist
+	$(XORRISOFS) -D -l -V "$(OSX_VOLNAME)" -no-pad -r -dir-mode 0755 -o $@ $(APP_DIST_DIR) -- $(if $(SOURCE_DATE_EPOCH),-volume_date all_file_dates =$(SOURCE_DATE_EPOCH))
 
 $(OSX_DMG): $(OSX_TEMP_ISO)
 	$(DMG) dmg "$<" "$@"
 
-dpi%.$(OSX_BACKGROUND_IMAGE): contrib/macdeploy/$(OSX_BACKGROUND_SVG)
-	sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d $* -p $* | $(IMAGEMAGICK_CONVERT) - $@
-OSX_BACKGROUND_IMAGE_DPIFILES := $(foreach dpi,$(OSX_BACKGROUND_IMAGE_DPIS),dpi$(dpi).$(OSX_BACKGROUND_IMAGE))
-$(APP_DIST_DIR)/.background/$(OSX_BACKGROUND_IMAGE): $(OSX_BACKGROUND_IMAGE_DPIFILES)
+$(APP_DIST_DIR)/.background/background.tiff:
 	$(MKDIR_P) $(@D)
-	$(TIFFCP) -c none $(OSX_BACKGROUND_IMAGE_DPIFILES) $@
-
-$(APP_DIST_DIR)/.DS_Store: $(OSX_DSSTORE_GEN)
-	$(PYTHON) $< "$@" "$(OSX_VOLNAME)"
+	cp $(OSX_BACKGROUND_IMAGE) $@
 
 $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING)
-	INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) -translations-dir=$(QT_TRANSLATION_DIR) -add-qt-tr $(OSX_QT_TRANSLATIONS) -verbose 2
+	INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR)
 
 deploydir: $(APP_DIST_EXTRAS)
 endif !BUILD_DARWIN
@@ -210,6 +191,15 @@ baseline_filtered.info: baseline.info
 	$(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
 	$(LCOV) -a $@ $(LCOV_OPTS) -o $@
 
+fuzz.info: baseline_filtered.info
+	@TIMEOUT=15 test/fuzz/test_runner.py qa-assets/fuzz_seed_corpus -l DEBUG
+	$(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src --t fuzz-tests -o $@
+	$(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src
+
+fuzz_filtered.info: fuzz.info
+	$(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
+	$(LCOV) -a $@ $(LCOV_OPTS) -o $@
+
 test_dash.info: baseline_filtered.info
 	$(MAKE) -C src/ check
 	$(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src -t test_dash -o $@
@@ -228,12 +218,19 @@ functional_test_filtered.info: functional_test.info
 	$(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
 	$(LCOV) -a $@ $(LCOV_OPTS) -o $@
 
+fuzz_coverage.info: fuzz_filtered.info
+	$(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a fuzz_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt
+
 test_dash_coverage.info: baseline_filtered.info test_dash_filtered.info
 	$(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_dash_filtered.info -o $@
 
 total_coverage.info: test_dash_filtered.info functional_test_filtered.info
 	$(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_dash_filtered.info -a functional_test_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt
 
+fuzz.coverage/.dirstamp: fuzz_coverage.info
+	$(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
+	@touch $@
+
 test_dash.coverage/.dirstamp: test_dash_coverage.info
 	$(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
 	@touch $@
@@ -242,6 +239,8 @@ total.coverage/.dirstamp: total_coverage.info
 	$(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
 	@touch $@
 
+cov_fuzz: fuzz.coverage/.dirstamp
+
 cov: test_dash.coverage/.dirstamp total.coverage/.dirstamp
 
 endif
@@ -317,14 +316,20 @@ clean-docs:
 	rm -rf doc/doxygen
 
 clean-local: clean-docs
-	rm -rf coverage_percent.txt test_dash.coverage/ total.coverage/ test/tmp/ cache/ $(OSX_APP)
+	rm -rf coverage_percent.txt test_dash.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP)
 	rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__
-	rm -rf osx_volname dist/ dpi36.background.tiff dpi72.background.tiff
+	rm -rf osx_volname dist/
 
 test-security-check:
 if TARGET_DARWIN
-	$(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
+	$(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
+	$(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
+endif
+if TARGET_WINDOWS
+	$(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
+	$(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
 endif
 if TARGET_LINUX
-	$(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
+	$(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
+	$(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
 endif
diff --git a/build-aux/m4/ax_pthread.m4 b/build-aux/m4/ax_pthread.m4
index 1598d077ff020..9f35d139149f8 100644
--- a/build-aux/m4/ax_pthread.m4
+++ b/build-aux/m4/ax_pthread.m4
@@ -14,20 +14,24 @@
 # flags that are needed. (The user can also force certain compiler
 # flags/libs to be tested by setting these environment variables.)
 #
-# Also sets PTHREAD_CC to any special C compiler that is needed for
-# multi-threaded programs (defaults to the value of CC otherwise). (This
-# is necessary on AIX to use the special cc_r compiler alias.)
+# Also sets PTHREAD_CC and PTHREAD_CXX to any special C compiler that is
+# needed for multi-threaded programs (defaults to the value of CC
+# respectively CXX otherwise). (This is necessary on e.g. AIX to use the
+# special cc_r/CC_r compiler alias.)
 #
 # NOTE: You are assumed to not only compile your program with these flags,
 # but also to link with them as well. For example, you might link with
 # $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
+# $PTHREAD_CXX $CXXFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
 #
 # If you are only building threaded programs, you may wish to use these
 # variables in your default LIBS, CFLAGS, and CC:
 #
 #   LIBS="$PTHREAD_LIBS $LIBS"
 #   CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+#   CXXFLAGS="$CXXFLAGS $PTHREAD_CFLAGS"
 #   CC="$PTHREAD_CC"
+#   CXX="$PTHREAD_CXX"
 #
 # In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant
 # has a nonstandard name, this macro defines PTHREAD_CREATE_JOINABLE to
@@ -83,7 +87,7 @@
 #   modified version of the Autoconf Macro, you may extend this special
 #   exception to the GPL to apply to your modified version as well.
 
-#serial 27
+#serial 31
 
 AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
 AC_DEFUN([AX_PTHREAD], [
@@ -105,6 +109,7 @@ if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
         ax_pthread_save_CFLAGS="$CFLAGS"
         ax_pthread_save_LIBS="$LIBS"
         AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
+        AS_IF([test "x$PTHREAD_CXX" != "x"], [CXX="$PTHREAD_CXX"])
         CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
         LIBS="$PTHREAD_LIBS $LIBS"
         AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
@@ -386,7 +391,7 @@ if test "x$ax_pthread_clang" = "xyes"; then
         # step
         ax_pthread_save_ac_link="$ac_link"
         ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
-        ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+        ax_pthread_link_step=`AS_ECHO(["$ac_link"]) | sed "$ax_pthread_sed"`
         ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
         ax_pthread_save_CFLAGS="$CFLAGS"
         for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
@@ -482,18 +487,28 @@ if test "x$ax_pthread_ok" = "xyes"; then
             [#handle absolute path differently from PATH based program lookup
              AS_CASE(["x$CC"],
                [x/*],
-               [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
-               [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+               [
+                AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])
+                AS_IF([test "x${CXX}" != "x"], [AS_IF([AS_EXECUTABLE_P([${CXX}_r])],[PTHREAD_CXX="${CXX}_r"])])
+               ],
+               [
+                AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])
+                AS_IF([test "x${CXX}" != "x"], [AC_CHECK_PROGS([PTHREAD_CXX],[${CXX}_r],[$CXX])])
+               ]
+              )
+            ])
             ;;
     esac
 fi
 
 test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+test -n "$PTHREAD_CXX" || PTHREAD_CXX="$CXX"
 
 AC_SUBST([PTHREAD_LIBS])
 AC_SUBST([PTHREAD_CFLAGS])
 AC_SUBST([PTHREAD_CC])
+AC_SUBST([PTHREAD_CXX])
 
 # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
 if test "x$ax_pthread_ok" = "xyes"; then
diff --git a/build-aux/m4/bitcoin_find_bdb48.m4 b/build-aux/m4/bitcoin_find_bdb48.m4
index 5fc5b493d36a0..3d6c8210ede2e 100644
--- a/build-aux/m4/bitcoin_find_bdb48.m4
+++ b/build-aux/m4/bitcoin_find_bdb48.m4
@@ -48,15 +48,22 @@ AC_DEFUN([BITCOIN_FIND_BDB48],[
     if test "x$bdbpath" = "xX"; then
       use_bdb=no
       AC_MSG_RESULT([no])
-      AC_MSG_ERROR([libdb_cxx headers missing, ]AC_PACKAGE_NAME[ requires this library for BDB wallet support (--without-bdb to disable BDB wallet support)])
+      AC_MSG_WARN([libdb_cxx headers missing])
+      AC_MSG_WARN(AC_PACKAGE_NAME[ requires this library for BDB (legacy) wallet support])
+      AC_MSG_WARN([Passing --without-bdb will suppress this warning])
     elif test "x$bdb48path" = "xX"; then
       BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdbpath}],db_cxx)
       AC_ARG_WITH([incompatible-bdb],[AS_HELP_STRING([--with-incompatible-bdb], [allow using a bdb version other than 4.8])],[
-        AC_MSG_WARN([Found Berkeley DB other than 4.8; BDB wallets opened by this build will not be portable!])
+        AC_MSG_WARN([Found Berkeley DB other than 4.8])
+        AC_MSG_WARN([BDB (legacy) wallets opened by this build will not be portable!])
+        use_bdb=yes
       ],[
-        AC_MSG_ERROR([Found Berkeley DB other than 4.8, required for portable BDB wallets (--with-incompatible-bdb to ignore or --without-bdb to disable BDB wallet support)])
+        AC_MSG_WARN([Found Berkeley DB other than 4.8])
+        AC_MSG_WARN([BDB (legacy) wallets opened by this build would not be portable!])
+        AC_MSG_WARN([If this is intended, pass --with-incompatible-bdb])
+        AC_MSG_WARN([Passing --without-bdb will suppress this warning])
+        use_bdb=no
       ])
-      use_bdb=yes
     else
       BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdb48path}],db_cxx)
       bdbpath="${bdb48path}"
@@ -78,7 +85,9 @@ AC_DEFUN([BITCOIN_FIND_BDB48],[
       ])
     done
     if test "x$BDB_LIBS" = "x"; then
-      AC_MSG_ERROR([libdb_cxx missing, ]AC_PACKAGE_NAME[ requires this library for BDB wallet support (--without-bdb to disable BDB wallet support)])
+      AC_MSG_WARN([libdb_cxx headers missing])
+      AC_MSG_WARN(AC_PACKAGE_NAME[ requires this library for BDB (legacy) wallet support])
+      AC_MSG_WARN([Passing --without-bdb will suppress this warning])
     fi
   fi
   if test "x$use_bdb" != "xno"; then
diff --git a/ci/dash/build-docker.sh b/ci/dash/build-docker.sh
index fd541928e85d2..053f28e6eb2bd 100755
--- a/ci/dash/build-docker.sh
+++ b/ci/dash/build-docker.sh
@@ -12,17 +12,14 @@ DOCKER_IMAGE=${DOCKER_IMAGE:-dashpay/dashd-develop}
 DOCKER_TAG=${DOCKER_TAG:-latest}
 DOCKER_RELATIVE_PATH=contrib/containers/deploy
 
-BASE_BUILD_DIR=${BASE_BUILD_DIR:-.}
-
-
 if [ -d $DOCKER_RELATIVE_PATH/bin ]; then
     rm $DOCKER_RELATIVE_PATH/bin/*
 fi
 
 mkdir $DOCKER_RELATIVE_PATH/bin
-cp "$BASE_BUILD_DIR"/src/dashd $DOCKER_RELATIVE_PATH/bin/
-cp "$BASE_BUILD_DIR"/src/dash-cli $DOCKER_RELATIVE_PATH/bin/
-cp "$BASE_BUILD_DIR"/src/dash-tx $DOCKER_RELATIVE_PATH/bin/
+cp "$BASE_ROOT_DIR"/src/dashd $DOCKER_RELATIVE_PATH/bin/
+cp "$BASE_ROOT_DIR"/src/dash-cli $DOCKER_RELATIVE_PATH/bin/
+cp "$BASE_ROOT_DIR"/src/dash-tx $DOCKER_RELATIVE_PATH/bin/
 strip $DOCKER_RELATIVE_PATH/bin/dashd
 strip $DOCKER_RELATIVE_PATH/bin/dash-cli
 strip $DOCKER_RELATIVE_PATH/bin/dash-tx
diff --git a/ci/dash/build_depends.sh b/ci/dash/build_depends.sh
index cefd55047c74e..e25f55df41d29 100755
--- a/ci/dash/build_depends.sh
+++ b/ci/dash/build_depends.sh
@@ -17,19 +17,19 @@ unset DISPLAY
 mkdir -p $CACHE_DIR/depends
 mkdir -p $CACHE_DIR/sdk-sources
 
-ln -s $CACHE_DIR/depends depends/built
-ln -s $CACHE_DIR/sdk-sources depends/sdk-sources
+ln -s $CACHE_DIR/depends ${DEPENDS_DIR}/built
+ln -s $CACHE_DIR/sdk-sources ${DEPENDS_DIR}/sdk-sources
 
-mkdir -p depends/SDKs
+mkdir -p ${DEPENDS_DIR}/SDKs
 
 if [ -n "$XCODE_VERSION" ]; then
     OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers.tar.gz"
-    OSX_SDK_PATH="depends/sdk-sources/${OSX_SDK_BASENAME}"
+    OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_BASENAME}"
     if [ ! -f "$OSX_SDK_PATH" ]; then
        curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH"
    fi
    if [ -f "$OSX_SDK_PATH" ]; then
-        tar -C depends/SDKs -xf "$OSX_SDK_PATH"
+        tar -C ${DEPENDS_DIR}/SDKs -xf "$OSX_SDK_PATH"
    fi
 fi
diff --git a/ci/dash/build_src.sh b/ci/dash/build_src.sh
index d2af35179fb30..a0bd392ea2235 100755
--- a/ci/dash/build_src.sh
+++ b/ci/dash/build_src.sh
@@ -40,7 +40,7 @@ if [ -n "$CONFIG_SHELL" ]; then
   export CONFIG_SHELL="$CONFIG_SHELL"
 fi
 
-BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$BASE_BUILD_DIR/depends/$HOST --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib"
+BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$DEPENDS_DIR/$HOST --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib"
 
 ( test -n "$CONFIG_SHELL" && eval '"$CONFIG_SHELL" -c "./autogen.sh"' ) || ./autogen.sh
 
@@ -55,4 +55,16 @@ cd dashcore-$BUILD_TARGET
 ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
 
 make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL V=1 ; false )
-make $MAKEJOBS -C src check-symbols
+
+if [ -n "$USE_VALGRIND" ]; then
+    echo "valgrind in USE!"
+    ${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh
+fi
+
+if [ "$RUN_SECURITY_TESTS" = "true" ]; then
+    make test-security-check
+fi
+
+if [ "$RUN_SYMBOL_TESTS" = "true" ]; then
+    make $MAKEJOBS -C src check-symbols
+fi
diff --git a/ci/dash/matrix.sh b/ci/dash/matrix.sh
index 6b21a0f5d891e..26dcba42efc32 100755
--- a/ci/dash/matrix.sh
+++ b/ci/dash/matrix.sh
@@ -11,6 +11,8 @@ export LC_ALL=C.UTF-8
 source ./ci/test/00_setup_env.sh
 
 # Configure sanitizers options
+export ASAN_OPTIONS=""
+export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan"
 export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan"
 export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan"
 
@@ -18,14 +20,14 @@ if [ "$BUILD_TARGET" = "arm-linux" ]; then
   source ./ci/test/00_setup_env_arm.sh
 elif [ "$BUILD_TARGET" = "win64" ]; then
   source ./ci/test/00_setup_env_win64.sh
-elif [ "$BUILD_TARGET" = "linux32" ]; then
-  source ./ci/test/00_setup_env_i686.sh
-elif [ "$BUILD_TARGET" = "linux32_ubsan" ]; then
-  source ./ci/test/00_setup_env_i686_ubsan.sh
 elif [ "$BUILD_TARGET" = "linux64" ]; then
   source ./ci/test/00_setup_env_native_qt5.sh
+elif [ "$BUILD_TARGET" = "linux64_asan" ]; then
+  source ./ci/test/00_setup_env_native_asan.sh
 elif [ "$BUILD_TARGET" = "linux64_tsan" ]; then
   source ./ci/test/00_setup_env_native_tsan.sh
+elif [ "$BUILD_TARGET" = "linux64_ubsan" ]; then
+  source ./ci/test/00_setup_env_native_ubsan.sh
 elif [ "$BUILD_TARGET" = "linux64_fuzz" ]; then
   source ./ci/test/00_setup_env_native_fuzz.sh
 elif [ "$BUILD_TARGET" = "linux64_cxx20" ]; then
@@ -34,6 +36,10 @@ elif [ "$BUILD_TARGET" = "linux64_sqlite" ]; then
   source ./ci/test/00_setup_env_native_sqlite.sh
 elif [ "$BUILD_TARGET" = "linux64_nowallet" ]; then
   source ./ci/test/00_setup_env_native_nowallet.sh
+elif [ "$BUILD_TARGET" = "linux64_valgrind" ]; then
+  source ./ci/test/00_setup_env_native_valgrind.sh
 elif [ "$BUILD_TARGET" = "mac" ]; then
   source ./ci/test/00_setup_env_mac.sh
+elif [ "$BUILD_TARGET" = "s390x" ]; then
+  source ./ci/test/00_setup_env_s390x.sh
 fi
diff --git a/ci/dash/test_integrationtests.sh b/ci/dash/test_integrationtests.sh
index fb45f14bd9799..de5d8a19c28c4 100755
--- a/ci/dash/test_integrationtests.sh
+++ b/ci/dash/test_integrationtests.sh
@@ -18,7 +18,7 @@ if [ "$RUN_INTEGRATION_TESTS" != "true" ]; then
     exit 0
 fi
 
-export LD_LIBRARY_PATH=$BASE_BUILD_DIR/depends/$HOST/lib
+export LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib
 
 cd build-ci/dashcore-$BUILD_TARGET
 
@@ -37,7 +37,7 @@ echo "Using socketevents mode: $SOCKETEVENTS"
 EXTRA_ARGS="--dashd-arg=-socketevents=$SOCKETEVENTS"
 
 set +e
-./test/functional/test_runner.py --ci --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --failfast --nocleanup --tmpdir=$(pwd)/testdatadirs $PASS_ARGS $EXTRA_ARGS
+./test/functional/test_runner.py --ci --attempts=3 --ansi --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --failfast --nocleanup --tmpdir=$(pwd)/testdatadirs $PASS_ARGS $EXTRA_ARGS
 RESULT=$?
 set -e
diff --git a/ci/dash/test_unittests.sh b/ci/dash/test_unittests.sh
index 595effffaf1ed..0b7bbf46db202 100755
--- a/ci/dash/test_unittests.sh
+++ b/ci/dash/test_unittests.sh
@@ -11,13 +11,13 @@ set -e
 
 source ./ci/dash/matrix.sh
 
-if [ "$RUN_UNIT_TESTS" != "true" ]; then
+if [ "$RUN_UNIT_TESTS" != "true" ] && [ "$RUN_UNIT_TESTS_SEQUENTIAL" != "true" ]; then
     echo "Skipping unit tests"
     exit 0
 fi
 
 export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1}
-export LD_LIBRARY_PATH=$BASE_BUILD_DIR/depends/$HOST/lib
+export LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib
 export WINEDEBUG=fixme-all
 export BOOST_TEST_LOG_LEVEL=test_suite
 
@@ -29,5 +29,9 @@ if [ "$DIRECT_WINE_EXEC_TESTS" = "true" ]; then
     # Inside Docker, binfmt isn't working so we can't trust in make invoking windows binaries correctly
     wine ./src/test/test_dash.exe
 else
-    make $MAKEJOBS check VERBOSE=1
+    if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
+        ./src/test/test_dash --catch_system_errors=no -l test_suite
+    else
+        make $MAKEJOBS check VERBOSE=1
+    fi
 fi
diff --git a/ci/extended_lint/04_install.sh b/ci/extended_lint/04_install.sh
deleted file mode 100755
index 123d874a84da4..0000000000000
--- a/ci/extended_lint/04_install.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2019 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-export LC_ALL=C
-
-CPPCHECK_VERSION=1.86
-curl -s https://codeload.github.com/danmar/cppcheck/tar.gz/${CPPCHECK_VERSION} | tar -zxf - --directory /tmp/
-(cd /tmp/cppcheck-${CPPCHECK_VERSION}/ && make CFGDIR=/tmp/cppcheck-${CPPCHECK_VERSION}/cfg/ > /dev/null)
-export PATH="$PATH:/tmp/cppcheck-${CPPCHECK_VERSION}/"
diff --git a/ci/extended_lint/06_script.sh b/ci/extended_lint/06_script.sh
deleted file mode 100755
index e8228c9c4d912..0000000000000
--- a/ci/extended_lint/06_script.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2019 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-export LC_ALL=C
-
-test/lint/extended-lint-all.sh
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index 0687b981f2a51..c7dea599dcd7e 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -20,6 +20,6 @@ test/lint/lint-all.sh
 
 if [ "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" ] && [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then
     git log --merges --before="2 days ago" -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit
-    while read -r LINE; do travis_retry gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys &&
+    travis_retry gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys $(<contrib/verify-commits/trusted-keys) &&
 fi
diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh
--- a/ci/test/05_before_script.sh
+++ b/ci/test/05_before_script.sh
     DOCKER_EXEC echo \> \$HOME/.dashcore
 fi
-DOCKER_EXEC mkdir -p depends/SDKs depends/sdk-sources
+DOCKER_EXEC mkdir -p ${DEPENDS_DIR}/SDKs ${DEPENDS_DIR}/sdk-sources
 
 if [ -n "$XCODE_VERSION" ] && [ ! -f "$OSX_SDK_PATH" ]; then
     DOCKER_EXEC curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH"
 fi
 if [ -n "$XCODE_VERSION" ] && [ -f "$OSX_SDK_PATH" ]; then
-    DOCKER_EXEC tar -C "depends/SDKs" -xf "$OSX_SDK_PATH"
+    DOCKER_EXEC tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH"
 fi
 if [[ $HOST = *-mingw32 ]]; then
     DOCKER_EXEC update-alternatives --set $HOST-g++ \$\(which $HOST-g++-posix\)
 fi
 if [ -z "$NO_DEPENDS" ]; then
-    DOCKER_EXEC CONFIG_SHELL= make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
+    if [[ $DOCKER_NAME_TAG == centos* ]]; then
+        # CentOS has problems building the depends if the config shell is not explicitly set
+        # (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to
+        # an error as the first command is executed)
+        SHELL_OPTS="CONFIG_SHELL=/bin/bash"
+    else
+        SHELL_OPTS="CONFIG_SHELL="
+    fi
+    DOCKER_EXEC $SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
 fi
diff --git a/ci/test/wrap-valgrind.sh b/ci/test/wrap-valgrind.sh
new file mode 100755
index 0000000000000..d2192061dbc90
--- /dev/null
+++ b/ci/test/wrap-valgrind.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+for b_name in "${BASE_OUTDIR}/bin"/*; do
+  # shellcheck disable=SC2044
+  for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name $(basename $b_name)); do
+    echo "Wrap $b ..."
+    mv "$b" "${b}_orig"
+    echo '#!/usr/bin/env bash' > "$b"
+    echo "valgrind --gen-suppressions=all --quiet --error-exitcode=1 --suppressions=${BASE_ROOT_DIR}/contrib/valgrind.supp \"${b}_orig\" \"\$@\"" >> "$b"
+    chmod +x "$b"
+  done
+done
diff --git a/configure.ac b/configure.ac
index eb6e4d9ddbeaf..4a326c3f2b231 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,9 +1,9 @@
 AC_PREREQ([2.69])
-define(_CLIENT_VERSION_MAJOR, 19)
-define(_CLIENT_VERSION_MINOR, 2)
+define(_CLIENT_VERSION_MAJOR, 20)
+define(_CLIENT_VERSION_MINOR, 0)
 define(_CLIENT_VERSION_BUILD, 0)
 define(_CLIENT_VERSION_RC, 0)
-define(_CLIENT_VERSION_IS_RELEASE, true)
+define(_CLIENT_VERSION_IS_RELEASE, false)
 define(_COPYRIGHT_YEAR, 2023)
 define(_COPYRIGHT_HOLDERS,[The %s developers])
 define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Dash Core]])
@@ -20,6 +20,7 @@ BITCOIN_TX_NAME=dash-tx
 BITCOIN_WALLET_TOOL_NAME=dash-wallet
 
 dnl Unless the user specified ARFLAGS, force it to be cr
+dnl This is also the default as-of libtool 2.4.7
 AC_ARG_VAR(ARFLAGS, [Flags for the archiver, defaults to <cr> if not set])
 if test "x${ARFLAGS+set}" != "xset"; then
   ARFLAGS="cr"
@@ -99,14 +100,13 @@ AC_PATH_TOOL(RANLIB, ranlib)
 AC_PATH_TOOL(STRIP, strip)
 AC_PATH_TOOL(GCOV, gcov)
 AC_PATH_PROG(LCOV, lcov)
-dnl Python 3.5 is specified in .python-version and should be used if available, see doc/dependencies.md
-AC_PATH_PROGS([PYTHON], [python3.5 python3.6 python3.7 python3.8 python3 python])
+dnl Python 3.8 is specified in .python-version and should be used if available, see doc/dependencies.md
+AC_PATH_PROGS([PYTHON], [python3.8 python3.9 python3.10 python3.11 python3.12 python3 python])
 AC_PATH_PROG(GENHTML, genhtml)
 AC_PATH_PROG([GIT], [git])
 AC_PATH_PROG(CCACHE,ccache)
 AC_PATH_PROG(XGETTEXT,xgettext)
 AC_PATH_PROG(HEXDUMP,hexdump)
-AC_PATH_TOOL(CPPFILT, c++filt)
 AC_PATH_TOOL(OBJCOPY, objcopy)
 AC_PATH_TOOL(DSYMUTIL, dsymutil)
 AC_PATH_PROG(DOXYGEN, doxygen)
@@ -435,16 +435,18 @@ if test "x$enable_werror" = "xyes"; then
   AX_CHECK_COMPILE_FLAG([-Werror=suggest-override],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=suggest-override"],,[[$CXXFLAG_WERROR]],
     [AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
   AX_CHECK_COMPILE_FLAG([-Werror=unreachable-code-loop-increment],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
+  AX_CHECK_COMPILE_FLAG([-Werror=mismatched-tags], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=mismatched-tags"], [], [$CXXFLAG_WERROR])
 fi
 
 if test "x$CXXFLAGS_overridden" = "xno"; then
   AX_CHECK_COMPILE_FLAG([-Wall],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wall"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wextra],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wextra"],,[[$CXXFLAG_WERROR]])
-  AX_CHECK_COMPILE_FLAG([-Wformat],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat"],,[[$CXXFLAG_WERROR]])
+  AX_CHECK_COMPILE_FLAG([-Wgnu],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wgnu"],,[[$CXXFLAG_WERROR]])
+  dnl some compilers will ignore -Wformat-security without -Wformat, so just combine the two here.
+  AX_CHECK_COMPILE_FLAG([-Wformat -Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat -Wformat-security"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wvla],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wshadow-field],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wshadow-field"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wswitch],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wswitch"],,[[$CXXFLAG_WERROR]])
-  AX_CHECK_COMPILE_FLAG([-Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat-security"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wthread-safety],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wrange-loop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"],,[[$CXXFLAG_WERROR]])
   AX_CHECK_COMPILE_FLAG([-Wredundant-decls],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"],,[[$CXXFLAG_WERROR]])
@@ -692,7 +694,6 @@ case $host in
      TARGET_OS=darwin
      if test x$cross_compiling != xyes; then
        BUILD_OS=darwin
-       AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg],rsvg-convert)
        AC_CHECK_PROG([BREW],brew, brew)
        if test x$BREW = xbrew; then
          dnl These Homebrew packages may be keg-only, meaning that they won't be found
@@ -757,9 +758,6 @@ case $host in
          AC_PATH_TOOL([OTOOL], [otool], otool)
          AC_PATH_PROGS([XORRISOFS], [xorrisofs], xorrisofs)
          AC_PATH_PROGS([DMG], [dmg], dmg)
-         AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg],rsvg-convert)
-         AC_PATH_PROGS([IMAGEMAGICK_CONVERT], [convert],convert)
-         AC_PATH_PROGS([TIFFCP], [tiffcp],tiffcp)
 
          dnl libtool will try to strip the static lib, which is a problem for
          dnl cross-builds because strip attempts to call a hard-coded ld,
@@ -777,6 +775,18 @@ case $host in
   *android*)
      dnl make sure android stays above linux for hosts like *linux-android*
      TARGET_OS=android
+     case $host in
+       *x86_64*)
+         ANDROID_ARCH=x86_64
+         ;;
+       *aarch64*)
+         ANDROID_ARCH=arm64-v8a
+         ;;
+       *armv7a*)
+         ANDROID_ARCH=armeabi-v7a
+         ;;
+       *) AC_MSG_ERROR("Could not determine Android arch") ;;
+     esac
      ;;
   *linux*)
      TARGET_OS=linux
@@ -815,7 +825,6 @@ if test x$use_lcov = xyes; then
     [AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")])
   AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"],
     [AC_MSG_ERROR("lcov testing requested but --coverage flag does not work")])
-  AC_DEFINE(USE_COVERAGE, 1, [Define this symbol if coverage is enabled])
   CXXFLAGS="$CXXFLAGS -Og"
 fi
 
@@ -855,6 +864,11 @@ AX_GCC_FUNC_ATTRIBUTE([dllimport])
 if test x$use_glibc_compat != xno; then
   AX_CHECK_LINK_FLAG([[-Wl,--wrap=__divmoddi4]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=__divmoddi4"])
   AX_CHECK_LINK_FLAG([[-Wl,--wrap=log2f]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log2f"])
+  case $host in
+    powerpc64* | ppc64*)
+      AX_CHECK_LINK_FLAG([[-Wl,--no-tls-get-addr-optimize]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--no-tls-get-addr-optimize"])
+      ;;
+  esac
   AX_CHECK_LINK_FLAG([[-Wl,--wrap=log2]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log2"])
   AX_CHECK_LINK_FLAG([[-Wl,--wrap=log]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log"])
   AX_CHECK_LINK_FLAG([[-Wl,--wrap=pow]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=pow"])
@@ -924,6 +938,7 @@ if test x$use_hardening != xno; then
     ])
   fi
 
+  AX_CHECK_LINK_FLAG([[-Wl,--enable-reloc-section]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--enable-reloc-section"],, [[$LDFLAG_WERROR]])
   AX_CHECK_LINK_FLAG([[-Wl,--dynamicbase]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--dynamicbase"])
   AX_CHECK_LINK_FLAG([[-Wl,--nxcompat]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--nxcompat"])
   AX_CHECK_LINK_FLAG([[-Wl,--high-entropy-va]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--high-entropy-va"])
@@ -995,6 +1010,22 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [
  AC_MSG_RESULT(no)]
 )
 
+dnl Check for posix_fallocate
+AC_MSG_CHECKING(for posix_fallocate)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+  // same as in src/util/system.cpp
+  #ifdef __linux__
+  #ifdef _POSIX_C_SOURCE
+  #undef _POSIX_C_SOURCE
+  #endif
+  #define _POSIX_C_SOURCE 200112L
+  #endif // __linux__
+  #include <fcntl.h>]],
+ [[ int f = posix_fallocate(0, 0, 0); ]])],
+ [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_POSIX_FALLOCATE, 1,[Define this symbol if you have posix_fallocate]) ],
+ [ AC_MSG_RESULT(no)]
+)
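The probed function is the preallocation call the snippet's comment ties to src/util/system.cpp: posix_fallocate reserves disk blocks and grows the file without writing data, and HAVE_POSIX_FALLOCATE is the macro the check defines when it is available. For a quick illustration of the behavior (Linux-specific; Python happens to expose the same syscall, so this is an aside rather than project code):

#!/usr/bin/env python3
import os

# posix_fallocate(fd, offset, len) reserves blocks and extends the file
# to offset + len, without writing any data bytes.
with open("/tmp/prealloc.dat", "wb") as f:
    os.posix_fallocate(f.fileno(), 0, 1 << 20)  # reserve 1 MiB
print(os.path.getsize("/tmp/prealloc.dat"))     # prints 1048576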
@@ -1843,9 +1883,12 @@ if test x$bitcoin_enable_qt != xno; then
   echo "  with qr     = $use_qr"
 fi
 echo "  with zmq    = $use_zmq"
-echo "  with test   = $use_tests"
+if test x$enable_fuzz == xno; then
+  echo "  with test   = $use_tests"
+else
+  echo "  with test   = not building test_dash because fuzzing is enabled"
+  echo "  with fuzz   = $enable_fuzz"
+fi
 echo "  with bench  = $use_bench"
-if test x$use_tests != xno; then
-  echo "  with fuzz   = $enable_fuzz"
-fi
 echo "  with upnp   = $use_upnp"
diff --git a/contrib/README.md b/contrib/README.md
index 831ab3655a4f1..5116636e32674 100644
--- a/contrib/README.md
+++ b/contrib/README.md
@@ -27,8 +27,8 @@ for Debian-based Linux systems. If you compile dashd/dash-qt yourself, there are
 ### [Gitian-descriptors](/contrib/gitian-descriptors) ###
 Notes on getting Gitian builds up and running using KVM.
 
-### [Gitian-keys](/contrib/gitian-keys)
-PGP keys used for signing Dash Core [Gitian release](/doc/release-process.md) results.
+### [Builder keys](/contrib/builder-keys)
+PGP keys used for signing Dash Core [release](/doc/release-process.md) results.
 
 ### [MacDeploy](/contrib/macdeploy) ###
 Scripts and notes for Mac builds.
diff --git a/contrib/gitian-keys/README.md b/contrib/builder-keys/README.md
similarity index 94%
rename from contrib/gitian-keys/README.md
rename to contrib/builder-keys/README.md
index 4b0b7a261579c..c883b3fa6fc63 100644
--- a/contrib/gitian-keys/README.md
+++ b/contrib/builder-keys/README.md
@@ -3,8 +3,7 @@ PGP keys
 
 This folder contains the public keys of developers and active contributors.
 
-The keys are mainly used to sign git commits or the build results of Gitian
-builds.
+The keys are mainly used to sign git commits or release build results.
 
 You can import the keys into gpg as follows.
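Note: the actual gpg invocation sits just outside this hunk's context. In the spirit of that README, a minimal import-and-refresh helper might look like the sketch below (illustrative only; it assumes gpg is on PATH and the keys live under contrib/builder-keys):

    import glob
    import subprocess

    # Import every builder key, then refresh from the key server so that
    # any revocation made since the last fetch becomes visible locally.
    for key in sorted(glob.glob('contrib/builder-keys/*.pgp')):
        subprocess.run(['gpg', '--import', key], check=True)
    subprocess.run(['gpg', '--refresh-keys'], check=True)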
Also, make sure to fetch the latest version from the key server to see if any key was revoked in the diff --git a/contrib/gitian-keys/codablock.pgp b/contrib/builder-keys/codablock.pgp similarity index 100% rename from contrib/gitian-keys/codablock.pgp rename to contrib/builder-keys/codablock.pgp diff --git a/contrib/gitian-keys/dustinface.pgp b/contrib/builder-keys/dustinface.pgp similarity index 100% rename from contrib/gitian-keys/dustinface.pgp rename to contrib/builder-keys/dustinface.pgp diff --git a/contrib/gitian-keys/gladcow.pgp b/contrib/builder-keys/gladcow.pgp similarity index 100% rename from contrib/gitian-keys/gladcow.pgp rename to contrib/builder-keys/gladcow.pgp diff --git a/contrib/gitian-keys/nmarley.pgp b/contrib/builder-keys/nmarley.pgp similarity index 100% rename from contrib/gitian-keys/nmarley.pgp rename to contrib/builder-keys/nmarley.pgp diff --git a/contrib/gitian-keys/pasta.pgp b/contrib/builder-keys/pasta.pgp similarity index 100% rename from contrib/gitian-keys/pasta.pgp rename to contrib/builder-keys/pasta.pgp diff --git a/contrib/gitian-keys/schinzelh.pgp b/contrib/builder-keys/schinzelh.pgp similarity index 100% rename from contrib/gitian-keys/schinzelh.pgp rename to contrib/builder-keys/schinzelh.pgp diff --git a/contrib/gitian-keys/thephez.pgp b/contrib/builder-keys/thephez.pgp similarity index 100% rename from contrib/gitian-keys/thephez.pgp rename to contrib/builder-keys/thephez.pgp diff --git a/contrib/gitian-keys/udjinm6.pgp b/contrib/builder-keys/udjinm6.pgp similarity index 100% rename from contrib/gitian-keys/udjinm6.pgp rename to contrib/builder-keys/udjinm6.pgp diff --git a/contrib/containers/ci/Dockerfile b/contrib/containers/ci/Dockerfile index 45aeb57ff86c7..68b2a0821c2d7 100644 --- a/contrib/containers/ci/Dockerfile +++ b/contrib/containers/ci/Dockerfile @@ -6,7 +6,14 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="Europe/London" # Build and base stuff # (zlib1g-dev is needed for the Qt host binary builds, but should not be used by target binaries) ENV APT_ARGS="-y --no-install-recommends --no-upgrade" -RUN dpkg --add-architecture i386 + + +# Install packages for i386; disabled on aarch64 and arm64 hosts +RUN (dpkg --print-architecture | grep -Eq 'aarch64|arm64' || dpkg --add-architecture i386) +RUN (dpkg --print-architecture | grep -Eq 'aarch64|arm64' || (apt-get update && apt-get install $APT_ARGS \ + g++-9-multilib \ + wine32) && rm -rf /var/lib/apt/lists/*) + RUN apt-get update && apt-get install $APT_ARGS \ autotools-dev \ automake \ @@ -37,6 +44,7 @@ RUN pip3 install \ codespell==1.17.1 \ flake8==3.8.3 \ jinja2 \ + lief==0.12.1 \ pyzmq \ vulture==2.3 \ yq \ @@ -60,27 +68,22 @@ RUN useradd -u ${USER_ID} -g dash -s /bin/bash -m -d /home/dash dash RUN apt-get update && apt-get install $APT_ARGS \ bc \ gawk \ - g++-9-multilib \ g++-arm-linux-gnueabihf \ g++-mingw-w64-x86-64 \ - imagemagick \ jq \ libcap-dev \ - librsvg2-bin \ libz-dev \ libbz2-dev \ - libtiff-tools \ libncurses5 \ nsis \ python3-zmq \ parallel \ + valgrind \ wine-stable \ - wine32 \ wine64 \ - xorriso \ - && rm -rf /var/lib/apt/lists/* + xorriso -ARG CPPCHECK_VERSION=2.9 +ARG CPPCHECK_VERSION=2.10 RUN curl -sL "https://github.com/danmar/cppcheck/archive/${CPPCHECK_VERSION}.tar.gz" | tar -xvzf - --directory /tmp/ RUN cd /tmp/cppcheck-${CPPCHECK_VERSION} && mkdir build && cd build && cmake .. && cmake --build . 
-j 8
ENV PATH "/tmp/cppcheck-${CPPCHECK_VERSION}/build/bin:${PATH}"
@@ -102,6 +105,20 @@ RUN \
     update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix; \
     exit 0
 
+ARG LLVM_VERSION=15
+# Setup Clang+LLVM support
+RUN apt-get update && apt-get install $APT_ARGS \
+    lsb-release \
+    software-properties-common \
+    gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN cd /tmp && \
+    wget https://apt.llvm.org/llvm.sh && \
+    chmod +x llvm.sh && \
+    /tmp/llvm.sh ${LLVM_VERSION} && \
+    rm -rf /tmp/llvm.sh
+
 RUN \
     mkdir -p /src/dash && \
     mkdir -p /cache/ccache && \
diff --git a/contrib/containers/deploy/Dockerfile b/contrib/containers/deploy/Dockerfile
index 19a84057a918f..c9a4ce08fdc83 100644
--- a/contrib/containers/deploy/Dockerfile
+++ b/contrib/containers/deploy/Dockerfile
@@ -1,4 +1,4 @@
-FROM phusion/baseimage:bionic-1.0.0
+FROM phusion/baseimage:focal-1.0.0
 LABEL maintainer="Dash Developers <dev@dash.org>"
 LABEL description="Dockerised DashCore, built from Travis"
 
diff --git a/contrib/containers/deploy/Dockerfile.GitHubActions.Release b/contrib/containers/deploy/Dockerfile.GitHubActions.Release
index 433669579af39..ed3caae6f6c09 100644
--- a/contrib/containers/deploy/Dockerfile.GitHubActions.Release
+++ b/contrib/containers/deploy/Dockerfile.GitHubActions.Release
@@ -1,4 +1,4 @@
-FROM ubuntu:bionic
+FROM ubuntu:focal
 LABEL maintainer="Dash Developers <dev@dash.org>"
 LABEL description="Dockerised DashCore"
 
diff --git a/contrib/containers/guix/Dockerfile b/contrib/containers/guix/Dockerfile
new file mode 100644
index 0000000000000..12b531a7150a0
--- /dev/null
+++ b/contrib/containers/guix/Dockerfile
@@ -0,0 +1,91 @@
+# Note: Using 'docker compose up' will leave you hanging, you need
+# to use 'docker compose run guix_ubuntu' to drop into an
+# interactive shell
+
+FROM ubuntu:focal
+
+SHELL ["/bin/bash", "-c"]
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends --no-upgrade \
+    build-essential \
+    bzip2 \
+    ca-certificates \
+    curl \
+    git \
+    locales \
+    netbase \
+    sudo \
+    wget \
+    xz-utils && \
+    rm -rf /var/lib/apt/lists/*
+
+ARG guix_download_path=ftp://ftp.gnu.org/gnu/guix
+ARG guix_version=1.4.0
+ARG guix_checksum_aarch64=72d807392889919940b7ec9632c45a259555e6b0942ea7bfd131101e08ebfcf4
+ARG guix_checksum_x86_64=236ca7c9c5958b1f396c2924fcc5bc9d6fdebcb1b4cf3c7c6d46d4bf660ed9c9
+ARG builder_count=32
+
+ENV PATH /usr/local/bin:/usr/local/guix/current/bin:$PATH
+
+# Application Setup
+# https://guix.gnu.org/manual/en/html_node/Application-Setup.html
+ENV GUIX_LOCPATH="/usr/local/guix/profile" \
+    LC_ALL="C"
+
+RUN guix_file_name=guix-binary-${guix_version}.$(uname -m)-linux.tar.xz && \
+    eval "guix_checksum=\${guix_checksum_$(uname -m)}" && \
+    cd /tmp && \
+    wget -q -O "$guix_file_name" "${guix_download_path}/${guix_file_name}" && \
+    echo "${guix_checksum}  ${guix_file_name}" | sha256sum -c && \
+    tar xJf "$guix_file_name" && \
+    mv var/guix /var/ && \
+    mv gnu / && \
+    mkdir -p /usr/local/guix && \
+    ln -sf /var/guix/profiles/per-user/root/current-guix /usr/local/guix/current && \
+    ln -sf /var/guix/profiles/per-user/root/guix-profile /usr/local/guix/profile && \
+    chmod 1777 /tmp /var/tmp && \
+    source /usr/local/guix/current/etc/profile
+
+RUN touch /etc/nsswitch.conf
+
+RUN guix archive --authorize < /usr/local/guix/current/share/guix/ci.guix.gnu.org.pub && \
+    guix archive --authorize < /usr/local/guix/current/share/guix/bordeaux.guix.gnu.org.pub
+
+# Build Environment Setup
+# https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html
+RUN groupadd --system
guixbuild && \ + for i in $(seq -w 1 ${builder_count}); do \ + useradd -g guixbuild -G guixbuild \ + -d /var/empty -s $(which nologin) \ + -c "Guix build user ${i}" --system \ + "guixbuilder${i}" ; \ + done + +# Create unprivileged user +ARG USER_ID=1000 \ + GROUP_ID=1000 \ + USERNAME=ubuntu +RUN groupadd -g ${GROUP_ID} ${USERNAME} && \ + useradd -u ${USER_ID} -g ${USERNAME} -s /bin/bash -m -d /home/${USERNAME} ${USERNAME} + +# Grant it passwordless admin permissions +RUN usermod -aG sudo ${USERNAME} && \ + echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers + +# Copy required files to container +COPY --from=docker_root ./motd.txt /etc/motd +COPY --from=docker_root ./scripts/entrypoint /usr/local/bin/entrypoint +COPY --from=docker_root ./scripts/guix-check /usr/local/bin/guix-check +COPY --from=docker_root ./scripts/guix-start /usr/local/bin/guix-start + +# Create directory for mounting and grant necessary permissions +RUN mkdir -p /src/dash && \ + chown -R ${USER_ID}:${GROUP_ID} /src +WORKDIR "/src/dash" + +# Switch to unprivileged context +USER ${USERNAME} + +# Set entrypoint to copied file +ENTRYPOINT ["/usr/local/bin/entrypoint"] diff --git a/contrib/containers/guix/docker-compose.yml b/contrib/containers/guix/docker-compose.yml new file mode 100644 index 0000000000000..dc90916531155 --- /dev/null +++ b/contrib/containers/guix/docker-compose.yml @@ -0,0 +1,18 @@ +version: "3.9" +services: + guix_ubuntu: + build: + context: '../../..' + additional_contexts: + - docker_root=. + dockerfile: './contrib/containers/guix/Dockerfile' + args: + USER_ID: 1000 # set this to $(id -u) of the host + GROUP_ID: 1000 # set this to $(id -g) of the host + container_name: guix_ubuntu + tty: true + stdin_open: true + privileged: true + network_mode: host + volumes: + - "../../..:/src/dash:rw" diff --git a/contrib/containers/guix/motd.txt b/contrib/containers/guix/motd.txt new file mode 100644 index 0000000000000..448da605920a1 --- /dev/null +++ b/contrib/containers/guix/motd.txt @@ -0,0 +1,4 @@ +##################################################### +To get started, run 'guix-start' and then calculate +hashes using 'guix-check' +##################################################### diff --git a/contrib/containers/guix/scripts/entrypoint b/contrib/containers/guix/scripts/entrypoint new file mode 100755 index 0000000000000..f84cf42270935 --- /dev/null +++ b/contrib/containers/guix/scripts/entrypoint @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -eo pipefail + +# Read instructions +cat /etc/motd + +# Start the Guix daemon +sudo env PATH=${PATH} guix-daemon \ + --build-users-group='guixbuild' \ + --substitute-urls='https://bordeaux.guix.gnu.org https://ci.guix.gnu.org' < /dev/null 2>&1 | + sudo tee /var/log/guix.log > /dev/null & + +# Hand over control +exec bash diff --git a/contrib/containers/guix/scripts/guix-check b/contrib/containers/guix/scripts/guix-check new file mode 100755 index 0000000000000..e16891bc72a2e --- /dev/null +++ b/contrib/containers/guix/scripts/guix-check @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -eo pipefail + +if [[ -n "${1}" ]]; then + WORKSPACE_PATH="$1" +else + WORKSPACE_PATH="/src/dash" +fi + +if [[ ! -d "$WORKSPACE_PATH" ]]; then + echo "$0: $WORKSPACE_PATH is not a valid directory, exiting!" 
+ exit 1 +fi + +cd "$WORKSPACE_PATH" +COMMIT_ID="$(git rev-parse --short=12 HEAD)" + +printf "\nBinaries:\n\n" +( \ +SRC_PATH_PREFIX="guix-build-${COMMIT_ID}/distsrc-" && \ +sha256sum ${SRC_PATH_PREFIX}*/src/dash{d,-cli,-tx,-wallet}{,.exe} && \ +sha256sum ${SRC_PATH_PREFIX}*/src/qt/dash-qt{,.exe} && \ +sha256sum ${SRC_PATH_PREFIX}*/src/test/test_dash{,.exe} \ +) | sort -k 2 + +printf "\nArchives:\n\n" +find "guix-build-${COMMIT_ID}/output" -type f | grep -v SHA256 | xargs sha256sum | sort -k 2 diff --git a/contrib/containers/guix/scripts/guix-start b/contrib/containers/guix/scripts/guix-start new file mode 100755 index 0000000000000..a32c4f71e1381 --- /dev/null +++ b/contrib/containers/guix/scripts/guix-start @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -eo pipefail + +if [[ -n "${1}" ]]; then + WORKSPACE_PATH="$1" +else + WORKSPACE_PATH="/src/dash" +fi + +if [[ ! -d "$WORKSPACE_PATH" ]]; then + echo "$0: $WORKSPACE_PATH is not a valid directory, exiting!" + exit 1 +fi + +XCODE_VERSION="12.2" +XCODE_RELEASE="12B45b" +XCODE_ARCHIVE="Xcode-${XCODE_VERSION}-${XCODE_RELEASE}-extracted-SDK-with-libcxx-headers" + +# Check if macOS SDK is present, if not, download it +if [ ! -d "${WORKSPACE_PATH}/depends/SDKs/${XCODE_ARCHIVE}" ] +then + mkdir -p "${WORKSPACE_PATH}/depends/SDKs" + curl -L https://bitcoincore.org/depends-sources/sdks/${XCODE_ARCHIVE}.tar.gz | tar -xz -C "${WORKSPACE_PATH}/depends/SDKs" +fi + +cd "${WORKSPACE_PATH}" +git config --global --add safe.directory "${WORKSPACE_PATH}" +git status >> /dev/null + +./contrib/guix/guix-build diff --git a/contrib/debian/copyright b/contrib/debian/copyright index b76936c9f7bd6..7d80f18b2486c 100644 --- a/contrib/debian/copyright +++ b/contrib/debian/copyright @@ -4,7 +4,7 @@ Upstream-Contact: Dash Core Group, Inc https://www.dash.org/team/ Source: https://github.com/dashpay/dash Files: * -Copyright: 2009-2019, Bitcoin Core Developers, +Copyright: 2009-2020, Bitcoin Core Developers, 2019-2020, Dash Core Developers License: Expat Comment: The Bitcoin Core Developers encompasses the current developers listed on bitcoin.org, diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md index 357767b0ae635..c9860d83bc97f 100644 --- a/contrib/devtools/README.md +++ b/contrib/devtools/README.md @@ -170,7 +170,7 @@ certain symbols and are only linked against allowed libraries. For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols. This makes sure they are still compatible with the minimum supported distribution versions. -For macOS we check that the executables are only linked against libraries we allow. +For macOS and Windows we check that the executables are only linked against libraries we allow. Example usage after a Gitian build: diff --git a/contrib/devtools/pixie.py b/contrib/devtools/pixie.py deleted file mode 100644 index 64660968ad255..0000000000000 --- a/contrib/devtools/pixie.py +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2020 Wladimir J. van der Laan -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' -Compact, self-contained ELF implementation for bitcoin-core security checks. 
-''' -import struct -import types -from typing import Dict, List, Optional, Union, Tuple - -# you can find all these values in elf.h -EI_NIDENT = 16 - -# Byte indices in e_ident -EI_CLASS = 4 # ELFCLASSxx -EI_DATA = 5 # ELFDATAxxxx - -ELFCLASS32 = 1 # 32-bit -ELFCLASS64 = 2 # 64-bit - -ELFDATA2LSB = 1 # little endian -ELFDATA2MSB = 2 # big endian - -# relevant values for e_machine -EM_386 = 3 -EM_PPC64 = 21 -EM_ARM = 40 -EM_AARCH64 = 183 -EM_X86_64 = 62 -EM_RISCV = 243 - -# relevant values for e_type -ET_DYN = 3 - -# relevant values for sh_type -SHT_PROGBITS = 1 -SHT_STRTAB = 3 -SHT_DYNAMIC = 6 -SHT_DYNSYM = 11 -SHT_GNU_verneed = 0x6ffffffe -SHT_GNU_versym = 0x6fffffff - -# relevant values for p_type -PT_LOAD = 1 -PT_GNU_STACK = 0x6474e551 -PT_GNU_RELRO = 0x6474e552 - -# relevant values for p_flags -PF_X = (1 << 0) -PF_W = (1 << 1) -PF_R = (1 << 2) - -# relevant values for d_tag -DT_NEEDED = 1 -DT_FLAGS = 30 - -# relevant values of `d_un.d_val' in the DT_FLAGS entry -DF_BIND_NOW = 0x00000008 - -# relevant d_tags with string payload -STRING_TAGS = {DT_NEEDED} - -# rrlevant values for ST_BIND subfield of st_info (symbol binding) -STB_LOCAL = 0 - -class ELFRecord(types.SimpleNamespace): - '''Unified parsing for ELF records.''' - def __init__(self, data: bytes, offset: int, eh: 'ELFHeader', total_size: Optional[int]) -> None: - hdr_struct = self.STRUCT[eh.ei_class][0][eh.ei_data] - if total_size is not None and hdr_struct.size > total_size: - raise ValueError(f'{self.__class__.__name__} header size too small ({total_size} < {hdr_struct.size})') - for field, value in zip(self.STRUCT[eh.ei_class][1], hdr_struct.unpack(data[offset:offset + hdr_struct.size])): - setattr(self, field, value) - -def BiStruct(chars: str) -> Dict[int, struct.Struct]: - '''Compile a struct parser for both endians.''' - return { - ELFDATA2LSB: struct.Struct('<' + chars), - ELFDATA2MSB: struct.Struct('>' + chars), - } - -class ELFHeader(ELFRecord): - FIELDS = ['e_type', 'e_machine', 'e_version', 'e_entry', 'e_phoff', 'e_shoff', 'e_flags', 'e_ehsize', 'e_phentsize', 'e_phnum', 'e_shentsize', 'e_shnum', 'e_shstrndx'] - STRUCT = { - ELFCLASS32: (BiStruct('HHIIIIIHHHHHH'), FIELDS), - ELFCLASS64: (BiStruct('HHIQQQIHHHHHH'), FIELDS), - } - - def __init__(self, data: bytes, offset: int) -> None: - self.e_ident = data[offset:offset + EI_NIDENT] - if self.e_ident[0:4] != b'\x7fELF': - raise ValueError('invalid ELF magic') - self.ei_class = self.e_ident[EI_CLASS] - self.ei_data = self.e_ident[EI_DATA] - - super().__init__(data, offset + EI_NIDENT, self, None) - - def __repr__(self) -> str: - return f'Header(e_ident={self.e_ident!r}, e_type={self.e_type}, e_machine={self.e_machine}, e_version={self.e_version}, e_entry={self.e_entry}, e_phoff={self.e_phoff}, e_shoff={self.e_shoff}, e_flags={self.e_flags}, e_ehsize={self.e_ehsize}, e_phentsize={self.e_phentsize}, e_phnum={self.e_phnum}, e_shentsize={self.e_shentsize}, e_shnum={self.e_shnum}, e_shstrndx={self.e_shstrndx})' - -class Section(ELFRecord): - name: Optional[bytes] = None - FIELDS = ['sh_name', 'sh_type', 'sh_flags', 'sh_addr', 'sh_offset', 'sh_size', 'sh_link', 'sh_info', 'sh_addralign', 'sh_entsize'] - STRUCT = { - ELFCLASS32: (BiStruct('IIIIIIIIII'), FIELDS), - ELFCLASS64: (BiStruct('IIQQQQIIQQ'), FIELDS), - } - - def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None: - super().__init__(data, offset, eh, eh.e_shentsize) - self._data = data - - def __repr__(self) -> str: - return f'Section(sh_name={self.sh_name}({self.name!r}), sh_type=0x{self.sh_type:x}, 
sh_flags={self.sh_flags}, sh_addr=0x{self.sh_addr:x}, sh_offset=0x{self.sh_offset:x}, sh_size={self.sh_size}, sh_link={self.sh_link}, sh_info={self.sh_info}, sh_addralign={self.sh_addralign}, sh_entsize={self.sh_entsize})' - - def contents(self) -> bytes: - '''Return section contents.''' - return self._data[self.sh_offset:self.sh_offset + self.sh_size] - -class ProgramHeader(ELFRecord): - STRUCT = { - # different ELF classes have the same fields, but in a different order to optimize space versus alignment - ELFCLASS32: (BiStruct('IIIIIIII'), ['p_type', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_flags', 'p_align']), - ELFCLASS64: (BiStruct('IIQQQQQQ'), ['p_type', 'p_flags', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_align']), - } - - def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None: - super().__init__(data, offset, eh, eh.e_phentsize) - - def __repr__(self) -> str: - return f'ProgramHeader(p_type={self.p_type}, p_offset={self.p_offset}, p_vaddr={self.p_vaddr}, p_paddr={self.p_paddr}, p_filesz={self.p_filesz}, p_memsz={self.p_memsz}, p_flags={self.p_flags}, p_align={self.p_align})' - -class Symbol(ELFRecord): - STRUCT = { - # different ELF classes have the same fields, but in a different order to optimize space versus alignment - ELFCLASS32: (BiStruct('IIIBBH'), ['st_name', 'st_value', 'st_size', 'st_info', 'st_other', 'st_shndx']), - ELFCLASS64: (BiStruct('IBBHQQ'), ['st_name', 'st_info', 'st_other', 'st_shndx', 'st_value', 'st_size']), - } - - def __init__(self, data: bytes, offset: int, eh: ELFHeader, symtab: Section, strings: bytes, version: Optional[bytes]) -> None: - super().__init__(data, offset, eh, symtab.sh_entsize) - self.name = _lookup_string(strings, self.st_name) - self.version = version - - def __repr__(self) -> str: - return f'Symbol(st_name={self.st_name}({self.name!r}), st_value={self.st_value}, st_size={self.st_size}, st_info={self.st_info}, st_other={self.st_other}, st_shndx={self.st_shndx}, version={self.version!r})' - - @property - def is_import(self) -> bool: - '''Returns whether the symbol is an imported symbol.''' - return self.st_bind != STB_LOCAL and self.st_shndx == 0 - - @property - def is_export(self) -> bool: - '''Returns whether the symbol is an exported symbol.''' - return self.st_bind != STB_LOCAL and self.st_shndx != 0 - - @property - def st_bind(self) -> int: - '''Returns STB_*.''' - return self.st_info >> 4 - -class Verneed(ELFRecord): - DEF = (BiStruct('HHIII'), ['vn_version', 'vn_cnt', 'vn_file', 'vn_aux', 'vn_next']) - STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF } - - def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None: - super().__init__(data, offset, eh, None) - - def __repr__(self) -> str: - return f'Verneed(vn_version={self.vn_version}, vn_cnt={self.vn_cnt}, vn_file={self.vn_file}, vn_aux={self.vn_aux}, vn_next={self.vn_next})' - -class Vernaux(ELFRecord): - DEF = (BiStruct('IHHII'), ['vna_hash', 'vna_flags', 'vna_other', 'vna_name', 'vna_next']) - STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF } - - def __init__(self, data: bytes, offset: int, eh: ELFHeader, strings: bytes) -> None: - super().__init__(data, offset, eh, None) - self.name = _lookup_string(strings, self.vna_name) - - def __repr__(self) -> str: - return f'Veraux(vna_hash={self.vna_hash}, vna_flags={self.vna_flags}, vna_other={self.vna_other}, vna_name={self.vna_name}({self.name!r}), vna_next={self.vna_next})' - -class DynTag(ELFRecord): - STRUCT = { - ELFCLASS32: (BiStruct('II'), ['d_tag', 'd_val']), - ELFCLASS64: 
(BiStruct('QQ'), ['d_tag', 'd_val']), - } - - def __init__(self, data: bytes, offset: int, eh: ELFHeader, section: Section) -> None: - super().__init__(data, offset, eh, section.sh_entsize) - - def __repr__(self) -> str: - return f'DynTag(d_tag={self.d_tag}, d_val={self.d_val})' - -def _lookup_string(data: bytes, index: int) -> bytes: - '''Look up string by offset in ELF string table.''' - endx = data.find(b'\x00', index) - assert endx != -1 - return data[index:endx] - -VERSYM_S = BiStruct('H') # .gnu_version section has a single 16-bit integer per symbol in the linked section -def _parse_symbol_table(section: Section, strings: bytes, eh: ELFHeader, versym: bytes, verneed: Dict[int, bytes]) -> List[Symbol]: - '''Parse symbol table, return a list of symbols.''' - data = section.contents() - symbols = [] - versym_iter = (verneed.get(v[0]) for v in VERSYM_S[eh.ei_data].iter_unpack(versym)) - for ofs, version in zip(range(0, len(data), section.sh_entsize), versym_iter): - symbols.append(Symbol(data, ofs, eh, section, strings, version)) - return symbols - -def _parse_verneed(section: Section, strings: bytes, eh: ELFHeader) -> Dict[int, bytes]: - '''Parse .gnu.version_r section, return a dictionary of {versym: 'GLIBC_...'}.''' - data = section.contents() - ofs = 0 - result = {} - while True: - verneed = Verneed(data, ofs, eh) - aofs = ofs + verneed.vn_aux - while True: - vernaux = Vernaux(data, aofs, eh, strings) - result[vernaux.vna_other] = vernaux.name - if not vernaux.vna_next: - break - aofs += vernaux.vna_next - - if not verneed.vn_next: - break - ofs += verneed.vn_next - - return result - -def _parse_dyn_tags(section: Section, strings: bytes, eh: ELFHeader) -> List[Tuple[int, Union[bytes, int]]]: - '''Parse dynamic tags. Return array of tuples.''' - data = section.contents() - ofs = 0 - result = [] - for ofs in range(0, len(data), section.sh_entsize): - tag = DynTag(data, ofs, eh, section) - val = _lookup_string(strings, tag.d_val) if tag.d_tag in STRING_TAGS else tag.d_val - result.append((tag.d_tag, val)) - - return result - -class ELFFile: - sections: List[Section] - program_headers: List[ProgramHeader] - dyn_symbols: List[Symbol] - dyn_tags: List[Tuple[int, Union[bytes, int]]] - - def __init__(self, data: bytes) -> None: - self.data = data - self.hdr = ELFHeader(self.data, 0) - self._load_sections() - self._load_program_headers() - self._load_dyn_symbols() - self._load_dyn_tags() - self._section_to_segment_mapping() - - def _load_sections(self) -> None: - self.sections = [] - for idx in range(self.hdr.e_shnum): - offset = self.hdr.e_shoff + idx * self.hdr.e_shentsize - self.sections.append(Section(self.data, offset, self.hdr)) - - shstr = self.sections[self.hdr.e_shstrndx].contents() - for section in self.sections: - section.name = _lookup_string(shstr, section.sh_name) - - def _load_program_headers(self) -> None: - self.program_headers = [] - for idx in range(self.hdr.e_phnum): - offset = self.hdr.e_phoff + idx * self.hdr.e_phentsize - self.program_headers.append(ProgramHeader(self.data, offset, self.hdr)) - - def _load_dyn_symbols(self) -> None: - # first, load 'verneed' section - verneed = None - for section in self.sections: - if section.sh_type == SHT_GNU_verneed: - strtab = self.sections[section.sh_link].contents() # associated string table - assert verneed is None # only one section of this kind please - verneed = _parse_verneed(section, strtab, self.hdr) - assert verneed is not None - - # then, correlate GNU versym sections with dynamic symbol sections - versym = {} - for 
section in self.sections: - if section.sh_type == SHT_GNU_versym: - versym[section.sh_link] = section - - # finally, load dynsym sections - self.dyn_symbols = [] - for idx, section in enumerate(self.sections): - if section.sh_type == SHT_DYNSYM: # find dynamic symbol tables - strtab_data = self.sections[section.sh_link].contents() # associated string table - versym_data = versym[idx].contents() # associated symbol version table - self.dyn_symbols += _parse_symbol_table(section, strtab_data, self.hdr, versym_data, verneed) - - def _load_dyn_tags(self) -> None: - self.dyn_tags = [] - for idx, section in enumerate(self.sections): - if section.sh_type == SHT_DYNAMIC: # find dynamic tag tables - strtab = self.sections[section.sh_link].contents() # associated string table - self.dyn_tags += _parse_dyn_tags(section, strtab, self.hdr) - - def _section_to_segment_mapping(self) -> None: - for ph in self.program_headers: - ph.sections = [] - for section in self.sections: - if ph.p_vaddr <= section.sh_addr < (ph.p_vaddr + ph.p_memsz): - ph.sections.append(section) - - def query_dyn_tags(self, tag_in: int) -> List[Union[int, bytes]]: - '''Return the values of all dyn tags with the specified tag.''' - return [val for (tag, val) in self.dyn_tags if tag == tag_in] - - -def load(filename: str) -> ELFFile: - with open(filename, 'rb') as f: - data = f.read() - return ELFFile(data) diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index ae4384a4f60d3..e0ea46c3bc080 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -1,308 +1,263 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2015-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Perform basic security checks on a series of executables. Exit status will be 0 if successful, and the program will be silent. Otherwise the exit status will be 1 and it will log which executables failed which checks. -Needs `objdump` (for PE) and `otool` (for MACHO). ''' -import subprocess import sys -import os -from typing import List, Optional +from typing import List -import pixie +import lief -OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump') -OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool') - -def run_command(command) -> str: - p = subprocess.run(command, stdout=subprocess.PIPE, check=True, universal_newlines=True) - return p.stdout - -def check_ELF_PIE(executable) -> bool: - ''' - Check for position independent executable (PIE), allowing for address space randomization. - ''' - elf = pixie.load(executable) - return elf.hdr.e_type == pixie.ET_DYN - -def check_ELF_NX(executable) -> bool: - ''' - Check that no sections are writable and executable (including the stack) - ''' - elf = pixie.load(executable) - have_wx = False - have_gnu_stack = False - for ph in elf.program_headers: - if ph.p_type == pixie.PT_GNU_STACK: - have_gnu_stack = True - if (ph.p_flags & pixie.PF_W) != 0 and (ph.p_flags & pixie.PF_X) != 0: # section is both writable and executable - have_wx = True - return have_gnu_stack and not have_wx - -def check_ELF_RELRO(executable) -> bool: +def check_ELF_RELRO(binary) -> bool: ''' Check for read-only relocations. 
GNU_RELRO program header must exist Dynamic section must have BIND_NOW flag ''' - elf = pixie.load(executable) have_gnu_relro = False - for ph in elf.program_headers: + for segment in binary.segments: # Note: not checking p_flags == PF_R: here as linkers set the permission differently # This does not affect security: the permission flags of the GNU_RELRO program # header are ignored, the PT_LOAD header determines the effective permissions. # However, the dynamic linker need to write to this area so these are RW. # Glibc itself takes care of mprotecting this area R after relocations are finished. # See also https://marc.info/?l=binutils&m=1498883354122353 - if ph.p_type == pixie.PT_GNU_RELRO: + if segment.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO: have_gnu_relro = True have_bindnow = False - for flags in elf.query_dyn_tags(pixie.DT_FLAGS): - assert isinstance(flags, int) - if flags & pixie.DF_BIND_NOW: + try: + flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS) + if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW: have_bindnow = True + except: + have_bindnow = False return have_gnu_relro and have_bindnow -def check_ELF_Canary(executable) -> bool: +def check_ELF_Canary(binary) -> bool: ''' Check for use of stack canary ''' - elf = pixie.load(executable) - ok = False - for symbol in elf.dyn_symbols: - if symbol.name == b'__stack_chk_fail': - ok = True - return ok + return binary.has_symbol('__stack_chk_fail') -def check_ELF_separate_code(executable): +def check_ELF_separate_code(binary): ''' Check that sections are appropriately separated in virtual memory, based on their permissions. This checks for missing -Wl,-z,separate-code and potentially other problems. ''' - elf = pixie.load(executable) - R = pixie.PF_R - W = pixie.PF_W - E = pixie.PF_X + R = lief.ELF.SEGMENT_FLAGS.R + W = lief.ELF.SEGMENT_FLAGS.W + E = lief.ELF.SEGMENT_FLAGS.X EXPECTED_FLAGS = { # Read + execute - b'.init': R | E, - b'.plt': R | E, - b'.plt.got': R | E, - b'.plt.sec': R | E, - b'.text': R | E, - b'.fini': R | E, + '.init': R | E, + '.plt': R | E, + '.plt.got': R | E, + '.plt.sec': R | E, + '.text': R | E, + '.fini': R | E, # Read-only data - b'.interp': R, - b'.note.gnu.property': R, - b'.note.gnu.build-id': R, - b'.note.ABI-tag': R, - b'.gnu.hash': R, - b'.dynsym': R, - b'.dynstr': R, - b'.gnu.version': R, - b'.gnu.version_r': R, - b'.rela.dyn': R, - b'.rela.plt': R, - b'.rodata': R, - b'.eh_frame_hdr': R, - b'.eh_frame': R, - b'.qtmetadata': R, - b'.gcc_except_table': R, - b'.stapsdt.base': R, + '.interp': R, + '.note.gnu.property': R, + '.note.gnu.build-id': R, + '.note.ABI-tag': R, + '.gnu.hash': R, + '.dynsym': R, + '.dynstr': R, + '.gnu.version': R, + '.gnu.version_r': R, + '.rela.dyn': R, + '.rela.plt': R, + '.rodata': R, + '.eh_frame_hdr': R, + '.eh_frame': R, + '.qtmetadata': R, + '.gcc_except_table': R, + '.stapsdt.base': R, # Writable data - b'.init_array': R | W, - b'.fini_array': R | W, - b'.dynamic': R | W, - b'.got': R | W, - b'.data': R | W, - b'.bss': R | W, + '.init_array': R | W, + '.fini_array': R | W, + '.dynamic': R | W, + '.got': R | W, + '.data': R | W, + '.bss': R | W, } - if elf.hdr.e_machine == pixie.EM_PPC64: + if binary.header.machine_type == lief.ELF.ARCH.PPC64: # .plt is RW on ppc64 even with separate-code - EXPECTED_FLAGS[b'.plt'] = R | W + EXPECTED_FLAGS['.plt'] = R | W # For all LOAD program headers get mapping to the list of sections, # and for each section, remember the flags of the associated program header. 
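 # (For example, with -Wl,-z,separate-code in effect, .text should land in a
 # LOAD segment whose flags are exactly R | E, and .data in one that is R | W.)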
flags_per_section = {} - for ph in elf.program_headers: - if ph.p_type == pixie.PT_LOAD: - for section in ph.sections: - assert(section.name not in flags_per_section) - flags_per_section[section.name] = ph.p_flags + for segment in binary.segments: + if segment.type == lief.ELF.SEGMENT_TYPES.LOAD: + for section in segment.sections: + flags_per_section[section.name] = segment.flags # Spot-check ELF LOAD program header flags per section # If these sections exist, check them against the expected R/W/E flags for (section, flags) in flags_per_section.items(): if section in EXPECTED_FLAGS: - if EXPECTED_FLAGS[section] != flags: + if int(EXPECTED_FLAGS[section]) != int(flags): return False return True -def get_PE_dll_characteristics(executable) -> int: - '''Get PE DllCharacteristics bits''' - stdout = run_command([OBJDUMP_CMD, '-x', executable]) - - bits = 0 - for line in stdout.splitlines(): - tokens = line.split() - if len(tokens)>=2 and tokens[0] == 'DllCharacteristics': - bits = int(tokens[1],16) - return bits +def check_ELF_control_flow(binary) -> bool: + ''' + Check for control flow instrumentation + ''' + main = binary.get_function_address('main') + content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO) -IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020 -IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040 -IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100 + if content == [243, 15, 30, 250]: # endbr64 + return True + return False -def check_PE_DYNAMIC_BASE(executable) -> bool: +def check_PE_DYNAMIC_BASE(binary) -> bool: '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)''' - bits = get_PE_dll_characteristics(executable) - return (bits & IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE) == IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE + return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists # Must support high-entropy 64-bit address space layout randomization # in addition to DYNAMIC_BASE to have secure ASLR. -def check_PE_HIGH_ENTROPY_VA(executable) -> bool: +def check_PE_HIGH_ENTROPY_VA(binary) -> bool: '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR''' - bits = get_PE_dll_characteristics(executable) - return (bits & IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA) == IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA + return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists -def check_PE_RELOC_SECTION(executable) -> bool: +def check_PE_RELOC_SECTION(binary) -> bool: '''Check for a reloc section. 
This is required for functional ASLR.''' - stdout = run_command([OBJDUMP_CMD, '-h', executable]) + return binary.has_relocations - for line in stdout.splitlines(): - if '.reloc' in line: - return True - return False - -def check_PE_NX(executable) -> bool: - '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)''' - bits = get_PE_dll_characteristics(executable) - return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT +def check_PE_control_flow(binary) -> bool: + ''' + Check for control flow instrumentation + ''' + main = binary.get_symbol('main').value -def get_MACHO_executable_flags(executable) -> List[str]: - stdout = run_command([OTOOL_CMD, '-vh', executable]) + section_addr = binary.section_from_rva(main).virtual_address + virtual_address = binary.optional_header.imagebase + section_addr + main - flags: List[str] = [] - for line in stdout.splitlines(): - tokens = line.split() - # filter first two header lines - if 'magic' in tokens or 'Mach' in tokens: - continue - # filter ncmds and sizeofcmds values - flags += [t for t in tokens if not t.isdigit()] - return flags + content = binary.get_content_from_virtual_address(virtual_address, 4, lief.Binary.VA_TYPES.VA) -def check_MACHO_PIE(executable) -> bool: - ''' - Check for position independent executable (PIE), allowing for address space randomization. - ''' - flags = get_MACHO_executable_flags(executable) - if 'PIE' in flags: + if content == [243, 15, 30, 250]: # endbr64 return True return False -def check_MACHO_NOUNDEFS(executable) -> bool: +def check_MACHO_NOUNDEFS(binary) -> bool: ''' Check for no undefined references. ''' - flags = get_MACHO_executable_flags(executable) - if 'NOUNDEFS' in flags: - return True - return False + return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS) -def check_MACHO_NX(executable) -> bool: +def check_MACHO_LAZY_BINDINGS(binary) -> bool: ''' - Check for no stack execution + Check for no lazy bindings. + We don't use or check for MH_BINDATLOAD. See #18295. ''' - flags = get_MACHO_executable_flags(executable) - if 'ALLOW_STACK_EXECUTION' in flags: - return False - return True + return binary.dyld_info.lazy_bind == (0,0) -def check_MACHO_LAZY_BINDINGS(executable) -> bool: +def check_MACHO_Canary(binary) -> bool: ''' - Check for no lazy bindings. - We don't use or check for MH_BINDATLOAD. See #18295. + Check for use of stack canary ''' - stdout = run_command([OTOOL_CMD, '-l', executable]) + return binary.has_symbol('___stack_chk_fail') - for line in stdout.splitlines(): - tokens = line.split() - if 'lazy_bind_off' in tokens or 'lazy_bind_size' in tokens: - if tokens[1] != '0': - return False - return True +def check_PIE(binary) -> bool: + ''' + Check for position independent executable (PIE), + allowing for address space randomization. 
+ ''' + return binary.is_pie -def check_MACHO_Canary(executable) -> bool: +def check_NX(binary) -> bool: ''' - Check for use of stack canary + Check for no stack execution ''' - stdout = run_command([OTOOL_CMD, '-Iv', executable]) + return binary.has_nx - ok = False - for line in stdout.splitlines(): - if '___stack_chk_fail' in line: - ok = True - return ok +def check_MACHO_control_flow(binary) -> bool: + ''' + Check for control flow instrumentation + ''' + content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO) -CHECKS = { -'ELF': [ - ('PIE', check_ELF_PIE), - ('NX', check_ELF_NX), + if content == [243, 15, 30, 250]: # endbr64 + return True + return False + +BASE_ELF = [ + ('PIE', check_PIE), + ('NX', check_NX), ('RELRO', check_ELF_RELRO), ('Canary', check_ELF_Canary), ('separate_code', check_ELF_separate_code), -], -'PE': [ +] + +BASE_PE = [ + ('PIE', check_PIE), ('DYNAMIC_BASE', check_PE_DYNAMIC_BASE), ('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA), - ('NX', check_PE_NX), - ('RELOC_SECTION', check_PE_RELOC_SECTION) -], -'MACHO': [ - ('PIE', check_MACHO_PIE), + ('NX', check_NX), + ('RELOC_SECTION', check_PE_RELOC_SECTION), + ('CONTROL_FLOW', check_PE_control_flow), +] + +BASE_MACHO = [ ('NOUNDEFS', check_MACHO_NOUNDEFS), - ('NX', check_MACHO_NX), ('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS), - ('Canary', check_MACHO_Canary) + ('Canary', check_MACHO_Canary), ] -} -def identify_executable(executable) -> Optional[str]: - with open(filename, 'rb') as f: - magic = f.read(4) - if magic.startswith(b'MZ'): - return 'PE' - elif magic.startswith(b'\x7fELF'): - return 'ELF' - elif magic.startswith(b'\xcf\xfa'): - return 'MACHO' - return None +CHECKS = { + lief.EXE_FORMATS.ELF: { + lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_control_flow)], + lief.ARCHITECTURES.ARM: BASE_ELF, + lief.ARCHITECTURES.ARM64: BASE_ELF, + lief.ARCHITECTURES.PPC: BASE_ELF, + lief.ARCHITECTURES.RISCV: BASE_ELF, + }, + lief.EXE_FORMATS.PE: { + lief.ARCHITECTURES.X86: BASE_PE, + }, + lief.EXE_FORMATS.MACHO: { + lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE), + ('NX', check_NX), + ('CONTROL_FLOW', check_MACHO_control_flow)], + lief.ARCHITECTURES.ARM64: BASE_MACHO, + } +} if __name__ == '__main__': - retval = 0 + retval: int = 0 for filename in sys.argv[1:]: try: - etype = identify_executable(filename) - if etype is None: - print('%s: unknown format' % filename) + binary = lief.parse(filename) + etype = binary.format + arch = binary.abstract.header.architecture + binary.concrete + + if etype == lief.EXE_FORMATS.UNKNOWN: + print(f'{filename}: unknown executable format') + retval = 1 + continue + + if arch == lief.ARCHITECTURES.NONE: + print(f'{filename}: unknown architecture') retval = 1 continue - failed = [] - for (name, func) in CHECKS[etype]: - if not func(filename): + failed: List[str] = [] + for (name, func) in CHECKS[etype][arch]: + if not func(binary): failed.append(name) if failed: - print('%s: failed %s' % (filename, ' '.join(failed))) + print(f'{filename}: failed {" ".join(failed)}') retval = 1 except IOError: - print('%s: cannot open' % filename) + print(f'{filename}: cannot open') retval = 1 sys.exit(retval) diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 492647eea8268..5fd78ced13948 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -3,20 +3,17 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. ''' -A script to check that the (Linux) executables produced by Gitian only contain -allowed gcc and glibc version symbols. This makes sure they are still compatible -with the minimum supported Linux distribution versions. +A script to check that the (Linux) release executables only contain +certain symbols and are only linked against allowed libraries. Example usage: - find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py + find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py ''' -import subprocess import sys -import os -from typing import List, Optional +from typing import Dict -import pixie +import lief # Debian 9 (Stretch) EOL: 2022. https://wiki.debian.org/DebianReleases#Production_Releases # @@ -34,30 +31,55 @@ # - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/) # # See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info. +# +# For 32-bit systems the minimum libc version is 2.28 to embrace new fcntl{64} symbols. +# It is safer than handling them in the glibc_compat.cpp due to their variadic arguments +# with possible different sizes. +# See: https://stackoverflow.com/a/58472959 +# MAX_VERSIONS = { 'GCC': (4,8,0), 'GLIBC': { - pixie.EM_386: (2,18), - pixie.EM_X86_64: (2,18), - pixie.EM_ARM: (2,18), - pixie.EM_AARCH64:(2,18), - pixie.EM_PPC64: (2,18), - pixie.EM_RISCV: (2,27), + lief.ELF.ARCH.x86_64: (2,18), + lief.ELF.ARCH.ARM: (2,28), + lief.ELF.ARCH.AARCH64:(2,18), + lief.ELF.ARCH.PPC64: (2,18), + lief.ELF.ARCH.RISCV: (2,27), }, 'LIBATOMIC': (1,0), 'V': (0,5,0), # xkb (bitcoin-qt only) } -# See here for a description of _IO_stdin_used: -# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109 # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', +'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', +'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', 'environ', '_environ', '__environ', +# Used in stacktraces.cpp +'__cxa_demangle' +} + +# Expected linker-loader names can be found here: +# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16 +ELF_INTERPRETER_NAMES: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, str]] = { + lief.ELF.ARCH.x86_64: { + lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2", + }, + lief.ELF.ARCH.ARM: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3", + }, + lief.ELF.ARCH.AARCH64: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1", + }, + lief.ELF.ARCH.PPC64: { + lief.ENDIANNESS.BIG: "/lib64/ld64.so.1", + lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2", + }, + lief.ELF.ARCH.RISCV: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1", + }, } -CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') -OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool') # Allowed NEEDED libraries ELF_ALLOWED_LIBRARIES = { @@ -75,8 +97,10 @@ 'ld64.so.1', # POWER64 ABIv1 dynamic linker 'ld64.so.2', # POWER64 ABIv2 dynamic linker 'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker +'libz.so.1', # zlib # dash-qt only 'libxcb.so.1', # part of X11 +'libxcb-shm.so.0', # X11 shared memory extension 'libxkbcommon.so.0', # keyboard keymapping 'libxkbcommon-x11.so.0', # keyboard keymapping 'libfontconfig.so.1', # font support @@ 
-107,31 +131,32 @@ 'QuartzCore', # animation } -class CPPFilt(object): - ''' - Demangle C++ symbol names. - - Use a pipe to the 'c++filt' command. - ''' - def __init__(self): - self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) - - def __call__(self, mangled): - self.proc.stdin.write(mangled + '\n') - self.proc.stdin.flush() - return self.proc.stdout.readline().rstrip() - - def close(self): - self.proc.stdin.close() - self.proc.stdout.close() - self.proc.wait() +PE_ALLOWED_LIBRARIES = { +'ADVAPI32.dll', # security & registry +'IPHLPAPI.DLL', # IP helper API +'KERNEL32.dll', # win32 base APIs +'msvcrt.dll', # C standard library for MSVC +'SHELL32.dll', # shell API +'USER32.dll', # user interface +'WS2_32.dll', # sockets +'bcrypt.dll', +# bitcoin-qt only +'dwmapi.dll', # desktop window manager +'GDI32.dll', # graphics device interface +'IMM32.dll', # input method editor +'NETAPI32.dll', +'ole32.dll', # component object model +'OLEAUT32.dll', # OLE Automation API +'SHLWAPI.dll', # light weight shell API +'USERENV.dll', +'UxTheme.dll', +'VERSION.dll', # version checking +'WINMM.dll', # WinMM audio API +'WTSAPI32.dll', +} def check_version(max_versions, version, arch) -> bool: - if '_' in version: - (lib, _, ver) = version.rpartition('_') - else: - lib = version - ver = '0' + (lib, _, ver) = version.rpartition('_') ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False @@ -140,106 +165,119 @@ def check_version(max_versions, version, arch) -> bool: else: return ver <= max_versions[lib][arch] -def check_imported_symbols(filename) -> bool: - elf = pixie.load(filename) - cppfilt = CPPFilt() +def check_imported_symbols(binary) -> bool: ok = True - for symbol in elf.dyn_symbols: - if not symbol.is_import: + for symbol in binary.imported_symbols: + if not symbol.imported: continue - sym = symbol.name.decode() - version = symbol.version.decode() if symbol.version is not None else None - if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine): - print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version)) - ok = False + + version = symbol.symbol_version if symbol.has_version else None + + if version: + aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None + if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type): + print(f'{filename}: symbol {symbol.name} from unsupported version {version}') + ok = False return ok -def check_exported_symbols(filename) -> bool: - elf = pixie.load(filename) - cppfilt = CPPFilt() +def check_exported_symbols(binary) -> bool: ok = True - for symbol in elf.dyn_symbols: - if not symbol.is_export: + + for symbol in binary.dynamic_symbols: + if not symbol.exported: continue - sym = symbol.name.decode() - if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS: + name = symbol.name + if binary.header.machine_type == lief.ELF.ARCH.RISCV or name in IGNORE_EXPORTS: continue - print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym))) + print(f'{binary.name}: export of symbol {name} not allowed!') ok = False return ok -def check_ELF_libraries(filename) -> bool: +def check_ELF_libraries(binary) -> bool: ok = True - elf = pixie.load(filename) - for library_name in elf.query_dyn_tags(pixie.DT_NEEDED): - assert(isinstance(library_name, bytes)) - if library_name.decode() not in ELF_ALLOWED_LIBRARIES: - print('{}: NEEDED library {} is not 
allowed'.format(filename, library_name.decode())) + for library in binary.libraries: + if library not in ELF_ALLOWED_LIBRARIES: + print(f'{filename}: {library} is not in ALLOWED_LIBRARIES!') ok = False return ok -def macho_read_libraries(filename) -> List[str]: - p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - libraries = [] - for line in stdout.splitlines(): - tokens = line.split() - if len(tokens) == 1: # skip executable name - continue - libraries.append(tokens[0].split('/')[-1]) - return libraries +def check_MACHO_libraries(binary) -> bool: + ok = True + for dylib in binary.libraries: + split = dylib.name.split('/') + if split[-1] not in MACHO_ALLOWED_LIBRARIES: + print(f'{split[-1]} is not in ALLOWED_LIBRARIES!') + ok = False + return ok + +def check_MACHO_min_os(binary) -> bool: + if binary.build_version.minos == [10,15,0]: + return True + return False -def check_MACHO_libraries(filename) -> bool: +def check_MACHO_sdk(binary) -> bool: + if binary.build_version.sdk == [11, 0, 0]: + return True + return False + +def check_PE_libraries(binary) -> bool: ok = True - for dylib in macho_read_libraries(filename): - if dylib not in MACHO_ALLOWED_LIBRARIES: - print('{} is not in ALLOWED_LIBRARIES!'.format(dylib)) + for dylib in binary.libraries: + if dylib not in PE_ALLOWED_LIBRARIES: + print(f'{dylib} is not in ALLOWED_LIBRARIES!') ok = False return ok +def check_PE_subsystem_version(binary) -> bool: + major: int = binary.optional_header.major_subsystem_version + minor: int = binary.optional_header.minor_subsystem_version + if major == 6 and minor == 1: + return True + return False + +def check_ELF_interpreter(binary) -> bool: + expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness] + + return binary.concrete.interpreter == expected_interpreter + CHECKS = { -'ELF': [ +lief.EXE_FORMATS.ELF: [ ('IMPORTED_SYMBOLS', check_imported_symbols), ('EXPORTED_SYMBOLS', check_exported_symbols), - ('LIBRARY_DEPENDENCIES', check_ELF_libraries) + ('LIBRARY_DEPENDENCIES', check_ELF_libraries), + ('INTERPRETER_NAME', check_ELF_interpreter), +], +lief.EXE_FORMATS.MACHO: [ + ('DYNAMIC_LIBRARIES', check_MACHO_libraries), + ('MIN_OS', check_MACHO_min_os), + ('SDK', check_MACHO_sdk), ], -'MACHO': [ - ('DYNAMIC_LIBRARIES', check_MACHO_libraries) +lief.EXE_FORMATS.PE: [ + ('DYNAMIC_LIBRARIES', check_PE_libraries), + ('SUBSYSTEM_VERSION', check_PE_subsystem_version), ] } -def identify_executable(executable) -> Optional[str]: - with open(filename, 'rb') as f: - magic = f.read(4) - if magic.startswith(b'MZ'): - return 'PE' - elif magic.startswith(b'\x7fELF'): - return 'ELF' - elif magic.startswith(b'\xcf\xfa'): - return 'MACHO' - return None - if __name__ == '__main__': retval = 0 for filename in sys.argv[1:]: try: - etype = identify_executable(filename) - if etype is None: - print('{}: unknown format'.format(filename)) + binary = lief.parse(filename) + etype = binary.format + if etype == lief.EXE_FORMATS.UNKNOWN: + print(f'{filename}: unknown executable format') retval = 1 continue failed = [] for (name, func) in CHECKS[etype]: - if not func(filename): + if not func(binary): failed.append(name) if failed: - print('{}: failed {}'.format(filename, ' '.join(failed))) + print(f'{filename}: failed {" ".join(failed)}') retval = 1 except IOError: - print('{}: cannot 
open'.format(filename)) + print(f'{filename}: cannot open') retval = 1 sys.exit(retval) diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index 19b7b0572bc7f..46839a0c496f4 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -5,9 +5,14 @@ ''' Test script for security-check.py ''' +import lief #type:ignore +import os import subprocess +from typing import List import unittest +from utils import determine_wellknown_cmd + def write_testcode(filename): with open(filename, 'w', encoding="utf8") as f: f.write(''' @@ -19,68 +24,128 @@ def write_testcode(filename): } ''') +def clean_files(source, executable): + os.remove(source) + os.remove(executable) + def call_security_check(cc, source, executable, options): - subprocess.check_call([cc,source,'-o',executable] + options) - p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - return (p.returncode, stdout.rstrip()) + # This should behave the same as AC_TRY_LINK, so arrange well-known flags + # in the same order as autoconf would. + # + # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for + # reference. + env_flags: List[str] = [] + for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: + env_flags += filter(None, os.environ.get(var, '').split(' ')) + + subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) + p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True) + return (p.returncode, p.stdout.rstrip()) + +def get_arch(cc, source, executable): + subprocess.run([*cc, source, '-o', executable], check=True) + binary = lief.parse(executable) + arch = binary.abstract.header.architecture + os.remove(executable) + return arch class TestSecurityChecks(unittest.TestCase): def test_ELF(self): source = 'test1.c' executable = 'test1' - cc = 'gcc' + cc = determine_wellknown_cmd('CC', 'gcc') write_testcode(source) + arch = get_arch(cc, source, executable) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), - (1, executable+': failed PIE NX RELRO Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), - (1, executable+': failed PIE RELRO Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), - (1, executable+': failed PIE RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), - (1, executable+': failed RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), - (1, executable+': failed separate_code')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), - (0, '')) + if arch == lief.ARCHITECTURES.X86: + self.assertEqual(call_security_check(cc, source, executable, 
['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE NX RELRO Canary CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO Canary CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), + (1, executable+': failed RELRO CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + (1, executable+': failed separate_code CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), + (1, executable+': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']), + (0, '')) + else: + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE NX RELRO Canary')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO Canary')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), + (1, executable+': failed RELRO')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + (1, executable+': failed separate_code')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), + (0, '')) + + clean_files(source, executable) def test_PE(self): source = 'test1.c' executable = 'test1.exe' - cc = 'x86_64-w64-mingw32-gcc' + cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc') write_testcode(source) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, 
['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed HIGH_ENTROPY_VA RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--disable-nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) # -pie -fPIE does nothing unless --dynamicbase is also supplied + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE']), + (1, executable+': failed HIGH_ENTROPY_VA CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']), + (1, executable+': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE', '-fcf-protection=full']), (0, '')) + clean_files(source, executable) + def test_MACHO(self): source = 'test1.c' executable = 'test1' - cc = 'clang' + cc = determine_wellknown_cmd('CC', 'clang') write_testcode(source) + arch = get_arch(cc, source, executable) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']), - (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']), - (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']), - (1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']), - (1, executable+': failed PIE LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']), - (1, executable+': failed PIE')) - self.assertEqual(call_security_check(cc, source, executable, 
['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all']), - (0, '')) + if arch == lief.ARCHITECTURES.X86: + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']), + (1, executable+': failed LAZY_BINDINGS PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']), + (1, executable+': failed PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']), + (1, executable+': failed PIE')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']), + (0, '')) + else: + # arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS')) + self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all']), + (1, executable+': failed LAZY_BINDINGS')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-bind_at_load','-fstack-protector-all']), + (0, '')) + + + clean_files(source, executable) if __name__ == '__main__': unittest.main() - diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index b07ec2ffdf263..48729ccdb66e6 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -5,47 +5,67 @@ ''' Test script for symbol-check.py ''' +import os import subprocess +from typing import List import unittest -def call_symbol_check(cc, source, executable, options): - subprocess.run([cc,source,'-o',executable] + options, check=True) +from utils import determine_wellknown_cmd + +def call_symbol_check(cc: List[str], source, executable, options): + # This should behave the same as AC_TRY_LINK, so arrange well-known flags + # in the same order as autoconf would. + # + # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for + # reference. 
+ env_flags: List[str] = [] + for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: + env_flags += filter(None, os.environ.get(var, '').split(' ')) + + subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) p = subprocess.run(['./contrib/devtools/symbol-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True) + os.remove(source) + os.remove(executable) return (p.returncode, p.stdout.rstrip()) -def get_machine(cc): - p = subprocess.run([cc,'-dumpmachine'], stdout=subprocess.PIPE, universal_newlines=True) +def get_machine(cc: List[str]): + p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, universal_newlines=True) return p.stdout.rstrip() class TestSymbolChecks(unittest.TestCase): def test_ELF(self): source = 'test1.c' executable = 'test1' - cc = 'gcc' + cc = determine_wellknown_cmd('CC', 'gcc') - # there's no way to do this test for RISC-V at the moment; bionic's libc is 2.27 - # and we allow all symbols from 2.27. + # there's no way to do this test for RISC-V at the moment; we build for + # RISC-V in a glibc 2.31 environment and we allow all symbols from 2.27. if 'riscv' in get_machine(cc): self.skipTest("test not available for RISC-V") - # memfd_create was introduced in GLIBC 2.27, so is newer than the upper limit of - # all but RISC-V but still available on bionic + # there's no way to do this test for ARM at the moment; we build for + # ARM in a glibc 2.31 environment and we allow all symbols from 2.28. + if 'arm' in get_machine(cc): + self.skipTest("test not available for 32-bit ARM") + + # nextup was introduced in GLIBC 2.24, so is newer than our supported + # glibc (2.18), and available in our release build environment (2.31). with open(source, 'w', encoding="utf8") as f: f.write(''' #define _GNU_SOURCE - #include <sys/mman.h> + #include <math.h> - int memfd_create(const char *name, unsigned int flags); + double nextup(double x); int main() { - memfd_create("test", 0); + nextup(3.14); return 0; } ''') - self.assertEqual(call_symbol_check(cc, source, executable, []), - (1, executable + ': symbol memfd_create from unsupported version GLIBC_2.27\n' + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']), + (1, executable + ': symbol nextup from unsupported version GLIBC_2.24(3)\n' + executable + ': failed IMPORTED_SYMBOLS')) # -lutil is part of the libc6 package so a safe bet that it's installed @@ -64,29 +84,30 @@ def test_ELF(self): ''') self.assertEqual(call_symbol_check(cc, source, executable, ['-lutil']), - (1, executable + ': NEEDED library libutil.so.1 is not allowed\n' + + (1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' + executable + ': failed LIBRARY_DEPENDENCIES')) - # finally, check a conforming file that simply uses a math function + # finally, check a simple conforming binary source = 'test3.c' executable = 'test3' with open(source, 'w', encoding="utf8") as f: f.write(''' - #include <math.h> + #include <stdio.h> int main() { - return (int)pow(2.0, 4.0); + printf("42"); + return 0; } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']), + self.assertEqual(call_symbol_check(cc, source, executable, []), (0, '')) def test_MACHO(self): source = 'test1.c' executable = 'test1' - cc = 'clang' + cc = determine_wellknown_cmd('CC', 'clang') with open(source, 'w', encoding="utf8") as f: f.write(''' @@ -100,9 +121,9 @@ def test_MACHO(self): ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat']), + self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', 
'-Wl,11.4', '-Wl,11.4']), (1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' + - executable + ': failed DYNAMIC_LIBRARIES')) + f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK')) source = 'test2.c' executable = 'test2' @@ -117,9 +138,72 @@ def test_MACHO(self): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics']), + self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + (1, f'{executable}: failed MIN_OS SDK')) + + source = 'test3.c' + executable = 'test3' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,10.15', '-Wl,11.4']), + (1, f'{executable}: failed SDK')) + + def test_PE(self): + source = 'test1.c' + executable = 'test1.exe' + cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc') + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include <pdh.h> + + int main() + { + PdhConnectMachineA(NULL); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + (1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' + + executable + ': failed DYNAMIC_LIBRARIES')) + + source = 'test2.c' + executable = 'test2.exe' + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']), + (1, executable + ': failed SUBSYSTEM_VERSION')) + + source = 'test3.c' + executable = 'test3.exe' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include <combaseapi.h> + + int main() + { + CoFreeUnusedLibrariesEx(0,0); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), (0, '')) + if __name__ == '__main__': unittest.main() - diff --git a/contrib/devtools/utils.py b/contrib/devtools/utils.py new file mode 100755 index 0000000000000..68ad1c3aba191 --- /dev/null +++ b/contrib/devtools/utils.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+''' +Common utility functions +''' +import shutil +import sys +import os +from typing import List + + +def determine_wellknown_cmd(envvar, progname) -> List[str]: + maybe_env = os.getenv(envvar) + maybe_which = shutil.which(progname) + if maybe_env: + return maybe_env.split(' ') # Well-known vars are often meant to be word-split + elif maybe_which: + return [ maybe_which ] + else: + sys.exit(f"{progname} not found") diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py index b45abecb34ee3..348c99039863a 100755 --- a/contrib/gitian-build.py +++ b/contrib/gitian-build.py @@ -74,8 +74,8 @@ def build(): if args.macos: print('\nCompiling ' + args.version + ' MacOS') - subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/depends-sources/sdks/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz']) - subprocess.check_output(["echo 'be17f48fd0b08fb4dcd229f55a6ae48d9f781d210839b4ea313ef17dd12d6ea5 inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz' | sha256sum -c"], shell=True) + subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/depends-sources/sdks/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz']) + subprocess.check_output(["echo 'df75d30ecafc429e905134333aeae56ac65fac67cb4182622398fd717df77619 inputs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz' | sha256sum -c"], shell=True) subprocess.check_call(['bin/gbuild', '--fetch-tags', '-j', args.jobs, '-m', args.memory, '--commit', 'dash='+args.commit, '--url', 'dash='+args.url, '../dash/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../dash/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call('mv build/out/dashcore-*-osx-unsigned.tar.gz inputs/', shell=True) @@ -217,6 +217,11 @@ def main(): args.windows = 'w' in args.os args.macos = 'm' in args.os + # Disable for MacOS if no SDK found + if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz'): + print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') + args.macos = False + args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' if not args.signer: diff --git a/contrib/gitian-descriptors/README.md b/contrib/gitian-descriptors/README.md index f0dc1b4efa604..532ab6bf5c59f 100644 --- a/contrib/gitian-descriptors/README.md +++ b/contrib/gitian-descriptors/README.md @@ -26,7 +26,7 @@ Once you've got the right hardware and software: # Create base images cd gitian-builder - bin/make-base-vm --suite bionic --arch amd64 + bin/make-base-vm --suite focal --arch amd64 cd .. 
# Get inputs (see doc/release-process.md for exact inputs needed and where to get them) diff --git a/contrib/gitian-descriptors/assign_DISTNAME b/contrib/gitian-descriptors/assign_DISTNAME index eef36882df848..515802876a887 100755 --- a/contrib/gitian-descriptors/assign_DISTNAME +++ b/contrib/gitian-descriptors/assign_DISTNAME @@ -4,7 +4,7 @@ # # A helper script to be sourced into the gitian descriptors -if RECENT_TAG="$(git describe --exact-match HEAD)"; then +if RECENT_TAG="$(git describe --exact-match HEAD 2> /dev/null)"; then VERSION="${RECENT_TAG#v}" else VERSION="$(git rev-parse --short=12 HEAD)" diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index 809e0d5870249..4f0b3253aee35 100755 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -1,5 +1,5 @@ --- -name: "dash-linux-19" +name: "dash-linux-20" enable_cache: true distro: "ubuntu" suites: @@ -23,6 +23,7 @@ packages: - "patch" - "pkg-config" - "python3" +- "python3-pip" - "libxkbcommon0" - "ccache" # Cross compilation HOSTS: @@ -41,7 +42,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-linux-gnu aarch64-linux-gnu riscv64-linux-gnu" - CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests --enable-crash-hooks" + CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary --enable-crash-hooks" FAKETIME_HOST_PROGS="gcc g++" FAKETIME_PROGS="date ar ranlib nm" HOST_CFLAGS="-O2 -g" @@ -85,7 +86,7 @@ script: | echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog} chmod +x ${WRAP_DIR}/${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${prog} done @@ -101,7 +102,7 @@ script: | echo "REAL=\`which -a ${i}-${prog}-8 | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \"\$@\"" >> $WRAP_DIR/${i}-${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog} chmod +x ${WRAP_DIR}/${i}-${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${i}-${prog} fi @@ -109,6 +110,8 @@ script: | done } + pip3 install lief==0.12.1 + # Faketime for depends so intermediate results are comparable export PATH_orig=${PATH} create_global_faketime_wrappers "2000-01-01 12:00:00" @@ -142,7 +145,7 @@ script: | # Extract the git archive into a dir for each host and build for i in ${HOSTS}; do export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - if [ "${i}" = "powerpc64-linux-gnu" ]; then + if [ "${i}" = "powerpc64-linux-gnu" ] || [ "${i}" = "powerpc64le-linux-gnu" ]; then # Workaround for https://bugs.launchpad.net/ubuntu/+source/gcc-8-cross-ports/+bug/1853740 # TODO: remove this when no longer needed HOST_LDFLAGS="${HOST_LDFLAGS_BASE} -Wl,-z,noexecstack" diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml index 886d6553c0928..6615e0369a26d 100644 --- a/contrib/gitian-descriptors/gitian-osx-signer.yml +++ b/contrib/gitian-descriptors/gitian-osx-signer.yml @@ -31,7 +31,7 @@ script: | echo "REAL=\`which -a ${prog} | grep -v 
${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog} chmod +x ${WRAP_DIR}/${prog} done diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml index 0a2102887fe40..b6cda7ac8952e 100644 --- a/contrib/gitian-descriptors/gitian-osx.yml +++ b/contrib/gitian-descriptors/gitian-osx.yml @@ -1,5 +1,5 @@ --- -name: "dash-osx-19" +name: "dash-osx-20" enable_cache: true distro: "ubuntu" suites: @@ -13,19 +13,17 @@ packages: - "git" - "pkg-config" - "autoconf" -- "librsvg2-bin" -- "libtiff-tools" - "libtool" - "automake" - "faketime" - "bsdmainutils" -- "imagemagick" - "libcap-dev" - "libz-dev" - "libbz2-dev" - "python3" - "python3-dev" - "python3-setuptools" +- "python3-pip" - "fonts-tuffy" - "ccache" - "cmake" @@ -35,13 +33,13 @@ remotes: - "url": "https://github.com/dashpay/dash.git" "dir": "dash" files: -- "Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz" +- "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz" script: | set -e -o pipefail WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-apple-darwin19" - CONFIGFLAGS="--enable-reduce-exports --disable-miner --disable-bench --disable-gui-tests XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg --enable-crash-hooks" + HOSTS="x86_64-apple-darwin" + CONFIGFLAGS="--enable-reduce-exports --disable-miner --disable-bench --disable-gui-tests --disable-fuzz-binary XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg --enable-crash-hooks" FAKETIME_HOST_PROGS="" FAKETIME_PROGS="ar ranlib date dmg xorrisofs" @@ -78,7 +76,7 @@ script: | echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog} chmod +x ${WRAP_DIR}/${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${prog} done @@ -91,13 +89,15 @@ script: | echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog} chmod +x ${WRAP_DIR}/${i}-${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${i}-${prog} done done } + pip3 install lief==0.12.1 + # Faketime for depends so intermediate results are comparable export PATH_orig=${PATH} create_global_faketime_wrappers "2000-01-01 12:00:00" @@ -108,7 +108,7 @@ script: | BASEPREFIX="${PWD}/depends" mkdir -p ${BASEPREFIX}/SDKs - tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz + tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz # Build dependencies for each host for i in $HOSTS; do @@ -172,8 +172,8 @@ script: | find ${DISTNAME} -path '*.dSYM*' | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}-debug.tar.gz cd ../../ done - mv ${OUTDIR}/${DISTNAME}-x86_64-apple-darwin19.tar.gz 
${OUTDIR}/${DISTNAME}-osx64.tar.gz - mv ${OUTDIR}/${DISTNAME}-x86_64-apple-darwin19-debug.tar.gz ${OUTDIR}/${DISTNAME}-osx64-debug.tar.gz + mv ${OUTDIR}/${DISTNAME}-x86_64-apple-darwin.tar.gz ${OUTDIR}/${DISTNAME}-osx64.tar.gz + mv ${OUTDIR}/${DISTNAME}-x86_64-apple-darwin-debug.tar.gz ${OUTDIR}/${DISTNAME}-osx64-debug.tar.gz # Compress ccache (otherwise the assert file will get too huge) if [ "$CCACHE_DIR" != "" ]; then diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml index cbf76eb5097e9..e3154a246ee39 100755 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -1,5 +1,5 @@ --- -name: "dash-win-19" +name: "dash-win-20" enable_cache: true distro: "ubuntu" suites: @@ -22,6 +22,7 @@ packages: - "zip" - "ca-certificates" - "python3" +- "python3-pip" - "ccache" remotes: - "url": "https://github.com/dashpay/dash.git" @@ -32,7 +33,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-w64-mingw32" - CONFIGFLAGS="--enable-reduce-exports --disable-miner --disable-bench --disable-gui-tests --enable-crash-hooks" + CONFIGFLAGS="--enable-reduce-exports --disable-miner --disable-bench --disable-gui-tests --disable-fuzz-binary --enable-crash-hooks" FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy" FAKETIME_PROGS="date makensis zip" HOST_CFLAGS="-O2 -g -fno-ident" @@ -75,7 +76,7 @@ script: | echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog} chmod +x ${WRAP_DIR}/${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${prog} done @@ -89,7 +90,7 @@ script: | echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog} chmod +x ${WRAP_DIR}/${i}-${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${i}-${prog} done @@ -107,13 +108,15 @@ script: | echo "# $(${prog} --version | head -1)" >> ${WRAP_DIR}/${i}-${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \"\$@\"" >> $WRAP_DIR/${i}-${prog} + echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog} chmod +x ${WRAP_DIR}/${i}-${prog} touch -d "${REFERENCE_DATETIME}" ${WRAP_DIR}/${i}-${prog} done done } + pip3 install lief==0.12.1 + # Faketime for depends so intermediate results are comparable export PATH_orig=${PATH} create_global_faketime_wrappers "2000-01-01 12:00:00" @@ -159,6 +162,7 @@ script: | CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" make ${MAKEOPTS} make ${MAKEOPTS} -C src check-security + make ${MAKEOPTS} -C src check-symbols make deploy BITCOIN_WIN_INSTALLER="${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" make install DESTDIR=${INSTALLPATH} cd installed diff --git a/contrib/guix/Dockerfile b/contrib/guix/Dockerfile new file mode 100644 index 0000000000000..7705ffa7a4e95 --- /dev/null +++ 
b/contrib/guix/Dockerfile @@ -0,0 +1,63 @@ +FROM alpine:3.17 + +RUN apk --no-cache --update add \ + bash \ + bzip2 \ + ca-certificates \ + curl \ + git \ + make \ + shadow + +ARG guix_download_path=ftp://ftp.gnu.org/gnu/guix +ARG guix_version=1.4.0 +ARG guix_checksum_aarch64=72d807392889919940b7ec9632c45a259555e6b0942ea7bfd131101e08ebfcf4 +ARG guix_checksum_x86_64=236ca7c9c5958b1f396c2924fcc5bc9d6fdebcb1b4cf3c7c6d46d4bf660ed9c9 +ARG builder_count=32 + +ENV PATH /root/.config/guix/current/bin:$PATH + +# Application Setup +# https://guix.gnu.org/manual/en/html_node/Application-Setup.html +ENV GUIX_LOCPATH /root/.guix-profile/lib/locale +ENV LC_ALL en_US.UTF-8 + +RUN guix_file_name=guix-binary-${guix_version}.$(uname -m)-linux.tar.xz && \ + eval "guix_checksum=\${guix_checksum_$(uname -m)}" && \ + cd /tmp && \ + wget -q -O "$guix_file_name" "${guix_download_path}/${guix_file_name}" && \ + echo "${guix_checksum} ${guix_file_name}" | sha256sum -c && \ + tar xJf "$guix_file_name" && \ + mv var/guix /var/ && \ + mv gnu / && \ + mkdir -p ~root/.config/guix && \ + ln -sf /var/guix/profiles/per-user/root/current-guix ~root/.config/guix/current && \ + source ~root/.config/guix/current/etc/profile + +# Guix expects this file to exist +RUN touch /etc/nsswitch.conf + +RUN guix archive --authorize < ~root/.config/guix/current/share/guix/ci.guix.gnu.org.pub + +# Build Environment Setup +# https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html + +RUN groupadd --system guixbuild +RUN for i in $(seq -w 1 ${builder_count}); do \ + useradd -g guixbuild -G guixbuild \ + -d /var/empty -s $(which nologin) \ + -c "Guix build user ${i}" --system \ + "guixbuilder${i}" ; \ + done + +ENTRYPOINT ["/root/.config/guix/current/bin/guix-daemon","--build-users-group=guixbuild"] + +RUN git clone https://github.com/dashpay/dash.git /dash + +RUN mkdir base_cache sources SDKs + +WORKDIR /dash + +RUN mkdir -p depends/SDKs && \ + curl -L https://bitcoincore.org/depends-sources/sdks/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz | tar -xz -C depends/SDKs + diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md new file mode 100644 index 0000000000000..a50b80f35719b --- /dev/null +++ b/contrib/guix/INSTALL.md @@ -0,0 +1,801 @@ +# Guix Installation and Setup + +This only needs to be done once per machine. If you have already completed the +installation and setup, please proceed to [perform a build](./README.md). + +Otherwise, you may choose from one of the following options to install Guix: + +1. Using the official **shell installer script** [⤓ skip to section][install-script] + - Maintained by Guix developers + - Easiest (automatically performs *most* setup) + - Works on nearly all Linux distributions + - Only installs latest release + - Binary installation only, requires high level of trust + - Note: The script needs to be run as root, so it should be inspected before it's run +2. Using the official **binary tarball** [⤓ skip to section][install-bin-tarball] + - Maintained by Guix developers + - Normal difficulty (full manual setup required) + - Works on nearly all Linux distributions + - Installs any release + - Binary installation only, requires high level of trust +3. Using **Docker image** [↗︎ external instructions][install-docker] + - Maintained by pastapastapasta + - Easy (automatically performs *some* setup) + - Works wherever Docker images work + - Installs any release + - Binary installation only, requires high level of trust +4. 
Using a **distribution-maintained package** [⤓ skip to section][install-distro-pkg] + - Maintained by distribution's Guix package maintainer + - Normal difficulty (manual setup required) + - Works only on distributions with Guix packaged, see: https://repology.org/project/guix/versions + - Installs a release decided on by package maintainer + - Source or binary installation depending on the distribution +5. Building **from source** [⤓ skip to section][install-source] + - Maintained by you + - Hard, but rewarding + - Can be made to work on most Linux distributions + - Installs any commit (more granular) + - Source installation, requires lower level of trust + +## Options 1 and 2: Using the official shell installer script or binary tarball + +The installation instructions for both the official shell installer script and +the binary tarballs can be found in the GNU Guix Manual's [Binary Installation +section](https://guix.gnu.org/manual/en/html_node/Binary-Installation.html). + +Note that running through the binary tarball installation steps is largely +equivalent to manually performing what the shell installer script does. + +Note that at the time of writing (July 5th, 2021), the shell installer script +automatically creates an `/etc/profile.d` entry which the binary tarball +installation instructions do not ask you to create. However, you will likely +need this entry for better desktop integration. Please see [this +section](#add-an-etcprofiled-entry) for instructions on how to add a +`/etc/profile.d/guix.sh` entry. + +Regardless of which installation option you chose, the changes to +`/etc/profile.d` will not take effect until the next shell or desktop session, +so you should log out and log back in. + +## Option 3: Using Docker image + +Please refer to Docker's image +[here](https://github.com/dashpay/dash/tree/master/contrib/guix/Dockerfile). + +Note that the `Dockerfile` is largely equivalent to running through the binary +tarball installation steps. + +## Option 4: Using a distribution-maintained package + +Note that this section is based on the distro packaging situation at the time of +writing (July 2021). Guix is expected to be more widely packaged over time. For +an up-to-date view on Guix's package status/version across distros, please see: +https://repology.org/project/guix/versions + +### Debian 11 (Bullseye)/Ubuntu 21.04 (Hirsute Hippo) + +Guix v1.2.0 is available as a distribution package starting in [Debian +11](https://packages.debian.org/bullseye/guix) and [Ubuntu +21.04](https://packages.ubuntu.com/hirsute/guix). + +Note that if you intend on using Guix without using any substitutes (more +details [here][security-model]), v1.2.0 has a known problem when building GnuTLS +from source. Solutions and workarounds are documented +[here](#gnutls-test-suite-fail-status-request-revoked). + + +To install: +```sh +sudo apt install guix +``` + +For up-to-date information on Debian and Ubuntu's release history: +- [Debian release history](https://www.debian.org/releases/) +- [Ubuntu release history](https://ubuntu.com/about/release-cycle) + +### Arch Linux + +Guix is available in the AUR as +[`guix`](https://aur.archlinux.org/packages/guix/), please follow the +installation instructions in the Arch Linux Wiki ([live +link](https://wiki.archlinux.org/index.php/Guix#AUR_Package_Installation), +[2021/03/30 +permalink](https://wiki.archlinux.org/index.php?title=Guix&oldid=637559#AUR_Package_Installation)) +to install Guix. 
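+ +For example, installing via an AUR helper such as `yay` might look like the following (a hypothetical invocation; note the `check`-phase caveat that follows): + +```sh +yay -S guix +```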
+ +At the time of writing (2021/03/30), the `check` phase will fail if the path to +guix's build directory is longer than 36 characters due to an anachronistic +character limit on the shebang line. Since the `check` phase happens after the +`build` phase, which may take quite a long time, it is recommended that users +either: + +1. Skip the `check` phase + - For `makepkg`: `makepkg --nocheck ...` + - For `yay`: `yay --mflags="--nocheck" ...` + - For `paru`: `paru --nocheck ...` +2. Or, check their build directory's length beforehand + - For those building with `makepkg`: `pwd | wc -c` + +## Option 5: Building from source + +Building Guix from source is a rather involved process but a rewarding one for +those looking to minimize trust and maximize customizability (e.g. building a +particular commit of Guix). Previous experience with using autotools-style build +systems to build packages from source will be helpful. *hic sunt dracones.* + +I strongly urge you to at least skim through the entire section once before you +start issuing commands, as it will save you a lot of unnecessary pain and +anguish. + +### Installing common build tools + +There are a few basic build tools that are required for most things we'll build, +so let's install them now: + +Text transformation/i18n: +- `autopoint` (sometimes packaged in `gettext`) +- `help2man` +- `po4a` +- `texinfo` + +Build system tools: +- `g++` w/ C++11 support +- `libtool` +- `autoconf` +- `automake` +- `pkg-config` (sometimes packaged as `pkgconf`) +- `make` +- `cmake` + +Miscellaneous: +- `git` +- `gnupg` +- `python3` + +### Building and Installing Guix's dependencies + +In order to build Guix itself from source, we need to first make sure that the +necessary dependencies are installed and discoverable. The most up-to-date list +of Guix's dependencies is kept in the ["Requirements" +section](https://guix.gnu.org/manual/en/html_node/Requirements.html) of the Guix +Reference Manual. + +Depending on your distribution, most or all of these dependencies may already be +packaged and installable without manually building and installing. + +For reference, the graphic below outlines Guix v1.3.0's dependency graph: + +![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png) + +#### Guile + +##### Choosing a Guile version and sticking to it + +One of the first things you need to decide is which Guile version you want to +use: Guile v2.2 or Guile v3.0. Unlike the python2 to python3 transition, Guile +v2.2 and Guile v3.0 are largely compatible, as evidenced by the fact that most +Guile packages and even [Guix +itself](https://guix.gnu.org/en/blog/2020/guile-3-and-guix/) support running on +both. + +What is important here is that you **choose one**, and you **remain consistent** +with your choice throughout **all Guile-related packages**, no matter if they +are installed via the distribution's package manager or installed from source. +This is because the files for Guile packages are installed to directories which +are separated based on the Guile version. + +###### Example: Checking that Ubuntu's `guile-git` is compatible with your chosen Guile version + +On Ubuntu Focal: + +```sh +$ apt show guile-git +Package: guile-git +... +Depends: guile-2.2, guile-bytestructures, libgit2-dev +... +``` + +As you can see, the package `guile-git` depends on `guile-2.2`, meaning that it +was likely built for Guile v2.2. 
This means that if you decided to use Guile +v3.0 on Ubuntu Focal, you would need to build guile-git from source instead of +using the distribution package. + +On Ubuntu Hirsute: + +```sh +$ apt show guile-git +Package: guile-git +... +Depends: guile-3.0 | guile-2.2, guile-bytestructures (>= 1.0.7-3~), libgit2-dev (>= 1.0) +... +``` + +In this case, `guile-git` depends on either `guile-3.0` or `guile-2.2`, meaning +that it would work no matter what Guile version you decided to use. + +###### Corner case: Multiple versions of Guile on one system + +It is recommended to only install one version of Guile, so that build systems do +not get confused about which Guile to use. + +However, if you insist on having both Guile v2.2 and Guile v3.0 installed on +your system, then you need to **consistently** specify one of +`GUILE_EFFECTIVE_VERSION=3.0` or `GUILE_EFFECTIVE_VERSION=2.2` to all +`./configure` invocations for Guix and its dependencies. + +##### Installing Guile + +Guile is most likely already packaged for your distribution, so after you have +[chosen a Guile version](#choosing-a-guile-version-and-sticking-to-it), install +it via your distribution's package manager. + +If your distribution splits packages into `-dev`-suffixed and +non-`-dev`-suffixed sub-packages (as is the case for Debian-derived +distributions), please make sure to install both. For example, to install Guile +v2.2 on Debian/Ubuntu: + +```sh +apt install guile-2.2 guile-2.2-dev +``` + +#### Mixing distribution packages and source-built packages + +At the time of writing, most distributions have _some_ of Guix's dependencies +packaged, but not all. This means that you may want to install the distribution +package for some dependencies, and manually build-from-source for others. + +Distribution packages usually install to `/usr`, which is different from the +default `./configure` prefix of source-built packages: `/usr/local`. + +This means that if you mix-and-match distribution packages and source-built +packages and do not specify exactly `--prefix=/usr` to `./configure` for +source-built packages, you will need to augment the `GUILE_LOAD_PATH` and +`GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look +under the right prefix and find your source-built packages. + +For example, if you are using Guile v2.2, and have Guile packages in the +`/usr/local` prefix, either add the following lines to your `.profile` or +`.bash_profile` so that the environment variables are properly set for all future +shell logins, or paste the lines into a POSIX-style shell to temporarily modify +the environment variables of your current shell session. + +```sh +# Help Guile v2.2.x find packages in /usr/local +export GUILE_LOAD_PATH="/usr/local/share/guile/site/2.2${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH" +export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/2.2/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_LOAD_COMPILED_PATH" +``` + +Note that these environment variables are used to check for packages during +`./configure`, so they should be set as soon as possible should you want to use +a prefix other than `/usr`. + +#### Building and installing source-built packages + +***IMPORTANT**: A few dependencies have non-obvious quirks/errata which are +documented in the sub-sections immediately below. 
Please read these sections +before proceeding to build and install these packages.* + +Although you should always refer to the README or INSTALL files for the most +accurate information, most of these dependencies use autoconf-style build +systems (check if there's a `configure.ac` file), and will likely do the right +thing with the following: + +Clone the repository and check out the latest release: +```sh +git clone <git-repo-of-dependency>/<dependency>.git +cd <dependency> +git tag -l # check for the latest release +git checkout <latest-release> +``` + +For autoconf-based build systems (if `./autogen.sh` or `configure.ac` exists at +the root of the repository): + +```sh +./autogen.sh || autoreconf -vfi +./configure --prefix=<prefix> +make +sudo make install +``` + +For CMake-based build systems (if `CMakeLists.txt` exists at the root of the +repository): + +```sh +mkdir build && cd build +cmake .. -DCMAKE_INSTALL_PREFIX=<prefix> +sudo cmake --build . --target install +``` + +If you choose not to specify exactly `--prefix=/usr` to `./configure`, please +make sure you've carefully read the [previous section](#mixing-distribution-packages-and-source-built-packages) on mixing distribution +packages and source-built packages. + +##### Binding packages require `-dev`-suffixed packages + +Relevant for: +- Everyone + +When building bindings, the `-dev`-suffixed version of the original package +needs to be installed. For example, building `Guile-zlib` on Debian-derived +distributions requires that `zlib1g-dev` is installed. + +When using bindings, the `-dev`-suffixed version of the original package still +needs to be installed. This is particularly problematic when distribution +packages are mispackaged like `guile-sqlite3` is in Ubuntu Focal such that +installing `guile-sqlite3` does not automatically install `libsqlite3-dev` as a +dependency. + +Below is a list of relevant Guile bindings and their corresponding `-dev` +packages in Debian at the time of writing. + +| Guile binding package | -dev Debian package | +|-----------------------|---------------------| +| guile-gcrypt | libgcrypt-dev | +| guile-git | libgit2-dev | +| guile-lzlib | liblz-dev | +| guile-ssh | libssh-dev | +| guile-sqlite3 | libsqlite3-dev | +| guile-zlib | zlib1g-dev | + +##### `guile-git` actually depends on `libgit2 >= 1.1` + +Relevant for: +- Those building `guile-git` from source against `libgit2 < 1.1` +- Those installing `guile-git` from their distribution where `guile-git` is + built against `libgit2 < 1.1` + +As of v0.4.0, `guile-git` claims to only require `libgit2 >= 0.28.0`; however, +it actually requires `libgit2 >= 1.1`; otherwise, it will be confused by a +reference of `origin/keyring`: instead of interpreting the reference as "the +'keyring' branch of the 'origin' remote", the reference is interpreted as "the +branch literally named 'origin/keyring'". + +This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and +`guile-git` is built against it. + +Should you be in this situation, you need to build both `libgit2 v1.1.x` and +`guile-git` from source. 
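+ +To see which `libgit2` version your distribution provides, a quick check (assuming `pkg-config` is installed) is: + +```sh +pkg-config --modversion libgit2 +```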
+ +Source: http://logs.guix.gnu.org/guix/2020-11-12.log#232527 + +##### `{scheme,guile}-bytestructures` v1.0.8 and v1.0.9 are broken for Guile v2.2 + +Relevant for: +- Those building `{scheme,guile}-bytestructures` from source against Guile v2.2 + +Commit +[707eea3](https://github.com/TaylanUB/scheme-bytestructures/commit/707eea3a85e1e375e86702229ebf73d496377669) +introduced a regression for Guile v2.2 and was first included in v1.0.8, this +was later corrected in commit +[ec9a721](https://github.com/TaylanUB/scheme-bytestructures/commit/ec9a721957c17bcda13148f8faa5f06934431ff7) +and included in v1.1.0. + +TL;DR If you decided to use Guile v2.2, do not use `{scheme,guile}-bytestructures` v1.0.8 or v1.0.9. + +### Building and Installing Guix itself + +Start by cloning Guix: + +``` +git clone https://git.savannah.gnu.org/git/guix.git +cd guix +``` + +You will likely want to build the latest release; however, if the latest release +at the time you're reading this is still 1.2.0, you may want to use 95aca29 instead +to avoid a problem in the GnuTLS test suite. + +``` +git branch -a -l 'origin/version-*' # check for the latest release +git checkout <latest-release> +``` + +Bootstrap the build system: +``` +./bootstrap +``` + +Configure with the recommended `--localstatedir` flag: +``` +./configure --localstatedir=/var +``` + +Note: If you intend to hack on Guix in the future, you will need to supply the +same `--localstatedir=` flag for all future Guix `./configure` invocations. See +the last paragraph of this +[section](https://guix.gnu.org/manual/en/html_node/Requirements.html) for more +details. + +Build Guix (this will take a while): +``` +make -j$(nproc) +``` + +Install Guix: + +``` +sudo make install +``` + +### Post-"build from source" Setup + +#### Creating and starting a `guix-daemon-original` service with a fixed `argv[0]` + +At this point, guix will be installed to `${bindir}`, which is likely +`/usr/local/bin` if you did not override directory variables at +`./configure`-time. More information on standard Automake directory variables +can be found +[here](https://www.gnu.org/software/automake/manual/html_node/Standard-Directory-Variables.html). + +However, the Guix init scripts and service configurations for Upstart, systemd, +SysV, and OpenRC are installed (in `${libdir}`) to launch +`${localstatedir}/guix/profiles/per-user/root/current-guix/bin/guix-daemon`, +which does not yet exist, and will only exist after [`root` performs their first +`guix pull`](#guix-pull-as-root). + +We need to create a `-original` version of these init scripts that points to +the binaries we just built and `make install`'ed in `${bindir}` (normally, +`/usr/local/bin`). 
+ +Example for `systemd`, run as `root`: + +```sh +# Create guix-daemon-original.service by modifying guix-daemon.service +libdir=# set according to your PREFIX (default is /usr/local/lib) +bindir="$(dirname $(command -v guix-daemon))" +sed -E -e "s|/\S*/guix/profiles/per-user/root/current-guix/bin/guix-daemon|${bindir}/guix-daemon|" "${libdir}"/systemd/system/guix-daemon.service > /etc/systemd/system/guix-daemon-original.service +chmod 664 /etc/systemd/system/guix-daemon-original.service + +# Make systemd recognize the new service +systemctl daemon-reload + +# Make sure that the non-working guix-daemon.service is stopped and disabled +systemctl stop guix-daemon +systemctl disable guix-daemon + +# Make sure that the working guix-daemon-original.service is started and enabled +systemctl enable guix-daemon-original +systemctl start guix-daemon-original +``` + +#### Creating `guix-daemon` users / groups + +Please see the [relevant +section](https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html) +in the Guix Reference Manual for more details. + +## Optional setup + +At this point, you are set up to [use Guix to build Dash +Core](./README.md#usage). However, if you want to polish your setup a bit and +make it "what Guix intended", then read the next few subsections. + +### Add an `/etc/profile.d` entry + +This section definitely does not apply to you if you installed Guix using: +1. The shell installer script +2. Docker image +3. Debian's `guix` package + +#### Background + +Although Guix knows how to update itself and its packages, it does so in a +non-invasive way (it does not modify `/usr/local/bin/guix`). + +Instead, it does the following: + +- After a `guix pull`, it updates + `/var/guix/profiles/per-user/$USER/current-guix`, and creates a symlink + targeting this directory at `$HOME/.config/guix/current` + +- After a `guix install`, it updates + `/var/guix/profiles/per-user/$USER/guix-profile`, and creates a symlink + targeting this directory at `$HOME/.guix-profile` + +Therefore, in order for these operations to affect your shell/desktop sessions +(and for the principle of least astonishment to hold), their corresponding +directories have to be added to well-known environment variables like `$PATH`, +`$INFOPATH`, `$XDG_DATA_DIRS`, etc. + +In other words, if `$HOME/.config/guix/current/bin` does not exist in your +`$PATH`, a `guix pull` will have no effect on what `guix` you are using. Same +goes for `$HOME/.guix-profile/bin`, `guix install`, and installed packages. + +Helpfully, after a `guix pull` or `guix install`, a message will be printed like +so: + +``` +hint: Consider setting the necessary environment variables by running: + + GUIX_PROFILE="$HOME/.guix-profile" + . "$GUIX_PROFILE/etc/profile" + +Alternately, see `guix package --search-paths -p "$HOME/.guix-profile"'. +``` + +However, this is somewhat tedious to do for both `guix pull` and `guix install` +for each user on the system that wants to properly use `guix`. I recommend that +you instead add an entry to `/etc/profile.d` instead. This is done by default +when installing the Debian package later than 1.2.0-4 and when using the shell +script installer. 
+ +#### Instructions + +Create `/etc/profile.d/guix.sh` with the following content: +```sh +# _GUIX_PROFILE: `guix pull` profile +_GUIX_PROFILE="$HOME/.config/guix/current" +if [ -L $_GUIX_PROFILE ]; then + export PATH="$_GUIX_PROFILE/bin${PATH:+:}$PATH" + # Export INFOPATH so that the updated info pages can be found + # and read by both /usr/bin/info and/or $GUIX_PROFILE/bin/info + # When INFOPATH is unset, add a trailing colon so that Emacs + # searches 'Info-default-directory-list'. + export INFOPATH="$_GUIX_PROFILE/share/info:$INFOPATH" +fi + +# GUIX_PROFILE: User's default profile +GUIX_PROFILE="$HOME/.guix-profile" +[ -L $GUIX_PROFILE ] || return +GUIX_LOCPATH="$GUIX_PROFILE/lib/locale" +export GUIX_PROFILE GUIX_LOCPATH + +[ -f "$GUIX_PROFILE/etc/profile" ] && . "$GUIX_PROFILE/etc/profile" + +# set XDG_DATA_DIRS to include Guix installations +export XDG_DATA_DIRS="$GUIX_PROFILE/share:${XDG_DATA_DIRS:-/usr/local/share/:/usr/share/}" +``` + +Please note that this will not take effect until the next shell or desktop +session (log out and log back in). + +### `guix pull` as root + +Before you do this, you need to read the section on [choosing your security +model][security-model] and adjust `guix` and `guix-daemon` flags according to +your choice, as invoking `guix pull` may pull substitutes from substitute +servers (which you may not want). + +As mentioned in a previous section, Guix expects +`${localstatedir}/guix/profiles/per-user/root/current-guix` to be populated with +`root`'s Guix profile, `guix pull`-ed and built by some former version of Guix. +However, this is not the case when we build from source. Therefore, we need to +perform a `guix pull` as `root`: + +```sh +sudo --login guix pull --branch=version-<latest-release-version> +# or +sudo --login guix pull --commit=<particular-commit> +``` + +`guix pull` is quite a long process (especially if you're using +`--no-substitutes`). If you encounter build problems, please refer to the +[troubleshooting section](#troubleshooting). + +Note that running a bare `guix pull` with no commit or branch specified will +pull the latest commit on Guix's master branch, which is likely fine, but not +recommended. + +If you installed Guix from source, you may get an error like the following: +```sh +error: while creating symlink '/root/.config/guix/current' No such file or directory +``` +To resolve this, simply: +``` +sudo mkdir -p /root/.config/guix +``` +Then try the `guix pull` command again. + +After the `guix pull` finishes successfully, +`${localstatedir}/guix/profiles/per-user/root/current-guix` should be populated. + +#### Using the newly-pulled `guix` by restarting the daemon + +Depending on how you installed Guix, you should now make sure that your init +scripts and service configurations point to the newly-pulled `guix-daemon`. + +##### If you built Guix from source + +If you followed the instructions for [fixing argv\[0\]][fix-argv0], you can now +do the following: + +```sh +systemctl stop guix-daemon-original +systemctl disable guix-daemon-original + +systemctl enable guix-daemon +systemctl start guix-daemon +``` + +##### If you installed Guix via the Debian/Ubuntu distribution packages + +You will need to create a `guix-daemon-latest` service which points to the new +`guix` rather than a pinned one. 
+ +```sh +# Create guix-daemon-latest.service by modifying guix-daemon.service +sed -E -e "s|/usr/bin/guix-daemon|/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon|" /etc/systemd/system/guix-daemon.service > /lib/systemd/system/guix-daemon-latest.service +chmod 664 /lib/systemd/system/guix-daemon-latest.service + +# Make systemd recognize the new service +systemctl daemon-reload + +# Make sure that the old guix-daemon.service is stopped and disabled +systemctl stop guix-daemon +systemctl disable guix-daemon + +# Make sure that the new guix-daemon-latest.service is started and enabled +systemctl enable guix-daemon-latest +systemctl start guix-daemon-latest +``` + +##### If you installed Guix via lantw44's Arch Linux AUR package + +At the time of writing (July 5th, 2021) the systemd unit for "updated Guix" is +`guix-daemon-latest.service`, therefore, you should do the following: + +```sh +systemctl stop guix-daemon +systemctl disable guix-daemon + +systemctl enable guix-daemon-latest +systemctl start guix-daemon-latest +``` + +##### Otherwise... + +Simply do: + +```sh +systemctl restart guix-daemon +``` + +### Checking everything + +If you followed all the steps above to make your Guix setup "prim and proper," +you can check that you did everything properly by running through this +checklist. + +1. `/etc/profile.d/guix.sh` should exist and be sourced at each shell login + +2. `guix describe` should not print `guix describe: error: failed to determine + origin`, but rather something like: + + ``` + Generation 38 Feb 22 2021 16:39:31 (current) + guix f350df4 + repository URL: https://git.savannah.gnu.org/git/guix.git + branch: version-1.2.0 + commit: f350df405fbcd5b9e27e6b6aa500da7f101f41e7 + ``` + +3. `guix-daemon` should be running from `${localstatedir}/guix/profiles/per-user/root/current-guix` + +# Troubleshooting + +## Derivation failed to build + +When you see a build failure like below: + +``` +building /gnu/store/...-foo-3.6.12.drv... +/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0' +builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1 +build of /gnu/store/...-foo-3.6.12.drv failed +View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'. +cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built +cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built +cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built +guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed +``` + +It means that `guix` failed to build a package named `foo`, which was a +dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed" +line is not necessarily the root cause, the first "failed" line is. + +Most of the time, the build failure is due to a spurious test failure or the +package's build system/test suite breaking when running multi-threaded. To +rebuild _just_ this derivation in a single-threaded fashion (please don't forget +to add other `guix` flags like `--no-substitutes` as appropriate): + +```sh +$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv +``` + +If the single-threaded rebuild did not succeed, you may need to dig deeper. 
+You may view `foo`'s build logs in `less` like so (please replace paths with the +path you see in the build failure output): + +```sh +$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less +``` + +`foo`'s build directory is also preserved and available at +`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple +times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build +failure output for the most accurate, up-to-date information. + +### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character + +This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem +which rejects characters not present in the UTF-8 character code set. An example +is ZFS with the utf8only=on option set. + +More information: https://bugs.python.org/issue37584 + +### GnuTLS: test-suite FAIL: status-request-revoked + +*The derivation is likely identified by: `/gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`* + +This unfortunate error is most common for non-substitute builders who installed +Guix v1.2.0. The problem stems from the fact that one of GnuTLS's tests uses a +hardcoded certificate which expired on 2020-10-24. + +What's more unfortunate is that this GnuTLS derivation is somewhat special in +Guix's dependency graph and is not affected by the package transformation flags +like `--without-tests=`. + +The easiest solution for those encountering this problem is to install a newer +version of Guix. However, there are ways to work around this issue: + +#### Workaround 1: Using substitutes for this single derivation + +If you've authorized the official Guix build farm's key (more info +[here](./README.md#step-1-authorize-the-signing-keys)), then you can use +substitutes just for this single derivation by invoking the following: + +```sh +guix build --substitute-urls="https://ci.guix.gnu.org" /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +``` + +See [this section](./README.md#removing-authorized-keys) for instructions on how +to remove authorized keys if you don't want to keep the build farm's key +authorized. + +#### Workaround 2: Temporarily setting the system clock back + +This workaround was described [here](https://issues.guix.gnu.org/44559#5). + +Basically: +1. Turn off networking +2. Turn off NTP +3. Set system time to 2020-10-01 +4. guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +5. Set system time back to accurate current time +6. Turn NTP back on +7. Turn networking back on + +### coreutils: FAIL: tests/tail-2/inotify-dir-recreate + +The inotify-dir-create test fails on "remote" filesystems such as overlayfs +(Docker's default filesystem) due to the filesystem being mistakenly recognized +as non-remote. + +A relatively easy workaround to this is to make sure that a somewhat traditional +filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds). For +Docker users, this might mean [using a volume][docker/volumes], [binding +mounting][docker/bind-mnt] from host, or (for those with enough RAM and swap) +[mounting a tmpfs][docker/tmpfs] using the `--tmpfs` flag. 
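+ +A rough sketch of the tmpfs option (the size is illustrative and `<image>` is a placeholder for whatever image you run): + +```sh +# Mount a tmpfs at /tmp so that guix-daemon builds on a non-remote filesystem. +docker run --tmpfs /tmp:rw,exec,size=4g <image> +```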
+ +Please see the following links for more details: + +- An upstream coreutils bug has been filed: [debbugs#47940](https://debbugs.gnu.org/cgi/bugreport.cgi?bug=47940) +- A Guix bug detailing the underlying problem has been filed: [guix-issues#47935](https://issues.guix.gnu.org/47935) +- A commit to skip this test in Guix has been merged into the core-updates branch: +[savannah/guix@6ba1058](https://git.savannah.gnu.org/cgit/guix.git/commit/?id=6ba1058df0c4ce5611c2367531ae5c3cdc729ab4) + + +[install-script]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball +[install-bin-tarball]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball +[install-docker]: #option-3-using-docker-image +[install-distro-pkg]: #option-4-using-a-distribution-maintained-package +[install-source]: #option-5-building-from-source + +[fix-argv0]: #creating-and-starting-a-guix-daemon-original-service-with-a-fixed-argv0 +[security-model]: ./README.md#choosing-your-security-model + +[docker/volumes]: https://docs.docker.com/storage/volumes/ +[docker/bind-mnt]: https://docs.docker.com/storage/bind-mounts/ +[docker/tmpfs]: https://docs.docker.com/storage/tmpfs/ diff --git a/contrib/guix/README.md b/contrib/guix/README.md index 8ce8cb97a0d7a..93ed69d7dc54e 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -1,6 +1,6 @@ -# Bootstrappable Bitcoin Core Builds +# Bootstrappable Dash Core Builds -This directory contains the files necessary to perform bootstrappable Bitcoin +This directory contains the files necessary to perform bootstrappable Dash Core builds. [Bootstrappability][b17e] furthers our binary security guarantees by allowing us @@ -9,114 +9,222 @@ downloads. We achieve bootstrappability by using Guix as a functional package manager. -## Requirements +# Requirements Conservatively, a x86_64 machine with: -- 4GB of free disk space on the partition that /gnu/store will reside in -- 24GB of free disk space on the partition that the Bitcoin Core git repository - resides in +- 16GB of free disk space on the partition that /gnu/store will reside in +- 8GB of free disk space **per platform triple** you're planning on building + (see the `HOSTS` [environment variable description][env-vars-list]) -> Note: these requirements are slightly less onerous than those of Gitian builds +# Installation and Setup -## Setup +If you don't have Guix installed and set up, please follow the instructions in +[INSTALL.md](./INSTALL.md) -### Installing Guix +# Usage -If you're just testing this out, you can use the -[Dockerfile][fanquake/guix-docker] for convenience. It automatically speeds up -your builds by [using substitutes](#speeding-up-builds-with-substitute-servers). -If you don't want this behaviour, refer to the [next -section](#choosing-your-security-model). +If you haven't considered your security model yet, please read [the relevant +section](#choosing-your-security-model) before proceeding to perform a build. -Otherwise, follow the [Guix installation guide][guix/bin-install]. +## Making the Xcode SDK available for macOS cross-compilation -> Note: For those who like to keep their filesystems clean, Guix is designed to -> be very standalone and _will not_ conflict with your system's package -> manager/existing setup. It _only_ touches `/var/guix`, `/gnu`, and -> `~/.config/guix`. 
+In order to perform a build for macOS (which is included in the default set of
+platform triples to build), you'll need to extract the macOS SDK tarball using
+tools found in the [`macdeploy` directory](../macdeploy/README.md).

-### Choosing your security model
+You can then either point to the SDK using the `SDK_PATH` environment variable:

-Guix allows us to achieve better binary security by using our CPU time to build
-everything from scratch. However, it doesn't sacrifice user choice in pursuit of
-this: users can decide whether or not to bootstrap and to use substitutes.
+```sh
+# Extract the SDK tarball to /path/to/parent/dir/of/extracted/SDK/Xcode-<version>-<build id>-extracted-SDK-with-libcxx-headers
+tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode-<version>-<build id>-extracted-SDK-with-libcxx-headers.tar.gz

-After installation, you may want to consider [adding substitute
-servers](#speeding-up-builds-with-substitute-servers) to speed up your build if
-that fits your security model (say, if you're just testing that this works).
-This is skippable if you're using the [Dockerfile][fanquake/guix-docker].
+# Indicate where to locate the SDK tarball
+export SDK_PATH=/path/to/parent/dir/of/extracted/SDK
+```

-If you prefer not to use any substitutes, make sure to set
-`ADDITIONAL_GUIX_ENVIRONMENT_FLAGS` like the following snippet. The first build
-will take a while, but the resulting packages will be cached for future builds.
+or extract it into `depends/SDKs`:

```sh
-export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--no-substitutes'
+mkdir -p depends/SDKs
+tar -C depends/SDKs -xaf /path/to/SDK/tarball
```

-Likewise, to perform a bootstrapped build (takes even longer):
+## Building
+
+*The author highly recommends at least reading over the [common usage patterns
+and examples](#common-guix-build-invocation-patterns-and-examples) section below
+before starting a build. For a full list of customization options, see the
+[recognized environment variables][env-vars-list] section.*
+
+To build Dash Core reproducibly with all default options, invoke the
+following from the top of a clean repository:

```sh
-export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap --no-substitutes'
+./contrib/guix/guix-build
```

-### Using a version of Guix with `guix time-machine` capabilities
+## Codesigning build outputs

-> Note: This entire section can be skipped if you are already using a version of
-> Guix that has [the `guix time-machine` command][guix/time-machine].
+The `guix-codesign` command attaches codesignatures (produced by codesigners) to
+existing non-codesigned outputs. Please see the [release process
+documentation](/doc/release-process.md) for more context.

-Once Guix is installed, if it doesn't have the `guix time-machine` command, pull
-the latest `guix`.
+It respects many of the same environment variable flags as `guix-build`, with 2
+crucial differences:

-```sh
-guix pull --max-jobs=4 # change number of jobs accordingly
+1. Since only Windows and macOS build outputs require codesigning, the `HOSTS`
+   environment variable will have a sane default value of `x86_64-w64-mingw32
+   x86_64-apple-darwin arm64-apple-darwin` instead of all the platforms.
+2. The `guix-codesign` command ***requires*** a `DETACHED_SIGS_REPO` flag.
+   * _**DETACHED_SIGS_REPO**_
+
+     Set the directory where detached codesignatures can be found for the current
+     Dash Core version being built.
+
+     _REQUIRED environment variable_
+
+An invocation with all default options would look like:
+
+```
+env DETACHED_SIGS_REPO=<path/to/detached.sigs> ./contrib/guix/guix-codesign
+```
+
+## Cleaning intermediate work directories
+
+By default, `guix-build` leaves all intermediate files or "work directories"
+(e.g. `depends/work`, `guix-build-*/distsrc-*`) intact at the end of a build so
+that they are available to the user (to aid in debugging, etc.). However, these
+directories usually take up a large amount of disk space. Therefore, a
+`guix-clean` convenience script is provided which cleans the current `git`
+worktree to save disk space:
+
+```
+./contrib/guix/guix-clean
+```
+
+
+## Attesting to build outputs
+
+Much like how Gitian build outputs are attested to in a `gitian.sigs`
+repository, Guix build outputs are attested to in the [`guix.sigs`
+repository](https://github.com/dashpay/guix.sigs).
+
+After you've cloned the `guix.sigs` repository, to attest to the current
+worktree's commit/tag:
+
+```
+env GUIX_SIGS_REPO=<path/to/guix.sigs> SIGNER=<gpg_key_name> ./contrib/guix/guix-attest
```

-Make sure that you are using your current profile. (You are prompted to do this
-at the end of the `guix pull`)
+See `./contrib/guix/guix-attest --help` for more information on the various ways
+`guix-attest` can be invoked.

-```bash
-export PATH="${HOME}/.config/guix/current/bin${PATH:+:}$PATH"
+## Verifying build output attestations
+
+After at least one other signer has uploaded their signatures to the `guix.sigs`
+repository:
+
+```
+git -C <path/to/guix.sigs> pull
+env GUIX_SIGS_REPO=<path/to/guix.sigs> ./contrib/guix/guix-verify
+```
+
+
+## Common `guix-build` invocation patterns and examples
+
+### Keeping caches and SDKs outside of the worktree
+
+If you perform a lot of builds and have a bunch of worktrees, you may find it
+more efficient to keep the depends tree's download cache, build cache, and SDKs
+outside of the worktrees to avoid duplicate downloads and unnecessary builds. To
+help with this situation, the `guix-build` script honours the `SOURCES_PATH`,
+`BASE_CACHE`, and `SDK_PATH` environment variables and will pass them on to the
+depends tree so that you can do something like:
+
+```sh
+env SOURCES_PATH="$HOME/depends-SOURCES_PATH" BASE_CACHE="$HOME/depends-BASE_CACHE" SDK_PATH="$HOME/macOS-SDKs" ./contrib/guix/guix-build
```

-## Usage
+Note that the paths that these environment variables point to **must be
+directories**, and **NOT symlinks to directories**.
+
+See the [recognized environment variables][env-vars-list] section for more
+details.

-### As a Development Environment
+### Building a subset of platform triples

-For a Bitcoin Core depends development environment, simply invoke
+Sometimes you only want to build a subset of the supported platform triples, in
+which case you can override the default list by setting the space-separated
+`HOSTS` environment variable:

```sh
-guix environment --manifest=contrib/guix/manifest.scm
+env HOSTS='x86_64-w64-mingw32 x86_64-apple-darwin' ./contrib/guix/guix-build
```

-And you'll land back in your shell with all the build dependencies required for
-a `depends` build injected into your environment.
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
+### Controlling the number of threads used by `guix` build commands

-### As a Tool for Deterministic Builds
+Depending on your system's RAM capacity, you may want to decrease the number of
+threads used to decrease RAM usage or vice versa.
-From the top of a clean Bitcoin Core repository:
+By default, the scripts under `./contrib/guix` will invoke all `guix` build
+commands with `--cores="$JOBS"`. Note that `$JOBS` defaults to `$(nproc)` if not
+specified. However, astute manual readers will also notice that `guix` build
+commands also accept a `--max-jobs=` flag (which defaults to 1 if unspecified).
+
+Here is the difference between `--cores=` and `--max-jobs=`:
+
+> Note: When I say "derivation," think "package"
+
+`--cores=`
+
+  - controls the number of CPU cores to build each derivation. This is the value
+    passed to `make`'s `--jobs=` flag.
+
+`--max-jobs=`
+
+  - controls how many derivations can be built in parallel
+  - defaults to 1
+
+Therefore, the default is for `guix` build commands to build one derivation at a
+time, utilizing `$JOBS` threads.
+
+Specifying the `$JOBS` environment variable will only modify `--cores=`, but you
+can also modify the value for `--max-jobs=` by specifying
+`$ADDITIONAL_GUIX_COMMON_FLAGS`. For example, if you have a LOT of memory, you
+may want to set:

```sh
-./contrib/guix/guix-build.sh
+export ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
```

-After the build finishes successfully (check the status code please), compare
-hashes:
+This allows for a maximum of 8 derivations to be built at the same time, each
+utilizing `$JOBS` threads.
+
+Or, if you'd like to avoid spurious build failures caused by issues with
+parallelism within a single package, but would still like to build multiple
+packages when the dependency graph allows for it, you may want to try:

```sh
-find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
+export JOBS=1 ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
```

-#### Recognized environment variables
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
+## Recognized environment variables

 * _**HOSTS**_

   Override the space-separated list of platform triples for which to perform a
-  bootstrappable build. _(defaults to "x86\_64-linux-gnu
-  arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu")_
+  bootstrappable build.

-  > Windows and OS X platform triplet support are WIP.
+  _(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu
+  powerpc64-linux-gnu powerpc64le-linux-gnu
+  x86\_64-w64-mingw32 x86\_64-apple-darwin arm64-apple-darwin")_

 * _**SOURCES_PATH**_

@@ -124,92 +232,241 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
   depends tree. Setting this to the same directory across multiple builds of
   the depends tree can eliminate unnecessary redownloading of package sources.

-* _**MAX_JOBS**_
+  The path that this environment variable points to **must be a directory**, and
+  **NOT a symlink to a directory**.
+
+* _**BASE_CACHE**_
+
+  Set the depends tree cache for built packages. This is passed through to the
+  depends tree. Setting this to the same directory across multiple builds of the
+  depends tree can eliminate unnecessary building of packages.
+
+  The path that this environment variable points to **must be a directory**, and
+  **NOT a symlink to a directory**.
+
+* _**SDK_PATH**_
+
+  Set the path where _extracted_ SDKs can be found. This is passed through to
+  the depends tree. Note that this should be set to the _parent_ directory of
+  the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of
+  `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`).
+
+  The path that this environment variable points to **must be a directory**, and
+  **NOT a symlink to a directory**.
+
+* _**JOBS**_

-  Override the maximum number of jobs to run simultaneously, you might want to
-  do so on a memory-limited machine. This may be passed to `make` as in `make
-  --jobs="$MAX_JOBS"` or `xargs` as in `xargs -P"$MAX_JOBS"`. _(defaults to the
-  value of `nproc` outside the container)_
+  Override the number of jobs to run simultaneously; you might want to do so on
+  a memory-limited machine. This may be passed to:
+
+  - `guix` build commands as in `guix environment --cores="$JOBS"`
+  - `make` as in `make --jobs="$JOBS"`
+  - `xargs` as in `xargs -P"$JOBS"`
+
+  See [here](#controlling-the-number-of-threads-used-by-guix-build-commands) for
+  more details.
+
+  _(defaults to the value of `nproc` outside the container)_

 * _**SOURCE_DATE_EPOCH**_

   Override the reference UNIX timestamp used for bit-for-bit reproducibility,
-  the variable name conforms to [standard][r12e/source-date-epoch]. _(defaults
-  to the output of `$(git log --format=%at -1)`)_
+  the variable name conforms to [standard][r12e/source-date-epoch].
+
+  _(defaults to the output of `$(git log --format=%at -1)`)_

 * _**V**_

   If non-empty, will pass `V=1` to all `make` invocations, making `make` output
   verbose.

-* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
+  Note that any given value is ignored. The variable is only checked for
+  emptiness. More concretely, this means that `V=` (setting `V` to the empty
+  string) is interpreted the same way as not setting `V` at all, and that `V=0`
+  has the same effect as `V=1`.
+
+* _**SUBSTITUTE_URLS**_

-  Additional flags to be passed to `guix environment`. For a fully-bootstrapped
-  build, set this to `--bootstrap --no-substitutes` (refer to the [security
-  model section](#choosing-your-security-model) for more details). Note that a
-  fully-bootstrapped build will take quite a long time on the first run.
+  A whitespace-delimited list of URLs from which to download pre-built packages.
+  A URL is only used if its signing key is authorized (refer to the [substitute
+  servers section](#option-1-building-with-substitutes) for more details).

-## Tips and Tricks
+* _**ADDITIONAL_GUIX_COMMON_FLAGS**_

-### Speeding up builds with substitute servers
+  Additional flags to be passed to all `guix` commands.

-_This whole section is automatically done in the convenience
-[Dockerfiles][fanquake/guix-docker]_
+* _**ADDITIONAL_GUIX_TIMEMACHINE_FLAGS**_

-For those who are used to life in the fast _(and trustful)_ lane, you can use
-[substitute servers][guix/substitutes] to enable binary downloads of packages.
+  Additional flags to be passed to `guix time-machine`.

-> For those who only want to use substitutes from the official Guix build farm
-> and have authorized the build farm's signing key during Guix's installation,
-> you don't need to do anything.
+* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
+
+  Additional flags to be passed to the invocation of `guix environment` inside
+  `guix time-machine`.
+
+# Choosing your security model

-#### Authorize the signing keys
+No matter how you installed Guix, you need to decide on your security model for
+building packages with Guix.
+
+Guix allows us to achieve better binary security by using our CPU time to build
+everything from scratch. However, it doesn't sacrifice user choice in pursuit of
+this: users can decide whether or not to use **substitutes** (pre-built
+packages).
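+
+For example, to preview how much a given command would substitute (download)
+versus build locally under your current settings, you can do a dry run first
+(the `hello` package here is purely illustrative):
+
+```sh
+guix build --dry-run hello
+```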
+
+## Option 1: Building with substitutes
+
+### Step 1: Authorize the signing keys
+
+Depending on the installation procedure you followed, you may have already
+authorized the Guix build farm key. In particular, the official shell installer
+script asks you if you want the key installed, and the Debian distribution
+package authorizes the key during installation.
+
+You can check the current list of authorized keys at `/etc/guix/acl`.
+
+At the time of writing, a `/etc/guix/acl` with just the Guix build farm key
+authorized looks something like:
+
+```lisp
+(acl
+ (entry
+  (public-key
+   (ecc
+    (curve Ed25519)
+    (q #8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394#)
+    )
+   )
+  (tag
+   (guix import)
+   )
+  )
+ )
+```

-For the official Guix build farm at https://ci.guix.gnu.org, run as root:
+If you've determined that the official Guix build farm key hasn't been
+authorized, and you would like to authorize it, run the following as root:

```
-guix archive --authorize < ~root/.config/guix/current/share/guix/ci.guix.gnu.org.pub
+guix archive --authorize < /var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub
+```
+
+If
+`/var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub`
+doesn't exist, try:
+
+```sh
+guix archive --authorize < <PREFIX>/share/guix/ci.guix.gnu.org.pub
+```

+Where `<PREFIX>` is likely:
+- `/usr` if you installed from a distribution package
+- `/usr/local` if you installed Guix from source and didn't supply any
+  prefix-modifying flags to Guix's `./configure`
+
 For dongcarl's substitute server at https://guix.carldong.io, run as root:

 ```sh
 wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize
 ```

-#### Use the substitute servers
+#### Removing authorized keys

-The official Guix build farm at https://ci.guix.gnu.org is automatically used
-unless the `--no-substitutes` flag is supplied.
+To remove previously authorized keys, simply edit `/etc/guix/acl` and remove the
+`(entry (public-key ...))` entry.

-This can be overridden for all `guix` invocations by passing the
-`--substitute-urls` option to your invocation of `guix-daemon`. This can also be
-overridden on a call-by-call basis by passing the same `--substitute-urls`
-option to client tools such at `guix environment`.
+### Step 2: Specify the substitute servers

-To use dongcarl's substitute server for Bitcoin Core builds after having
-[authorized his signing key](#authorize-the-signing-keys):
+Once its key is authorized, the official Guix build farm at
+https://ci.guix.gnu.org is automatically used unless the `--no-substitutes` flag
+is supplied. This default list of substitute servers is overridable both on a
+`guix-daemon` level and when you invoke `guix` commands. See examples below for
+the various ways of adding dongcarl's substitute server after having [authorized
+his signing key](#authorize-the-signing-keys).
+Change the **default list** of substitute servers by starting `guix-daemon` with
+the `--substitute-urls` option (you will likely need to edit your init script):
+
+```sh
+guix-daemon --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
+```
+
+Override the default list of substitute servers by passing the
+`--substitute-urls` option for invocations of `guix` commands:
+
+```sh
+guix <cmd> --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
+```
+
+For scripts under `./contrib/guix`, set the `SUBSTITUTE_URLS` environment
+variable:
+
+```sh
+export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org'
+```
+
+## Option 2: Disabling substitutes on an ad-hoc basis
+
+If you prefer not to use any substitutes, make sure to supply `--no-substitutes`
+like in the following snippet. The first build will take a while, but the
+resulting packages will be cached for future builds.
+
+For direct invocations of `guix`:
+```sh
+guix <cmd> --no-substitutes
```
-export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--substitute-urls="https://guix.carldong.io https://ci.guix.gnu.org"'
+
+For the scripts under `./contrib/guix/`:
+```sh
+export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes'
```

-## FAQ
+## Option 3: Disabling substitutes by default
+
+`guix-daemon` accepts a `--no-substitutes` flag, which will make sure that,
+unless otherwise overridden by a command line invocation, no substitutes will be
+used.

-### How can I trust the binary installation?
+If you start `guix-daemon` using an init script, you can edit said script to
+supply this flag.

-As mentioned at the bottom of [this manual page][guix/bin-install]:
-> The binary installation tarballs can be (re)produced and verified simply by
-> running the following command in the Guix source tree:
->
->     make guix-binary.x86_64-linux.tar.xz
+# Purging/Uninstalling Guix

-### When will Guix be packaged in debian?
+In the extraordinarily rare case where you messed up your Guix installation in
+an irreversible way, you may want to completely purge Guix from your system and
+start over.

-Vagrant Cascadian has been making good progress on this
-[here][debian/guix-package]. We have all the pieces needed to put up an APT
-repository and will likely put one up soon.
+1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt
+   purge guix` for Ubuntu packaging, `sudo make uninstall` for
+   built-from-source).
+2. Remove all build users and groups
+
+   You may check for relevant users and groups using:
+
+   ```
+   getent passwd | grep guix
+   getent group | grep guix
+   ```
+
+   Then, you may remove users and groups using:
+
+   ```
+   sudo userdel <user>
+   sudo groupdel <group>
+   ```
+
+3. Remove all possible Guix-related directories
+   - `/var/guix/`
+   - `/var/log/guix/`
+   - `/gnu/`
+   - `/etc/guix/`
+   - `/home/*/.config/guix/`
+   - `/home/*/.cache/guix/`
+   - `/home/*/.guix-profile/`
+   - `/root/.config/guix/`
+   - `/root/.cache/guix/`
+   - `/root/.guix-profile/`

 [b17e]: http://bootstrappable.org/
 [r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
@@ -221,5 +478,8 @@ repository and will likely put one up soon.
[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html
 [guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html

-[debian/guix-package]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=850644
-[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix
+[debian/guix-bullseye]: https://packages.debian.org/bullseye/guix
+[ubuntu/guix-hirsute]: https://packages.ubuntu.com/hirsute/guix
+[guix-docker]: https://github.com/dashpay/dash/tree/master/contrib/guix/Dockerfile
+
+[env-vars-list]: #recognized-environment-variables
diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest
new file mode 100755
index 0000000000000..84fb2840eaf9c
--- /dev/null
+++ b/contrib/guix/guix-attest
@@ -0,0 +1,264 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Source the common prelude, which:
+# 1. Checks if we're at the top directory of the Dash Core repository
+# 2. Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## Sanity Checks ##
+###################
+
+################
+# Required non-builtin commands should be invokable
+################
+
+check_tools cat env basename mkdir diff sort
+
+if [ -z "$NO_SIGN" ]; then
+    # make it possible to override the gpg binary
+    GPG=${GPG:-gpg}
+
+    # $GPG can contain extra arguments passed to the binary
+    # so let's check only the existence of arg[0]
+    # shellcheck disable=SC2206
+    GPG_ARRAY=($GPG)
+    check_tools "${GPG_ARRAY[0]}"
+fi
+
+################
+# Required env vars should be non-empty
+################
+
+cmd_usage() {
+cat <<EOF
+Usage: env GUIX_SIGS_REPO=<path/to/guix.sigs> \\
+           SIGNER=GPG_KEY_NAME[=SIGNER_NAME] \\
+           [ NO_SIGN=1 ]
+       ./contrib/guix/guix-attest
+
+Example w/o overriding signing name:
+
+    env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\
+        SIGNER=achow101 \\
+        ./contrib/guix/guix-attest
+
+Example overriding signing name:
+
+    env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs \\
+        SIGNER=0x96AB007F1A7ED999=dongcarl \\
+        ./contrib/guix/guix-attest
+
+Example w/o signing, just creating SHA256SUMS:
+
+    env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\
+        SIGNER=achow101 \\
+        NO_SIGN=1 \\
+        ./contrib/guix/guix-attest
+
+EOF
+}
+
+if [ -z "$GUIX_SIGS_REPO" ] || [ -z "$SIGNER" ]; then
+    cmd_usage
+    exit 1
+fi
+
+################
+# GUIX_SIGS_REPO should exist as a directory
+################
+
+if [ ! -d "$GUIX_SIGS_REPO" ]; then
+cat << EOF
+ERR: The specified GUIX_SIGS_REPO is not an existent directory:
+
+    '$GUIX_SIGS_REPO'
+
+Hint: Please clone the guix.sigs repository and point to it with the
+      GUIX_SIGS_REPO environment variable.
+
+EOF
+cmd_usage
+exit 1
+fi
+
+################
+# The key specified in SIGNER should be usable
+################
+
+IFS='=' read -r gpg_key_name signer_name <<< "$SIGNER"
+if [ -z "${signer_name}" ]; then
+    signer_name="$gpg_key_name"
+fi
+
+if [ -z "$NO_SIGN" ] && ! ${GPG} --dry-run --list-secret-keys "${gpg_key_name}" >/dev/null 2>&1; then
+    echo "ERR: GPG can't seem to find any key named '${gpg_key_name}'"
+    exit 1
+fi
+
+################
+# We should be able to find at least one output
+################
+
+echo "Looking for build output SHA256SUMS fragments in ${OUTDIR_BASE}"
+
+shopt -s nullglob
+sha256sum_fragments=( "$OUTDIR_BASE"/*/SHA256SUMS.part ) # This expands to an array of file paths...
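+# Each entry is a path of the form "$OUTDIR_BASE"/<host>/SHA256SUMS.part (or
+# "$OUTDIR_BASE"/<host>-codesigned/SHA256SUMS.part), i.e. one fragment file per
+# platform triple that produced outputs.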
+shopt -u nullglob
+
+noncodesigned_fragments=()
+codesigned_fragments=()
+
+if (( ${#sha256sum_fragments[@]} )); then
+    echo "Found build output SHA256SUMS fragments:"
+    for outdir in "${sha256sum_fragments[@]}"; do
+        echo "    '$outdir'"
+        case "$outdir" in
+            "$OUTDIR_BASE"/*-codesigned/SHA256SUMS.part)
+                codesigned_fragments+=("$outdir")
+                ;;
+            *)
+                noncodesigned_fragments+=("$outdir")
+                ;;
+        esac
+    done
+    echo
+else
+    echo "ERR: Could not find any build output SHA256SUMS fragments in ${OUTDIR_BASE}"
+    exit 1
+fi
+
+##############
+##  Attest  ##
+##############
+
+# Usage: out_name $outdir
+#
+#   OUTDIR: The output directory being attested
+#
+out_name() {
+    basename "$(dirname "$1")"
+}
+
+shasum_already_exists() {
+cat < "$temp_noncodesigned"
+        if [ -e noncodesigned.SHA256SUMS ]; then
+            # The SHA256SUMS already exists, make sure it's exactly what we
+            # expect, error out if not
+            if diff -u noncodesigned.SHA256SUMS "$temp_noncodesigned"; then
+                echo "A noncodesigned.SHA256SUMS file already exists for '${VERSION}' and is up-to-date."
+            else
+                shasum_already_exists noncodesigned.SHA256SUMS
+                exit 1
+            fi
+        else
+            mv "$temp_noncodesigned" noncodesigned.SHA256SUMS
+        fi
+    else
+        echo "ERR: No noncodesigned outputs found for '${VERSION}', exiting..."
+        exit 1
+    fi
+
+    temp_codesigned="$(mktemp)"
+    trap 'rm -rf -- "$temp_codesigned"' EXIT
+
+    if (( ${#codesigned_fragments[@]} )); then
+        # Note: codesigned.SHA256SUMS attests to all of $sha256sum_fragments, but
+        # is not needed if there are no $codesigned_fragments
+        cat "${sha256sum_fragments[@]}" \
+            | sort -u \
+            | sort -k2 \
+            | sed 's/$/\r/' \
+            | basenameify_SHA256SUMS \
+            > "$temp_codesigned"
+        if [ -e codesigned.SHA256SUMS ]; then
+            # The SHA256SUMS already exists, make sure it's exactly what we
+            # expect, error out if not
+            if diff -u codesigned.SHA256SUMS "$temp_codesigned"; then
+                echo "A codesigned.SHA256SUMS file already exists for '${VERSION}' and is up-to-date."
+            else
+                shasum_already_exists codesigned.SHA256SUMS
+                exit 1
+            fi
+        else
+            mv "$temp_codesigned" codesigned.SHA256SUMS
+        fi
+    else
+        # It is fine to have the codesigned outputs be missing (perhaps the
+        # detached codesigs have not been published yet), just print a log
+        # message instead of erroring out
+        echo "INFO: No codesigned outputs found for '${VERSION}', skipping..."
+    fi
+
+    if [ -z "$NO_SIGN" ]; then
+        echo "Signing SHA256SUMS to produce SHA256SUMS.asc"
+        for i in *.SHA256SUMS; do
+            if [ ! -e "$i".asc ]; then
+                ${GPG} --detach-sign \
+                    --digest-algo sha256 \
+                    --local-user "$gpg_key_name" \
+                    --armor \
+                    --output "$i".asc "$i"
+            else
+                echo "Signature already there"
+            fi
+        done
+    else
+        echo "Not signing SHA256SUMS as \$NO_SIGN is not empty"
+    fi
+    echo ""
+)
diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build
new file mode 100755
index 0000000000000..a5131d72f7e2a
--- /dev/null
+++ b/contrib/guix/guix-build
@@ -0,0 +1,463 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Source the common prelude, which:
+# 1. Checks if we're at the top directory of the Dash Core repository
+# 2. Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## SANITY CHECKS ##
+###################
+
+################
+# Required non-builtin commands should be invocable
+################
+
+check_tools cat mkdir make getent curl git guix
+
+################
+# GUIX_BUILD_OPTIONS should be empty
+################
+#
+# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that
+# can perform builds. This seems like what we want instead of
+# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually
+# _appended_ to normal command-line options. Meaning that they will take
+# precedence over the command-specific ADDITIONAL_GUIX_<CMD>_FLAGS.
+#
+# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's
+# existence here and direct users of this script to use our (more flexible)
+# custom environment variables.
+if [ -n "$GUIX_BUILD_OPTIONS" ]; then
+cat << EOF
+Error: Environment variable GUIX_BUILD_OPTIONS is not empty:
+  '$GUIX_BUILD_OPTIONS'
+
+Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset
+GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options
+across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a
+specific guix command.
+
+See contrib/guix/README.md for more details.
+EOF
+exit 1
+fi
+
+################
+# The git worktree should not be dirty
+################
+
+if ! git diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then
+cat << EOF
+ERR: The current git worktree is dirty, which may lead to broken builds.
+
+     Aborting...
+
+Hint: To make your git worktree clean, you may want to:
+      1. Commit your changes,
+      2. Stash your changes, or
+      3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on
+         using a dirty worktree
+EOF
+exit 1
+fi
+
+mkdir -p "$VERSION_BASE"
+
+################
+# Build directories should not exist
+################
+
+# Default to building for all supported HOSTs (overridable by environment)
+export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu
+                       x86_64-w64-mingw32
+                       x86_64-apple-darwin arm64-apple-darwin}"
+
+# Usage: distsrc_for_host HOST
+#
+#   HOST: The current platform triple we're building for
+#
+distsrc_for_host() {
+    echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}"
+}
+
+# Accumulate a list of build directories that already exist...
+hosts_distsrc_exists=""
+for host in $HOSTS; do
+    if [ -e "$(distsrc_for_host "$host")" ]; then
+        hosts_distsrc_exists+=" ${host}"
+    fi
+done
+
+if [ -n "$hosts_distsrc_exists" ]; then
+# ...so that we can print them out nicely in an error message
+cat << EOF
+ERR: Build directories for this commit already exist for the following platform
+     triples you're attempting to build, probably because of previous builds.
+     Please remove, or otherwise deal with them prior to starting another build.
+
+     Aborting...
+
+Hint: To blow everything away, you may want to use:
+
+  $ ./contrib/guix/guix-clean
+
+Specifically, this will remove all files without an entry in the index,
+excluding the SDK directory, the depends download cache, the depends built
+packages cache, the garbage collector roots for Guix environments, and the
+output directory.
+EOF
+for host in $hosts_distsrc_exists; do
+    echo "    ${host} '$(distsrc_for_host "$host")'"
+done
+exit 1
+else
+    mkdir -p "$DISTSRC_BASE"
+fi
+
+################
+# When building for darwin, the macOS SDK should exist
+################
+
+for host in $HOSTS; do
+    case "$host" in
+        *darwin*)
+            OSX_SDK="$(make -C "${PWD}/depends" --no-print-directory HOST="$host" print-OSX_SDK | sed 's@^[^=]\+=@@g')"
+            if [ -e "$OSX_SDK" ]; then
+                echo "Found macOS SDK at '${OSX_SDK}', using..."
+                break
+            else
+                echo "macOS SDK does not exist at '${OSX_SDK}', please place the extracted, untarred SDK there to perform darwin builds, or define the SDK_PATH environment variable. Exiting..."
+                exit 1
+            fi
+            ;;
+    esac
+done
+
+################
+# VERSION_BASE should have enough space
+################
+
+avail_KiB="$(df -Pk "$VERSION_BASE" | sed 1d | tr -s ' ' | cut -d' ' -f4)"
+total_required_KiB=0
+for host in $HOSTS; do
+    case "$host" in
+        *darwin*) required_KiB=440000 ;;
+        *mingw*) required_KiB=7600000 ;;
+        *) required_KiB=6400000 ;;
+    esac
+    total_required_KiB=$((total_required_KiB+required_KiB))
+done
+
+if (( total_required_KiB > avail_KiB )); then
+    total_required_GiB=$((total_required_KiB / 1048576))
+    avail_GiB=$((avail_KiB / 1048576))
+    echo "Performing a Dash Core Guix build for the selected HOSTS requires ${total_required_GiB} GiB, however, only ${avail_GiB} GiB is available. Please free up some disk space before performing the build."
+    exit 1
+fi
+
+################
+# Check that we can connect to the guix-daemon
+################
+
+cat << EOF
+Checking that we can connect to the guix-daemon...
+
+Hint: If this hangs, you may want to try turning your guix-daemon off and on
+      again.
+
+EOF
+if ! guix gc --list-failures > /dev/null; then
+cat << EOF
+
+ERR: Failed to connect to the guix-daemon, please ensure that one is running and
+     reachable.
+EOF
+exit 1
+fi
+
+# Developer note: we could use `guix repl` for this check and run:
+#
+#     (import (guix store)) (close-connection (open-connection))
+#
+# However, the internal API is likely to change more than the CLI invocation
+
+################
+# Services database must have basic entries
+################
+
+if ! getent services http https ftp > /dev/null 2>&1; then
+cat << EOF
+ERR: Your system's C library cannot find service database entries for at least
+     one of the following services: http, https, ftp.
+
+Hint: Most likely, /etc/services does not exist yet (common for docker images
+      and minimal distros), or you don't have permissions to access it.
+
+      If /etc/services does not exist yet, you may want to install the
+      appropriate package for your distro which provides it.
+ + On Debian/Ubuntu: netbase + On Arch Linux: iana-etc + + For more information, see: getent(1), services(5) + +EOF + +fi + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Usage: host_to_commonname HOST +# +# HOST: The current platform triple we're building for +# +host_to_commonname() { + case "$1" in + *darwin*) echo osx ;; + *mingw*) echo win ;; + *linux*) echo linux ;; + *) exit 1 ;; + esac +} + +# Determine the reference time used for determinism (overridable by environment) +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" + +# Precious directories are those which should not be cleaned between successive +# guix builds +depends_precious_dir_names='SOURCES_PATH BASE_CACHE SDK_PATH' +precious_dir_names="${depends_precious_dir_names} OUTDIR_BASE PROFILES_BASE" + +# Usage: contains IFS-SEPARATED-LIST ITEM +contains() { + for i in ${1}; do + if [ "$i" = "${2}" ]; then + return 0 # Found! + fi + done + return 1 +} + +# If the user explicitly specified a precious directory, create it so we +# can map it into the container +for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if [ -n "$precious_dir_path" ]; then + if [ ! -e "$precious_dir_path" ]; then + mkdir -p "$precious_dir_path" + elif [ -L "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} cannot be a symbolic link" + exit 1 + elif [ ! -d "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} must be a directory" + exit 1 + fi + fi +done + +mkdir -p "$VAR_BASE" + +# Record the _effective_ values of precious directories such that guix-clean can +# avoid clobbering them if appropriate. +# +# shellcheck disable=SC2046,SC2086 +{ + # Get depends precious dir definitions from depends + make -C "${PWD}/depends" \ + --no-print-directory \ + -- $(printf "print-%s\n" $depends_precious_dir_names) + + # Get remaining precious dir definitions from the environment + for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if ! contains "$depends_precious_dir_names" "$precious_dir_name"; then + echo "${precious_dir_name}=${precious_dir_path}" + fi + done +} > "${VAR_BASE}/precious_dirs" + +# Make sure an output directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" + +# Download the depends sources now as we won't have internet access in the build +# container +for host in $HOSTS; do + make -C "${PWD}/depends" -j"$JOBS" download-"$(host_to_commonname "$host")" ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} +done + +# Usage: outdir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +outdir_for_host() { + echo "${OUTDIR_BASE}/${1}${2:+-${2}}" +} + +# Usage: profiledir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}${2:+-${2}}" +} + + +######### +# BUILD # +######### + +# Function to be called when building for host ${1} and the user interrupts the +# build +int_trap() { +cat << EOF +** INT received while building ${1}, you may want to clean up the relevant + work directories (e.g. 
distsrc-*) before rebuilding + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +} + +# Deterministically build Dash Core +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + # shellcheck disable=SC2030 +cat << EOF +INFO: Building ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set} + ...running at most ${JOBS:?not set} jobs + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/dash' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST")' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")' +EOF + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. + # + # Explanation of `guix environment` flags: + # + # --container run command within an isolated container + # + # Running in an isolated container minimizes build-time differences + # between machines and improves reproducibility + # + # --pure unset existing environment variables + # + # Same rationale as --container + # + # --no-cwd do not share current working directory with an + # isolated container + # + # When --container is specified, the default behavior is to share + # the current working directory with the isolated container at the + # same exact path (e.g. mapping '/home/satoshi/dash/' to + # '/home/satoshi/dash/'). This means that the $PWD inside the + # container becomes a source of irreproducibility. --no-cwd disables + # this behaviour. + # + # --share=SPEC for containers, share writable host file system + # according to SPEC + # + # --share="$PWD"=/dash + # + # maps our current working directory to /dash + # inside the isolated container, which we later cd + # into. + # + # While we don't want to map our current working directory to the + # same exact path (as this introduces irreproducibility), we do want + # it to be at a _fixed_ path _somewhere_ inside the isolated + # container so that we have something to build. '/dash' was + # chosen arbitrarily. + # + # ${SOURCES_PATH:+--share="$SOURCES_PATH"} + # + # make the downloaded depends sources path available + # inside the isolated container + # + # The isolated container has no network access as it's in a + # different network namespace from the main machine, so we have to + # make the downloaded depends sources available to it. The sources + # should have been downloaded prior to this invocation. + # + # --keep-failed keep build tree of failed builds + # + # When builds of the Guix environment itself (not Dash Core) + # fail, it is useful for the build tree to be kept for debugging + # purposes. 
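    #
    #   --fallback                  fall back to building when the substituter
    #                               fails
    #
    #     With substitutes enabled, a failed substitution would otherwise abort
    #     the run; --fallback makes guix build the affected derivations locally
    #     instead.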
+ # + # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} + # + # fetch substitute from SUBSTITUTE_URLS if they are + # authorized + # + # Depending on the user's security model, it may be desirable to use + # substitutes (pre-built packages) from servers that the user trusts. + # Please read the README.md in the same directory as this file for + # more information. + # + # shellcheck disable=SC2086,SC2031 + time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --share="$PWD"=/dash \ + --share="$DISTSRC_BASE"=/distsrc-base \ + --share="$OUTDIR_BASE"=/outdir-base \ + --expose="$(git rev-parse --git-common-dir)" \ + ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + ${BASE_CACHE:+--share="$BASE_CACHE"} \ + ${SDK_PATH:+--share="$SDK_PATH"} \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + --link-profile \ + --root="$(profiledir_for_host "${HOST}")" \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- env HOST="$host" \ + DISTNAME="$DISTNAME" \ + JOBS="$JOBS" \ + SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ + ${V:+V=1} \ + ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH:+SDK_PATH="$SDK_PATH"} \ + DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ + OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")" \ + DIST_ARCHIVE_BASE=/outdir-base/dist-archive \ + bash -c "cd /dash && bash contrib/guix/libexec/build.sh" + ) + +done diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh deleted file mode 100755 index 11d2c8b86727e..0000000000000 --- a/contrib/guix/guix-build.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bash -export LC_ALL=C -set -e -o pipefail - -# Determine the maximum number of jobs to run simultaneously (overridable by -# environment) -MAX_JOBS="${MAX_JOBS:-$(nproc)}" - -# Download the depends sources now as we won't have internet access in the build -# container -make -C "${PWD}/depends" -j"$MAX_JOBS" download ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} - -# Determine the reference time used for determinism (overridable by environment) -SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" - -# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility -# across time. -time-machine() { - guix time-machine --url=https://github.com/dongcarl/guix.git \ - --commit=b066c25026f21fb57677aa34692a5034338e7ee3 \ - -- "$@" -} - -# Function to be called when building for host ${1} and the user interrupts the -# build -int_trap() { -cat << EOF -** INT received while building ${1}, you may want to clean up the relevant - output, deploy, and distsrc-* directories before rebuilding - -Hint: To blow everything away, you may want to use: - - $ git clean -xdff --exclude='/depends/SDKs/*' - -Specifically, this will remove all files without an entry in the index, -excluding the SDK directory. Practically speaking, this means that all ignored -and untracked files and directories will be wiped, allowing you to start anew. 
-EOF -} - -# Deterministically build Bitcoin Core for HOSTs (overridable by environment) -# shellcheck disable=SC2153 -for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu x86_64-w64-mingw32}; do - - # Display proper warning when the user interrupts the build - trap 'int_trap ${host}' INT - - ( - # Required for 'contrib/guix/manifest.scm' to output the right manifest - # for the particular $HOST we're building for - export HOST="$host" - - # Run the build script 'contrib/guix/libexec/build.sh' in the build - # container specified by 'contrib/guix/manifest.scm'. - # - # Explanation of `guix environment` flags: - # - # --container run command within an isolated container - # - # Running in an isolated container minimizes build-time differences - # between machines and improves reproducibility - # - # --pure unset existing environment variables - # - # Same rationale as --container - # - # --no-cwd do not share current working directory with an - # isolated container - # - # When --container is specified, the default behavior is to share - # the current working directory with the isolated container at the - # same exact path (e.g. mapping '/home/satoshi/bitcoin/' to - # '/home/satoshi/bitcoin/'). This means that the $PWD inside the - # container becomes a source of irreproducibility. --no-cwd disables - # this behaviour. - # - # --share=SPEC for containers, share writable host file system - # according to SPEC - # - # --share="$PWD"=/bitcoin - # - # maps our current working directory to /bitcoin - # inside the isolated container, which we later cd - # into. - # - # While we don't want to map our current working directory to the - # same exact path (as this introduces irreproducibility), we do want - # it to be at a _fixed_ path _somewhere_ inside the isolated - # container so that we have something to build. '/bitcoin' was - # chosen arbitrarily. - # - # ${SOURCES_PATH:+--share="$SOURCES_PATH"} - # - # make the downloaded depends sources path available - # inside the isolated container - # - # The isolated container has no network access as it's in a - # different network namespace from the main machine, so we have to - # make the downloaded depends sources available to it. The sources - # should have been downloaded prior to this invocation. - # - # shellcheck disable=SC2086 - time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ - --container \ - --pure \ - --no-cwd \ - --share="$PWD"=/bitcoin \ - --expose="$(git rev-parse --git-common-dir)" \ - ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ - ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ - -- env HOST="$host" \ - MAX_JOBS="$MAX_JOBS" \ - SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ - ${V:+V=1} \ - ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ - bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh" - ) - -done diff --git a/contrib/guix/guix-clean b/contrib/guix/guix-clean new file mode 100755 index 0000000000000..1971353a69007 --- /dev/null +++ b/contrib/guix/guix-clean @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Dash Core repository +# 2. 
Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## Sanity Checks ##
+###################
+
+################
+# Required non-builtin commands should be invokable
+################
+
+check_tools cat mkdir make git guix
+
+
+#############
+##  Clean  ##
+#############
+
+# Usage: under_dir MAYBE_PARENT MAYBE_CHILD
+#
+#   If MAYBE_CHILD is a subdirectory of MAYBE_PARENT, print the relative path
+#   from MAYBE_PARENT to MAYBE_CHILD. Otherwise, return 1 as the error code.
+#
+#   NOTE: This does not perform any symlink-resolving or path canonicalization.
+#
+under_dir() {
+    local path_residue
+    path_residue="${2##${1}}"
+    if [ -z "$path_residue" ] || [ "$path_residue" = "$2" ]; then
+        return 1
+    else
+        echo "$path_residue"
+    fi
+}
+
+# Usage: dir_under_git_root MAYBE_CHILD
+#
+#   If MAYBE_CHILD is under the current git repository and exists, print the
+#   relative path from the git repository's top-level directory to MAYBE_CHILD,
+#   otherwise, exit with an error code.
+#
+dir_under_git_root() {
+    local rv
+    rv="$(under_dir "$(git_root)" "$1")"
+    [ -n "$rv" ] && echo "$rv"
+}
+
+shopt -s nullglob
+found_precious_dirs_files=( "${version_base_prefix}"*/"${var_base_basename}/precious_dirs" ) # This expands to an array of file paths...
+shopt -u nullglob
+
+exclude_flags=()
+
+for precious_dirs_file in "${found_precious_dirs_files[@]}"; do
+    # Make sure the precious directories (e.g. SOURCES_PATH, BASE_CACHE, SDK_PATH)
+    # are excluded from git-clean
+    echo "Found precious_dirs file: '${precious_dirs_file}'"
+
+    # Exclude the precious_dirs file itself
+    if dirs_file_exclude_fragment=$(dir_under_git_root "$(dirname "$precious_dirs_file")"); then
+        exclude_flags+=( --exclude="${dirs_file_exclude_fragment}/precious_dirs" )
+    fi
+
+    # Read each 'name=dir' pair from the precious_dirs file
+    while IFS='=' read -r name dir; do
+        # Add an exclusion flag if the precious directory is under the git root.
+        if under=$(dir_under_git_root "$dir"); then
+            echo "Avoiding ${name}: ${under}"
+            exclude_flags+=( --exclude="$under" )
+        fi
+    done < "$precious_dirs_file"
+done
+
+git clean -xdff "${exclude_flags[@]}"
diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign
new file mode 100755
index 0000000000000..45da43abf1c12
--- /dev/null
+++ b/contrib/guix/guix-codesign
@@ -0,0 +1,378 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Source the common prelude, which:
+# 1. Checks if we're at the top directory of the Dash Core repository
+# 2. Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## SANITY CHECKS ##
+###################
+
+################
+# Required non-builtin commands should be invocable
+################
+
+check_tools cat mkdir git guix
+
+################
+# Required env vars should be non-empty
+################
+
+cmd_usage() {
+    cat <<EOF
+Usage: env DETACHED_SIGS_REPO=<path/to/detached.sigs> \\
+           ./contrib/guix/guix-codesign
+
+EOF
+}
+
+if [ -z "$DETACHED_SIGS_REPO" ]; then
+    cmd_usage
+    exit 1
+fi
+
+################
+# GUIX_BUILD_OPTIONS should be empty
+################
+#
+# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that
+# can perform builds. This seems like what we want instead of
+# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually
+# _appended_ to normal command-line options. Meaning that they will take
+# precedence over the command-specific ADDITIONAL_GUIX_<CMD>_FLAGS.
+#
+# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's
+# existence here and direct users of this script to use our (more flexible)
+# custom environment variables.
+if [ -n "$GUIX_BUILD_OPTIONS" ]; then
+cat << EOF
+Error: Environment variable GUIX_BUILD_OPTIONS is not empty:
+  '$GUIX_BUILD_OPTIONS'
+
+Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset
+GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options
+across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a
+specific guix command.
+
+See contrib/guix/README.md for more details.
+EOF
+exit 1
+fi
+
+################
+# The codesignature git worktree should not be dirty
+################
+
+if ! git -C "$DETACHED_SIGS_REPO" diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then
+    cat << EOF
+ERR: The DETACHED CODESIGNATURE git worktree is dirty, which may lead to broken builds.
+
+     Aborting...
+
+Hint: To make your git worktree clean, you may want to:
+      1. Commit your changes,
+      2. Stash your changes, or
+      3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on
+         using a dirty worktree
+EOF
+    exit 1
+fi
+
+################
+# Build directories should not exist
+################
+
+# Default to building for all supported HOSTs (overridable by environment)
+export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin arm64-apple-darwin}"
+
+# Usage: distsrc_for_host HOST
+#
+#   HOST: The current platform triple we're building for
+#
+distsrc_for_host() {
+    echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}-codesigned"
+}
+
+# Accumulate a list of build directories that already exist...
+hosts_distsrc_exists=""
+for host in $HOSTS; do
+    if [ -e "$(distsrc_for_host "$host")" ]; then
+        hosts_distsrc_exists+=" ${host}"
+    fi
+done
+
+if [ -n "$hosts_distsrc_exists" ]; then
+# ...so that we can print them out nicely in an error message
+cat << EOF
+ERR: Build directories for this commit already exist for the following platform
+     triples you're attempting to build, probably because of previous builds.
+     Please remove, or otherwise deal with them prior to starting another build.
+
+     Aborting...
+
+Hint: To blow everything away, you may want to use:
+
+  $ ./contrib/guix/guix-clean
+
+Specifically, this will remove all files without an entry in the index,
+excluding the SDK directory, the depends download cache, the depends built
+packages cache, the garbage collector roots for Guix environments, and the
+output directory.
+EOF
+for host in $hosts_distsrc_exists; do
+    echo "    ${host} '$(distsrc_for_host "$host")'"
+done
+exit 1
+else
+    mkdir -p "$DISTSRC_BASE"
+fi
+
+
+################
+# Unsigned tarballs SHOULD exist
+################
+
+# Usage: outdir_for_host HOST SUFFIX
+#
+#   HOST: The current platform triple we're building for
+#
+outdir_for_host() {
+    echo "${OUTDIR_BASE}/${1}${2:+-${2}}"
+}
+
+
+unsigned_tarball_for_host() {
+    case "$1" in
+        *mingw*)
+            echo "$(outdir_for_host "$1")/${DISTNAME}-win64-unsigned.tar.gz"
+            ;;
+        *darwin*)
+            echo "$(outdir_for_host "$1")/${DISTNAME}-${1}-unsigned.tar.gz"
+            ;;
+        *)
+            exit 1
+            ;;
+    esac
+}
+
+# Accumulate a list of hosts whose unsigned tarballs are missing...
+hosts_unsigned_tarball_missing=""
+for host in $HOSTS; do
+    if [ !
-e "$(unsigned_tarball_for_host "$host")" ]; then + hosts_unsigned_tarball_missing+=" ${host}" + fi +done + +if [ -n "$hosts_unsigned_tarball_missing" ]; then + # ...so that we can print them out nicely in an error message + cat << EOF +ERR: Unsigned tarballs do not exist +... + +EOF +for host in $hosts_unsigned_tarball_missing; do + echo " ${host} '$(unsigned_tarball_for_host "$host")'" +done +exit 1 +fi + +################ +# Check that we can connect to the guix-daemon +################ + +cat << EOF +Checking that we can connect to the guix-daemon... + +Hint: If this hangs, you may want to try turning your guix-daemon off and on + again. + +EOF +if ! guix gc --list-failures > /dev/null; then + cat << EOF + +ERR: Failed to connect to the guix-daemon, please ensure that one is running and + reachable. +EOF + exit 1 +fi + +# Developer note: we could use `guix repl` for this check and run: +# +# (import (guix store)) (close-connection (open-connection)) +# +# However, the internal API is likely to change more than the CLI invocation + + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Determine the reference time used for determinism (overridable by environment) +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" + +# Make sure an output directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" + +# Usage: profiledir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}${2:+-${2}}" +} + +######### +# BUILD # +######### + +# Function to be called when codesigning for host ${1} and the user interrupts +# the codesign +int_trap() { +cat << EOF +** INT received while codesigning ${1}, you may want to clean up the relevant + work directories (e.g. distsrc-*) before recodesigning + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +} + +# Deterministically build Dash Core +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + # shellcheck disable=SC2030 +cat << EOF +INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set} + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/dash' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST" codesigned)' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)' + ...using detached signatures in: '${DETACHED_SIGS_REPO:?not set}' + ...bind-mounted in container to: '/detached-sigs' +EOF + + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. 
+        # Run the codesign script 'contrib/guix/libexec/codesign.sh' in the build
+        # container specified by 'contrib/guix/manifest.scm'.
+        #
+        # Explanation of `guix environment` flags:
+        #
+        #   --container        run command within an isolated container
+        #
+        #     Running in an isolated container minimizes build-time differences
+        #     between machines and improves reproducibility
+        #
+        #   --pure             unset existing environment variables
+        #
+        #     Same rationale as --container
+        #
+        #   --no-cwd           do not share current working directory with an
+        #                      isolated container
+        #
+        #     When --container is specified, the default behavior is to share
+        #     the current working directory with the isolated container at the
+        #     same exact path (e.g. mapping '/home/satoshi/dash/' to
+        #     '/home/satoshi/dash/'). This means that the $PWD inside the
+        #     container becomes a source of irreproducibility. --no-cwd disables
+        #     this behavior.
+        #
+        #   --share=SPEC       for containers, share writable host file system
+        #                      according to SPEC
+        #
+        #     --share="$PWD"=/dash
+        #
+        #                      maps our current working directory to /dash
+        #                      inside the isolated container, which we later cd
+        #                      into.
+        #
+        #     While we don't want to map our current working directory to the
+        #     same exact path (as this introduces irreproducibility), we do want
+        #     it to be at a _fixed_ path _somewhere_ inside the isolated
+        #     container so that we have something to build. '/dash' was
+        #     chosen arbitrarily.
+        #
+        #     ${SOURCES_PATH:+--share="$SOURCES_PATH"}
+        #
+        #                      make the downloaded depends sources path available
+        #                      inside the isolated container
+        #
+        #     The isolated container has no network access as it's in a
+        #     different network namespace from the main machine, so we have to
+        #     make the downloaded depends sources available to it. The sources
+        #     should have been downloaded prior to this invocation.
+        #
+        #     ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"}
+        #
+        #                      fetch substitutes from SUBSTITUTE_URLS if they are
+        #                      authorized
+        #
+        #     Depending on the user's security model, it may be desirable to use
+        #     substitutes (pre-built packages) from servers that the user trusts.
+        #     Please read the README.md in the same directory as this file for
+        #     more information.
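+        #
+        # (Editor's sketch, for orientation only: stripped to its skeleton, the
+        # invocation below has the shape
+        #
+        #     guix environment --container --pure --no-cwd \
+        #         --share="$PWD"=/dash \
+        #         -- bash -c 'cd /dash && <some command>'
+        #
+        # i.e. an isolated container with an empty environment whose only view
+        # of the host is the explicitly shared paths; the real call below
+        # additionally pins Guix itself via the time-machine wrapper defined in
+        # 'contrib/guix/libexec/prelude.bash'.)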
+        #
+        # shellcheck disable=SC2086,SC2031
+        time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \
+                                 --container \
+                                 --pure \
+                                 --no-cwd \
+                                 --share="$PWD"=/dash \
+                                 --share="$DISTSRC_BASE"=/distsrc-base \
+                                 --share="$OUTDIR_BASE"=/outdir-base \
+                                 --share="$DETACHED_SIGS_REPO"=/detached-sigs \
+                                 --expose="$(git rev-parse --git-common-dir)" \
+                                 --expose="$(git -C "$DETACHED_SIGS_REPO" rev-parse --git-common-dir)" \
+                                 ${SOURCES_PATH:+--share="$SOURCES_PATH"} \
+                                 --cores="$JOBS" \
+                                 --keep-failed \
+                                 --fallback \
+                                 --link-profile \
+                                 --root="$(profiledir_for_host "${HOST}" codesigned)" \
+                                 ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
+                                 ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
+                                 -- env HOST="$host" \
+                                        DISTNAME="$DISTNAME" \
+                                        JOBS="$JOBS" \
+                                        SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
+                                        ${V:+V=1} \
+                                        ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
+                                        DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \
+                                        OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)" \
+                                        DIST_ARCHIVE_BASE=/outdir-base/dist-archive \
+                                        DETACHED_SIGS_REPO=/detached-sigs \
+                                        UNSIGNED_TARBALL="$(OUTDIR_BASE=/outdir-base && unsigned_tarball_for_host "$HOST")" \
+                                        bash -c "cd /dash && bash contrib/guix/libexec/codesign.sh"
+    )
+
+done
diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify
new file mode 100755
index 0000000000000..ffcfba736092c
--- /dev/null
+++ b/contrib/guix/guix-verify
@@ -0,0 +1,148 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Source the common prelude, which:
+#   1. Checks if we're at the top directory of the Dash Core repository
+#   2. Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## Sanity Checks ##
+###################
+
+################
+# Required non-builtin commands should be invokable
+################
+
+check_tools cat diff gpg
+
+################
+# Required env vars should be non-empty
+################
+
+cmd_usage() {
+cat <<EOF
+Synopsis:
+
+    env GUIX_SIGS_REPO=<path/to/guix.sigs> ./contrib/guix/guix-verify
+
+EOF
+}
+
+if [ -z "$GUIX_SIGS_REPO" ]; then
+    cmd_usage
+    exit 1
+fi
+
+################
+# GUIX_SIGS_REPO should exist as a directory
+################
+
+if [ ! -d "$GUIX_SIGS_REPO" ]; then
+cat << EOF
+ERR: The specified GUIX_SIGS_REPO is not an existing directory:
+
+  '$GUIX_SIGS_REPO'
+
+Hint: Please clone the guix.sigs repository and point to it with the
+      GUIX_SIGS_REPO environment variable.
+
+EOF
+cmd_usage
+exit 1
+fi
+
+##############
+## Verify ##
+##############
+
+OUTSIGDIR_BASE="${GUIX_SIGS_REPO}/${VERSION}"
+echo "Looking for signature directories in '${OUTSIGDIR_BASE}'"
+echo ""
+
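+# Note (editor's annotation, not part of the original script): each signer
+# directory under ${OUTSIGDIR_BASE} is expected to contain a manifest with a
+# detached, ASCII-armored GPG signature next to it, so per-signer verification
+# boils down to something like:
+#
+#     gpg --verify <signer>/noncodesigned.SHA256SUMS.asc \
+#                  <signer>/noncodesigned.SHA256SUMS
+#
+# (<signer> is a placeholder directory name used only in this sketch.)
+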
+# Usage: verify compare_manifest current_manifest
+verify() {
+    local compare_manifest="$1"
+    local current_manifest="$2"
+    if ! gpg --quiet --batch --verify "$current_manifest".asc "$current_manifest" 1>&2; then
+        echo "ERR: Failed to verify GPG signature in '${current_manifest}'"
+        echo ""
+        echo "Hint: Either the signature is invalid or the public key is missing"
+        echo ""
+        failure=1
+    elif ! diff --report-identical "$compare_manifest" "$current_manifest" 1>&2; then
+        echo "ERR: The SHA256SUMS attestation in these two directories differ:"
+        echo "    '${compare_manifest}'"
+        echo "    '${current_manifest}'"
+        echo ""
+        failure=1
+    else
+        echo "Verified: '${current_manifest}'"
+        echo ""
+    fi
+}
+
+shopt -s nullglob
+all_noncodesigned=( "$OUTSIGDIR_BASE"/*/noncodesigned.SHA256SUMS )
+shopt -u nullglob
+
+echo "--------------------"
+echo ""
+if (( ${#all_noncodesigned[@]} )); then
+    compare_noncodesigned="${all_noncodesigned[0]}"
+
+    for current_manifest in "${all_noncodesigned[@]}"; do
+        verify "$compare_noncodesigned" "$current_manifest"
+    done
+
+    echo "DONE: Checking output signatures for noncodesigned.SHA256SUMS"
+    echo ""
+else
+    echo "WARN: No signature directories with noncodesigned.SHA256SUMS found"
+    echo ""
+fi
+
+shopt -s nullglob
+all_all=( "$OUTSIGDIR_BASE"/*/all.SHA256SUMS )
+shopt -u nullglob
+
+echo "--------------------"
+echo ""
+if (( ${#all_all[@]} )); then
+    compare_all="${all_all[0]}"
+
+    for current_manifest in "${all_all[@]}"; do
+        verify "$compare_all" "$current_manifest"
+    done
+
+    # Sanity check: there should be no entries that exist in
+    # noncodesigned.SHA256SUMS that don't exist in all.SHA256SUMS
+    if [[ "$(comm -23 <(sort "$compare_noncodesigned") <(sort "$compare_all") | wc -c)" -ne 0 ]]; then
+        echo "ERR: There are unique lines in noncodesigned.SHA256SUMS which"
+        echo "     do not exist in all.SHA256SUMS, something went very wrong."
+        exit 1
+    fi
+
+    echo "DONE: Checking output signatures for all.SHA256SUMS"
+    echo ""
+else
+    echo "WARN: No signature directories with all.SHA256SUMS found"
+    echo ""
+fi
+
+echo "===================="
+echo ""
+if (( ${#all_noncodesigned[@]} + ${#all_all[@]} == 0 )); then
+    echo "ERR: Unable to perform any verifications as no signature directories"
+    echo "     were found"
+    echo ""
+    exit 1
+fi
+
+if [ -n "$failure" ]; then
+    exit 1
+fi
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 62d1f1edc7120..8c3c52ea10b05 100644
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -3,9 +3,38 @@ export LC_ALL=C
 set -e -o pipefail
 export TZ=UTC
 
-# Check that environment variables assumed to be set by the environment are set
-echo "Building for platform triple ${HOST:?not set} with reference timestamp ${SOURCE_DATE_EPOCH:?not set}..."
-echo "At most ${MAX_JOBS:?not set} jobs will run at once..."
+# Although Guix _does_ set umask when building its own packages (in our case,
+# this is all packages in manifest.scm), it does not set it for `guix
+# environment`. It does make sense for at least `guix environment --container`
+# to set umask, so if that change gets merged upstream and we bump the
+# time-machine to a commit which includes the aforementioned change, we can
+# remove this line.
+#
+# This line should be placed before any commands which create files.
+umask 0022
+
+if [ -n "$V" ]; then
+    # Print both unexpanded (-v) and expanded (-x) forms of commands as they are
+    # read from this file.
+ set -vx + # Set VERBOSE for CMake-based builds + export VERBOSE="$V" +fi + +# Check that required environment variables are set +cat << EOF +Required environment variables as seen inside the container: + DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set} + DISTNAME: ${DISTNAME:?not set} + HOST: ${HOST:?not set} + SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set} + JOBS: ${JOBS:?not set} + DISTSRC: ${DISTSRC:?not set} + OUTDIR: ${OUTDIR:?not set} +EOF + +ACTUAL_OUTDIR="${OUTDIR}" +OUTDIR="${DISTSRC}/output" ##################### # Environment Setup # @@ -15,19 +44,6 @@ echo "At most ${MAX_JOBS:?not set} jobs will run at once..." # $HOSTs after successfully building. BASEPREFIX="${PWD}/depends" -# Setup an output directory for our build -OUTDIR="${OUTDIR:-${PWD}/output}" -[ -e "$OUTDIR" ] || mkdir -p "$OUTDIR" - -# Setup the directory where our Bitcoin Core build for HOST will occur -DISTSRC="${DISTSRC:-${PWD}/distsrc-${HOST}}" -if [ -e "$DISTSRC" ]; then - echo "DISTSRC directory '${DISTSRC}' exists, probably because of previous builds... Aborting..." - exit 1 -else - mkdir -p "$DISTSRC" -fi - # Given a package name and an output name, return the path of that output in our # current guix environment store_path() { @@ -37,37 +53,77 @@ store_path() { --expression='s|"[[:space:]]*$||' } -# Set environment variables to point Guix's cross-toolchain to the right + +# Set environment variables to point the NATIVE toolchain to the right +# includes/libs +NATIVE_GCC="$(store_path gcc-toolchain)" +NATIVE_GCC_STATIC="$(store_path gcc-toolchain static)" + +unset LIBRARY_PATH +unset CPATH +unset C_INCLUDE_PATH +unset CPLUS_INCLUDE_PATH +unset OBJC_INCLUDE_PATH +unset OBJCPLUS_INCLUDE_PATH + +export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC}/lib64:${NATIVE_GCC_STATIC}/lib:${NATIVE_GCC_STATIC}/lib64" +export C_INCLUDE_PATH="${NATIVE_GCC}/include" +export CPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include" +export OBJC_INCLUDE_PATH="${NATIVE_GCC}/include" +export OBJCPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include" + +prepend_to_search_env_var() { + export "${1}=${2}${!1:+:}${!1}" +} + +case "$HOST" in + *darwin*) + # When targeting darwin, zlib is required by native_libdmg-hfsplus. + zlib_store_path=$(store_path "zlib") + zlib_static_store_path=$(store_path "zlib" static) + + prepend_to_search_env_var LIBRARY_PATH "${zlib_static_store_path}/lib:${zlib_store_path}/lib" + prepend_to_search_env_var C_INCLUDE_PATH "${zlib_store_path}/include" + prepend_to_search_env_var CPLUS_INCLUDE_PATH "${zlib_store_path}/include" + prepend_to_search_env_var OBJC_INCLUDE_PATH "${zlib_store_path}/include" + prepend_to_search_env_var OBJCPLUS_INCLUDE_PATH "${zlib_store_path}/include" +esac + +# Set environment variables to point the CROSS toolchain to the right # includes/libs for $HOST case "$HOST" in *mingw*) # Determine output paths to use in CROSS_* environment variables CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... 
        CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one)
 
-        NATIVE_GCC="$(store_path gcc-glibc-2.27-toolchain)"
-        export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC}/lib64"
-        export CPATH="${NATIVE_GCC}/include"
-
+        # The search path ordering is generally:
+        #    1. gcc-related search paths
+        #    2. libc-related search paths
+        #    3. kernel-header-related search paths (not applicable to mingw-w64 hosts)
         export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include"
         export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}"
-        export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib"
+        export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib"
+        ;;
+    *darwin*)
+        # The CROSS toolchain for darwin uses the SDK and ignores environment variables.
+        # See depends/hosts/darwin.mk for more details.
+        ;;
     *linux*)
         CROSS_GLIBC="$(store_path "glibc-cross-${HOST}")"
         CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)"
         CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")"
         CROSS_GCC="$(store_path "gcc-cross-${HOST}")"
-        CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories...
+        CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)"
+        CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories...
         CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one)
 
-        # NOTE: CROSS_C_INCLUDE_PATH is missing ${CROSS_GCC_LIB}/include-fixed, because
-        # the limits.h in it is missing a '#include_next <limits.h>'
-        export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include"
+        export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include"
         export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}"
-        export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib"
+        export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib"
         ;;
     *)
         exit 1 ;;
@@ -76,14 +132,25 @@ esac
 
 # Sanity check CROSS_*_PATH directories
 IFS=':' read -ra PATHS <<< "${CROSS_C_INCLUDE_PATH}:${CROSS_CPLUS_INCLUDE_PATH}:${CROSS_LIBRARY_PATH}"
 for p in "${PATHS[@]}"; do
-    if [ ! -d "$p" ]; then
+    if [ -n "$p" ] && [ ! -d "$p" ]; then
         echo "'$p' doesn't exist or isn't a directory... Aborting..."
exit 1 fi done # Disable Guix ld auto-rpath behavior -export GUIX_LD_WRAPPER_DISABLE_RPATH=yes +case "$HOST" in + *darwin*) + # The auto-rpath behavior is necessary for darwin builds as some native + # tools built by depends refer to and depend on Guix-built native + # libraries + # + # After the native packages in depends are built, the ld wrapper should + # no longer affect our build, as clang would instead reach for + # x86_64-apple-darwin-ld from cctools + ;; + *) export GUIX_LD_WRAPPER_DISABLE_RPATH=yes ;; +esac # Make /usr/bin if it doesn't exist [ -e /usr/bin ] || mkdir -p /usr/bin @@ -97,95 +164,113 @@ case "$HOST" in *linux*) glibc_dynamic_linker=$( case "$HOST" in - i686-linux-gnu) echo /lib/ld-linux.so.2 ;; - x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; - arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; - aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; - riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; - *) exit 1 ;; + x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; + arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; + aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; + riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; + powerpc64-linux-gnu) echo /lib64/ld64.so.1;; + powerpc64le-linux-gnu) echo /lib64/ld64.so.2;; + *) exit 1 ;; esac ) ;; esac # Environment variables for determinism -export QT_RCC_TEST=1 -export QT_RCC_SOURCE_DATE_OVERRIDE=1 export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" export TZ="UTC" +case "$HOST" in + *darwin*) + # cctools AR, unlike GNU binutils AR, does not have a deterministic mode + # or a configure flag to enable determinism by default, it only + # understands if this env-var is set or not. See: + # + # https://github.com/tpoechtrager/cctools-port/blob/55562e4073dea0fbfd0b20e0bf69ffe6390c7f97/cctools/ar/archive.c#L334 + export ZERO_AR_DATE=yes + ;; +esac #################### # Depends Building # #################### # Build the depends tree, overriding variables that assume multilib gcc -make -C depends --jobs="$MAX_JOBS" HOST="$HOST" \ +make -C depends --jobs="$JOBS" HOST="$HOST" \ ${V:+V=1} \ ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \ - i686_linux_CC=i686-linux-gnu-gcc \ - i686_linux_CXX=i686-linux-gnu-g++ \ - i686_linux_AR=i686-linux-gnu-ar \ - i686_linux_RANLIB=i686-linux-gnu-ranlib \ - i686_linux_NM=i686-linux-gnu-nm \ - i686_linux_STRIP=i686-linux-gnu-strip \ + ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH+SDK_PATH="$SDK_PATH"} \ x86_64_linux_CC=x86_64-linux-gnu-gcc \ x86_64_linux_CXX=x86_64-linux-gnu-g++ \ x86_64_linux_AR=x86_64-linux-gnu-ar \ x86_64_linux_RANLIB=x86_64-linux-gnu-ranlib \ x86_64_linux_NM=x86_64-linux-gnu-nm \ x86_64_linux_STRIP=x86_64-linux-gnu-strip \ - qt_config_opts_i686_linux='-platform linux-g++ -xplatform bitcoin-linux-g++' + qt_config_opts_x86_64_linux='-platform linux-g++ -xplatform bitcoin-linux-g++' \ + FORCE_USE_SYSTEM_CLANG=1 ########################### # Source Tarball Building # ########################### -# Define DISTNAME variable. -# shellcheck source=contrib/gitian-descriptors/assign_DISTNAME -source contrib/gitian-descriptors/assign_DISTNAME - -GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz" +GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}.tar.gz" # Create the source tarball if not already there if [ ! 
-e "$GIT_ARCHIVE" ]; then mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD fi +mkdir -p "$OUTDIR" + ########################### # Binary Tarball Building # ########################### # CONFIGFLAGS -CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" +CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" case "$HOST" in - *linux*) CONFIGFLAGS+=" --enable-glibc-back-compat" ;; + *linux*) CONFIGFLAGS+=" --disable-threadlocal" ;; esac # CFLAGS HOST_CFLAGS="-O2 -g" +HOST_CFLAGS+=$(find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) case "$HOST" in *linux*) HOST_CFLAGS+=" -ffile-prefix-map=${PWD}=." ;; *mingw*) HOST_CFLAGS+=" -fno-ident" ;; + *darwin*) unset HOST_CFLAGS ;; esac # CXXFLAGS HOST_CXXFLAGS="$HOST_CFLAGS" +case "$HOST" in + arm-linux-gnueabihf) HOST_CXXFLAGS="${HOST_CXXFLAGS} -Wno-psabi" ;; +esac + # LDFLAGS case "$HOST" in *linux*) HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++ -Wl,-O2" ;; *mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;; esac +# Using --no-tls-get-addr-optimize retains compatibility with glibc 2.17, by +# avoiding a PowerPC64 optimisation available in glibc 2.22 and later. +# https://sourceware.org/binutils/docs-2.35/ld/PowerPC64-ELF64.html +case "$HOST" in + *powerpc64*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,--no-tls-get-addr-optimize" ;; +esac + # Make $HOST-specific native binaries from depends available in $PATH export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" +mkdir -p "$DISTSRC" ( cd "$DISTSRC" # Extract the source tarball - tar -xf "${GIT_ARCHIVE}" + tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh @@ -197,26 +282,24 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" --disable-maintainer-mode \ --disable-dependency-tracking \ ${CONFIGFLAGS} \ - CFLAGS="${HOST_CFLAGS}" \ - CXXFLAGS="${HOST_CXXFLAGS}" \ + ${HOST_CFLAGS:+CFLAGS="${HOST_CFLAGS}"} \ + ${HOST_CXXFLAGS:+CXXFLAGS="${HOST_CXXFLAGS}"} \ ${HOST_LDFLAGS:+LDFLAGS="${HOST_LDFLAGS}"} sed -i.old 's/-lstdc++ //g' {./,src/dashbls/,src/secp256k1/}{config.status,libtool} - # Build Bitcoin Core - make --jobs="$MAX_JOBS" ${V:+V=1} + # Build Dash Core + make --jobs="$JOBS" ${V:+V=1} - # Perform basic ELF security checks on a series of executables. + # Check that symbol/security checks tools are sane. + make test-security-check ${V:+V=1} + # Perform basic security checks on a series of executables. make -C src --jobs=1 check-security ${V:+V=1} + # Check that executables only contain allowed version symbols. + make -C src --jobs=1 check-symbols ${V:+V=1} - case "$HOST" in - *linux*|*mingw*) - # Check that executables only contain allowed gcc, glibc and libstdc++ - # version symbols for Linux distro back-compatibility. - make -C src --jobs=1 check-symbols ${V:+V=1} - ;; - esac + mkdir -p "$OUTDIR" # Make the os-specific installers case "$HOST" in @@ -225,14 +308,42 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" ;; esac - # Setup the directory where our Bitcoin Core build for HOST will be + # Setup the directory where our Dash Core build for HOST will be # installed. This directory will also later serve as the input for our # binary tarballs. 
INSTALLPATH="${PWD}/installed/${DISTNAME}" mkdir -p "${INSTALLPATH}" - # Install built Bitcoin Core to $INSTALLPATH - make install DESTDIR="${INSTALLPATH}" ${V:+V=1} + # Install built Dash Core to $INSTALLPATH + case "$HOST" in + *darwin*) + make install-strip DESTDIR="${INSTALLPATH}" ${V:+V=1} + ;; + *) + make install DESTDIR="${INSTALLPATH}" ${V:+V=1} + ;; + esac + case "$HOST" in + *darwin*) + make osx_volname ${V:+V=1} + make deploydir ${V:+V=1} + mkdir -p "unsigned-app-${HOST}" + cp --target-directory="unsigned-app-${HOST}" \ + osx_volname \ + contrib/macdeploy/detached-sig-{apply,create}.sh \ + "${BASEPREFIX}/${HOST}"/native/bin/dmg + mv --target-directory="unsigned-app-${HOST}" dist + ( + cd "unsigned-app-${HOST}" + find . -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" && exit 1 ) + ) + make deploy ${V:+V=1} OSX_DMG="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.dmg" + ;; + esac ( cd installed @@ -247,13 +358,18 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" find . -name "lib*.a" -delete # Prune pkg-config files - rm -r "${DISTNAME}/lib/pkgconfig" + rm -rf "${DISTNAME}/lib/pkgconfig" - # Split binaries and libraries from their debug symbols - { - find "${DISTNAME}/bin" -type f -executable -print0 - find "${DISTNAME}/lib" -type f -print0 - } | xargs -0 -n1 -P"$MAX_JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg + case "$HOST" in + *darwin*) ;; + *) + # Split binaries and libraries from their debug symbols + { + find "${DISTNAME}/bin" -type f -executable -print0 + find "${DISTNAME}/lib" -type f -print0 + } | xargs -0 -P"$JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg + ;; + esac case "$HOST" in *mingw*) @@ -293,22 +409,44 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" \ || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" && exit 1 ) ;; + *darwin*) + find "${DISTNAME}" -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 ) + ;; esac - ) -) + ) # $DISTSRC/installed -case "$HOST" in - *mingw*) - cp -rf --target-directory=. contrib/windeploy - ( - cd ./windeploy - mkdir unsigned - cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" - find . -print0 \ - | sort --zero-terminated \ - | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ - | gzip -9n > "${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz" \ - || ( rm -f "${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz" && exit 1 ) - ) - ;; -esac + case "$HOST" in + *mingw*) + cp -rf --target-directory=. contrib/windeploy + ( + cd ./windeploy + mkdir -p unsigned + cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + find . 
-print0 \
+                | sort --zero-terminated \
+                | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
+                | gzip -9n > "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" \
+                || ( rm -f "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" && exit 1 )
+            )
+            ;;
+    esac
+) # $DISTSRC
+
+rm -rf "$ACTUAL_OUTDIR"
+mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
+    || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 )
+
+(
+    cd /outdir-base
+    {
+        echo "$GIT_ARCHIVE"
+        find "$ACTUAL_OUTDIR" -type f
+    } | xargs realpath --relative-base="$PWD" \
+        | xargs sha256sum \
+        | sort -k2 \
+        | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
+)
diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh
new file mode 100755
index 0000000000000..59df8e87fac22
--- /dev/null
+++ b/contrib/guix/libexec/codesign.sh
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+export TZ=UTC
+
+# Although Guix _does_ set umask when building its own packages (in our case,
+# this is all packages in manifest.scm), it does not set it for `guix
+# environment`. It does make sense for at least `guix environment --container`
+# to set umask, so if that change gets merged upstream and we bump the
+# time-machine to a commit which includes the aforementioned change, we can
+# remove this line.
+#
+# This line should be placed before any commands which create files.
+umask 0022
+
+if [ -n "$V" ]; then
+    # Print both unexpanded (-v) and expanded (-x) forms of commands as they are
+    # read from this file.
+    set -vx
+    # Set VERBOSE for CMake-based builds
+    export VERBOSE="$V"
+fi
+
+# Check that required environment variables are set
+cat << EOF
+Required environment variables as seen inside the container:
+    UNSIGNED_TARBALL: ${UNSIGNED_TARBALL:?not set}
+    DETACHED_SIGS_REPO: ${DETACHED_SIGS_REPO:?not set}
+    DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set}
+    DISTNAME: ${DISTNAME:?not set}
+    HOST: ${HOST:?not set}
+    SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set}
+    DISTSRC: ${DISTSRC:?not set}
+    OUTDIR: ${OUTDIR:?not set}
+EOF
+
+ACTUAL_OUTDIR="${OUTDIR}"
+OUTDIR="${DISTSRC}/output"
+
+git_head_version() {
+    local recent_tag
+    if recent_tag="$(git -C "$1" describe --exact-match HEAD 2> /dev/null)"; then
+        echo "${recent_tag#v}"
+    else
+        git -C "$1" rev-parse --short=12 HEAD
+    fi
+}
+
+CODESIGNATURE_GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}-codesignatures-$(git_head_version "$DETACHED_SIGS_REPO").tar.gz"
+
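+# Note (editor's annotation, not part of the original script): git_head_version
+# prefers an exact release tag and only falls back to a commit hash, e.g. with
+# hypothetical values:
+#
+#     git -C "$DETACHED_SIGS_REPO" describe --exact-match HEAD  # v22.0.0 -> "22.0.0"
+#     git -C "$DETACHED_SIGS_REPO" rev-parse --short=12 HEAD    # -> 0123456789ab (untagged)
+#
+# so tagged signature repos produce stable, human-readable archive names.
+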
-e "$CODESIGNATURE_GIT_ARCHIVE" ]; then + mkdir -p "$(dirname "$CODESIGNATURE_GIT_ARCHIVE")" + git -C "$DETACHED_SIGS_REPO" archive --output="$CODESIGNATURE_GIT_ARCHIVE" HEAD +fi + +mkdir -p "$OUTDIR" + +mkdir -p "$DISTSRC" +( + cd "$DISTSRC" + + tar -xf "$UNSIGNED_TARBALL" + + mkdir -p codesignatures + tar -C codesignatures -xf "$CODESIGNATURE_GIT_ARCHIVE" + + case "$HOST" in + *mingw*) + find "$PWD" -name "*-unsigned.exe" | while read -r infile; do + infile_base="$(basename "$infile")" + + # Codesigned *-unsigned.exe and output to OUTDIR + osslsigncode attach-signature \ + -in "$infile" \ + -out "${OUTDIR}/${infile_base/-unsigned}" \ + -sigin codesignatures/win/"$infile_base".pem + done + ;; + *darwin*) + # Apply detached codesignatures to dist/ (in-place) + signapple apply dist/Dash-Qt.app codesignatures/osx/dist + + # Make an uncompressed DMG from dist/ + xorrisofs -D -l -V "$(< osx_volname)" -no-pad -r -dir-mode 0755 \ + -o uncompressed.dmg \ + dist \ + -- -volume_date all_file_dates ="$SOURCE_DATE_EPOCH" + + # Compress uncompressed.dmg and output to OUTDIR + ./dmg dmg uncompressed.dmg "${OUTDIR}/${DISTNAME}-${HOST}.dmg" + ;; + *) + exit 1 + ;; + esac +) # $DISTSRC + +rm -rf "$ACTUAL_OUTDIR" +mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ + || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$UNSIGNED_TARBALL" + echo "$CODESIGNATURE_GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sort -k2 \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash new file mode 100644 index 0000000000000..f9b541b510b8e --- /dev/null +++ b/contrib/guix/libexec/prelude.bash @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# shellcheck source=../../shell/realpath.bash +source contrib/shell/realpath.bash + +# shellcheck source=../../shell/git-utils.bash +source contrib/shell/git-utils.bash + +################ +# Required non-builtin commands should be invocable +################ + +check_tools() { + for cmd in "$@"; do + if ! command -v "$cmd" > /dev/null 2>&1; then + echo "ERR: This script requires that '$cmd' is installed and available in your \$PATH" + exit 1 + fi + done +} + +check_tools cat env readlink dirname basename git + +################ +# We should be at the top directory of the repository +################ + +same_dir() { + local resolved1 resolved2 + resolved1="$(bash_realpath "${1}")" + resolved2="$(bash_realpath "${2}")" + [ "$resolved1" = "$resolved2" ] +} + +if ! same_dir "${PWD}" "$(git_root)"; then +cat << EOF +ERR: This script must be invoked from the top level of the git repository + +Hint: This may look something like: + env FOO=BAR ./contrib/guix/guix- + +EOF +exit 1 +fi + +################ +# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility +# across time. 
+time-machine() { + # shellcheck disable=SC2086 + guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \ + --commit=998eda3067c7d21e0d9bb3310d2f5a14b8f1c681 \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \ + -- "$@" +} + + +################ +# Set common variables +################ + +VERSION="${FORCE_VERSION:-$(git_head_version)}" +DISTNAME="${DISTNAME:-dashcore-${VERSION}}" + +version_base_prefix="${PWD}/guix-build-" +VERSION_BASE="${version_base_prefix}${VERSION}" # TOP + +DISTSRC_BASE="${DISTSRC_BASE:-${VERSION_BASE}}" + +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" + +var_base_basename="var" +VAR_BASE="${VAR_BASE:-${VERSION_BASE}/${var_base_basename}}" + +profiles_base_basename="profiles" +PROFILES_BASE="${PROFILES_BASE:-${VAR_BASE}/${profiles_base_basename}}" diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 346a0cb0bea57..735b3f33ff95a 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -3,29 +3,50 @@ (gnu packages autotools) (gnu packages base) (gnu packages bash) + (gnu packages certs) + (gnu packages cdrom) (gnu packages check) + (gnu packages cmake) (gnu packages commencement) (gnu packages compression) (gnu packages cross-base) + (gnu packages curl) (gnu packages file) (gnu packages gawk) (gnu packages gcc) + (gnu packages gnome) (gnu packages installers) (gnu packages linux) + (gnu packages llvm) (gnu packages mingw) + (gnu packages moreutils) (gnu packages perl) (gnu packages pkg-config) (gnu packages python) + (gnu packages python-crypto) + (gnu packages python-web) (gnu packages shells) (gnu packages bison) + (gnu packages tls) (gnu packages version-control) (guix build-system gnu) + (guix build-system python) (guix build-system trivial) + (guix download) (guix gexp) + (guix git-download) + ((guix licenses) #:prefix license:) (guix packages) (guix profiles) (guix utils)) +(define-syntax-rule (search-our-patches file-name ...) + "Return the list of absolute file names corresponding to each +FILE-NAME found in ./patches relative to the current file." + (parameterize + ((%patch-path (list (string-append (dirname (current-filename)) "/patches")))) + (list (search-patch file-name) ...))) + (define (make-ssp-fixed-gcc xgcc) "Given a XGCC package, return a modified package that uses the SSP function from glibc instead of from libssp.so. Our `symbol-check' script will complain if @@ -34,28 +55,33 @@ we link against libssp.so, and thus will ensure that this works properly. 
Taken from: http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" (package - (inherit xgcc) - (arguments - (substitute-keyword-arguments (package-arguments xgcc) - ((#:make-flags flags) - `(cons "gcc_cv_libc_provides_ssp=yes" ,flags)))))) + (inherit xgcc) + (arguments + (substitute-keyword-arguments (package-arguments xgcc) + ((#:make-flags flags) + `(cons "gcc_cv_libc_provides_ssp=yes" ,flags)))))) (define (make-gcc-rpath-link xgcc) "Given a XGCC package, return a modified package that replace each instance of -rpath in the default system spec that's inserted by Guix with -rpath-link" (package - (inherit xgcc) - (arguments - (substitute-keyword-arguments (package-arguments xgcc) - ((#:phases phases) - `(modify-phases ,phases - (add-after 'pre-configure 'replace-rpath-with-rpath-link - (lambda _ - (substitute* (cons "gcc/config/rs6000/sysv4.h" - (find-files "gcc/config" - "^gnu-user.*\\.h$")) - (("-rpath=") "-rpath-link=")) - #t)))))))) + (inherit xgcc) + (arguments + (substitute-keyword-arguments (package-arguments xgcc) + ((#:phases phases) + `(modify-phases ,phases + (add-after 'pre-configure 'replace-rpath-with-rpath-link + (lambda _ + (substitute* (cons "gcc/config/rs6000/sysv4.h" + (find-files "gcc/config" + "^gnu-user.*\\.h$")) + (("-rpath=") "-rpath-link=")) + #t)))))))) + +(define building-on (string-append (list-ref (string-split (%current-system) #\-) 0) "-guix-linux-gnu")) + +(define (explicit-cross-configure package) + (package-with-extra-configure-variable package "--build" building-on)) (define (make-cross-toolchain target base-gcc-for-libc @@ -66,9 +92,9 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" (let* ((xbinutils (cross-binutils target)) ;; 1. Build a cross-compiling gcc without targeting any libc, derived ;; from BASE-GCC-FOR-LIBC - (xgcc-sans-libc (cross-gcc target - #:xgcc base-gcc-for-libc - #:xbinutils xbinutils)) + (xgcc-sans-libc (explicit-cross-configure (cross-gcc target + #:xgcc base-gcc-for-libc + #:xbinutils xbinutils))) ;; 2. Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived ;; from BASE-KERNEL-HEADERS (xkernel (cross-kernel-headers target @@ -77,17 +103,17 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" xbinutils)) ;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL, ;; derived from BASE-LIBC - (xlibc (cross-libc target - base-libc - xgcc-sans-libc - xbinutils - xkernel)) + (xlibc (explicit-cross-configure (cross-libc target + base-libc + xgcc-sans-libc + xbinutils + xkernel))) ;; 4. 
Build a cross-compiling gcc targeting XLIBC, derived from ;; BASE-GCC - (xgcc (cross-gcc target - #:xgcc base-gcc - #:xbinutils xbinutils - #:libc xlibc))) + (xgcc (explicit-cross-configure (cross-gcc target + #:xgcc base-gcc + #:xbinutils xbinutils + #:libc xlibc)))) ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and ;; XGCC (package @@ -100,29 +126,48 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" `(("binutils" ,xbinutils) ("libc" ,xlibc) ("libc:static" ,xlibc "static") - ("gcc" ,xgcc))) + ("gcc" ,xgcc) + ("gcc-lib" ,xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) (home-page (package-home-page xgcc)) (license (package-license xgcc))))) +(define base-gcc gcc-10) +(define base-linux-kernel-headers linux-libre-headers-5.15) + +;; https://gcc.gnu.org/install/configure.html +(define (hardened-gcc gcc) + (package-with-extra-configure-variable ( + package-with-extra-configure-variable gcc + "--enable-default-ssp" "yes") + "--enable-default-pie" "yes")) + (define* (make-bitcoin-cross-toolchain target - #:key - (base-gcc-for-libc gcc-5) - (base-kernel-headers linux-libre-headers-4.19) - (base-libc glibc-2.27) - (base-gcc (make-gcc-rpath-link gcc-9))) + #:key + (base-gcc-for-libc base-gcc) + (base-kernel-headers base-linux-kernel-headers) + (base-libc (make-glibc-with-bind-now (make-glibc-without-werror glibc-2.24))) + (base-gcc (make-gcc-rpath-link (hardened-gcc base-gcc)))) "Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values -desirable for building Bitcoin Core release binaries." +desirable for building Dash Core release binaries." (make-cross-toolchain target - base-gcc-for-libc - base-kernel-headers - base-libc - base-gcc)) + base-gcc-for-libc + base-kernel-headers + base-libc + base-gcc)) (define (make-gcc-with-pthreads gcc) - (package-with-extra-configure-variable gcc "--enable-threads" "posix")) + (package-with-extra-configure-variable + (package-with-extra-patches gcc + (search-our-patches "gcc-10-remap-guix-store.patch")) + "--enable-threads" "posix")) + +(define (make-mingw-w64-cross-gcc cross-gcc) + (package-with-extra-patches cross-gcc + (search-our-patches "vmov-alignment.patch" + "gcc-broken-longjmp.patch"))) (define (make-mingw-pthreads-cross-toolchain target) "Create a cross-compilation toolchain package for TARGET" @@ -130,7 +175,7 @@ desirable for building Bitcoin Core release binaries." (pthreads-xlibc mingw-w64-x86_64-winpthreads) (pthreads-xgcc (make-gcc-with-pthreads (cross-gcc target - #:xgcc (make-ssp-fixed-gcc gcc-9) + #:xgcc (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc base-gcc)) #:xbinutils xbinutils #:libc pthreads-xlibc)))) ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and @@ -144,20 +189,405 @@ desirable for building Bitcoin Core release binaries." 
(propagated-inputs `(("binutils" ,xbinutils) ("libc" ,pthreads-xlibc) - ("gcc" ,pthreads-xgcc))) + ("gcc" ,pthreads-xgcc) + ("gcc-lib" ,pthreads-xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) (home-page (package-home-page pthreads-xgcc)) (license (package-license pthreads-xgcc))))) +(define (make-nsis-for-gcc-10 base-nsis) + (package-with-extra-patches base-nsis + (search-our-patches "nsis-gcc-10-memmove.patch"))) + +(define osslsigncode + (package + (name "osslsigncode") + (version "2.0") + (source (origin + (method url-fetch) + (uri (string-append "https://github.com/mtrojnar/" + name "/archive/" version ".tar.gz")) + (sha256 + (base32 + "0byri6xny770wwb2nciq44j5071122l14bvv65axdd70nfjf0q2s")))) + (build-system gnu-build-system) + (native-inputs + `(("pkg-config" ,pkg-config) + ("autoconf" ,autoconf) + ("automake" ,automake) + ("libtool" ,libtool))) + (inputs + `(("openssl" ,openssl))) + (arguments + `(#:configure-flags + `("--without-gsf" + "--without-curl" + "--disable-dependency-tracking"))) + (home-page "https://github.com/mtrojnar/osslsigncode") + (synopsis "Authenticode signing and timestamping tool") + (description "osslsigncode is a small tool that implements part of the +functionality of the Microsoft tool signtool.exe - more exactly the Authenticode +signing and timestamping. But osslsigncode is based on OpenSSL and cURL, and +thus should be able to compile on most platforms where these exist.") + (license license:gpl3+))) ; license is with openssl exception + +(define-public python-elfesteem + (let ((commit "87bbd79ab7e361004c98cc8601d4e5f029fd8bd5")) + (package + (name "python-elfesteem") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/LRGH/elfesteem") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "1nyvjisvyxyxnd0023xjf5846xd03lwawp5pfzr8vrky7wwm5maz")) + (patches (search-our-patches "elfsteem-value-error-python-39.patch")))) + (build-system python-build-system) + ;; There are no tests, but attempting to run python setup.py test leads to + ;; PYTHONPATH problems, just disable the test + (arguments '(#:tests? 
#f)) + (home-page "https://github.com/LRGH/elfesteem") + (synopsis "ELF/PE/Mach-O parsing library") + (description "elfesteem parses ELF, PE and Mach-O files.") + (license license:lgpl2.1)))) + +(define-public python-oscrypto + (package + (name "python-oscrypto") + (version "1.2.1") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/wbond/oscrypto") + (commit version))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "1d4d8s4z340qhvb3g5m5v3436y3a71yc26wk4749q64m09kxqc3l")) + (patches (search-our-patches "oscrypto-hard-code-openssl.patch")))) + (build-system python-build-system) + (native-search-paths + (list (search-path-specification + (variable "SSL_CERT_FILE") + (file-type 'regular) + (separator #f) ;single entry + (files '("etc/ssl/certs/ca-certificates.crt"))))) + + (propagated-inputs + `(("python-asn1crypto" ,python-asn1crypto) + ("openssl" ,openssl))) + (arguments + `(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'hard-code-path-to-libscrypt + (lambda* (#:key inputs #:allow-other-keys) + (let ((openssl (assoc-ref inputs "openssl"))) + (substitute* "oscrypto/__init__.py" + (("@GUIX_OSCRYPTO_USE_OPENSSL@") + (string-append openssl "/lib/libcrypto.so" "," openssl "/lib/libssl.so"))) + #t))) + (add-after 'unpack 'disable-broken-tests + (lambda _ + ;; This test is broken as there is no keyboard interrupt. + (substitute* "tests/test_trust_list.py" + (("^(.*)class TrustListTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_tls.py" + (("^(.*)class TLSTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + #t)) + (replace 'check + (lambda _ + (invoke "python" "run.py" "tests") + #t))))) + (home-page "https://github.com/wbond/oscrypto") + (synopsis "Compiler-free Python crypto library backed by the OS") + (description "oscrypto is a compilation-free, always up-to-date encryption library for Python.") + (license license:expat))) + +(define-public python-oscryptotests + (package (inherit python-oscrypto) + (name "python-oscryptotests") + (propagated-inputs + `(("python-oscrypto" ,python-oscrypto))) + (arguments + `(#:tests? 
#f + #:phases + (modify-phases %standard-phases + (add-after 'unpack 'hard-code-path-to-libscrypt + (lambda* (#:key inputs #:allow-other-keys) + (chdir "tests") + #t))))))) + +(define-public python-certvalidator + (let ((commit "a145bf25eb75a9f014b3e7678826132efbba6213")) + (package + (name "python-certvalidator") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/achow101/certvalidator") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "1qw2k7xis53179lpqdqyylbcmp76lj7sagp883wmxg5i7chhc96k")))) + (build-system python-build-system) + (propagated-inputs + `(("python-asn1crypto" ,python-asn1crypto) + ("python-oscrypto" ,python-oscrypto) + ("python-oscryptotests", python-oscryptotests))) ;; certvalidator tests import oscryptotests + (arguments + `(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'disable-broken-tests + (lambda _ + (substitute* "tests/test_certificate_validator.py" + (("^(.*)class CertificateValidatorTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_crl_client.py" + (("^(.*)def test_fetch_crl" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_ocsp_client.py" + (("^(.*)def test_fetch_ocsp" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_registry.py" + (("^(.*)def test_build_paths" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_validate.py" + (("^(.*)def test_revocation_mode_hard" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_validate.py" + (("^(.*)def test_revocation_mode_soft" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + #t)) + (replace 'check + (lambda _ + (invoke "python" "run.py" "tests") + #t))))) + (home-page "https://github.com/wbond/certvalidator") + (synopsis "Python library for validating X.509 certificates and paths") + (description "certvalidator is a Python library for validating X.509 +certificates or paths. Supports various options, including: validation at a +specific moment in time, whitelisting and revocation checks.") + (license license:expat)))) + +(define-public python-altgraph + (package + (name "python-altgraph") + (version "0.17") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/ronaldoussoren/altgraph") + (commit (string-append "v" version)))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "09sm4srvvkw458pn48ga9q7ykr4xlz7q8gh1h9w7nxpf001qgpwb")))) + (build-system python-build-system) + (home-page "https://github.com/ronaldoussoren/altgraph") + (synopsis "Python graph (network) package") + (description "altgraph is a fork of graphlib: a graph (network) package for +constructing graphs, BFS and DFS traversals, topological sort, shortest paths, +etc. 
with graphviz output.") + (license license:expat))) + + +(define-public python-macholib + (package + (name "python-macholib") + (version "1.14") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/ronaldoussoren/macholib") + (commit (string-append "v" version)))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "0aislnnfsza9wl4f0vp45ivzlc0pzhp9d4r08700slrypn5flg42")))) + (build-system python-build-system) + (propagated-inputs + `(("python-altgraph" ,python-altgraph))) + (arguments + '(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'disable-broken-tests + (lambda _ + ;; This test is broken as there is no keyboard interrupt. + (substitute* "macholib_tests/test_command_line.py" + (("^(.*)class TestCmdLine" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "macholib_tests/test_dyld.py" + (("^(.*)def test_\\S+_find" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line)) + (("^(.*)def testBasic" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line)) + ) + #t))))) + (home-page "https://github.com/ronaldoussoren/macholib") + (synopsis "Python library for analyzing and editing Mach-O headers") + (description "macholib is a Macho-O header analyzer and editor. It's +typically used as a dependency analysis tool, and also to rewrite dylib +references in Mach-O headers to be @executable_path relative. Though this tool +targets a platform specific file format, it is pure python code that is platform +and endian independent.") + (license license:expat))) + +(define-public python-signapple + (let ((commit "8a945a2e7583be2665cf3a6a89d665b70ecd1ab6")) + (package + (name "python-signapple") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/achow101/signapple") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "0fr1hangvfyiwflca6jg5g8zvg3jc9qr7vd2c12ff89pznf38dlg")))) + (build-system python-build-system) + (propagated-inputs + `(("python-asn1crypto" ,python-asn1crypto) + ("python-oscrypto" ,python-oscrypto) + ("python-certvalidator" ,python-certvalidator) + ("python-elfesteem" ,python-elfesteem) + ("python-requests" ,python-requests) + ("python-macholib" ,python-macholib))) + ;; There are no tests, but attempting to run python setup.py test leads to + ;; problems, just disable the test + (arguments '(#:tests? 
#f))
+      (home-page "https://github.com/achow101/signapple")
+      (synopsis "Mach-O binary signature tool")
+      (description "signapple is a Python tool for creating, verifying, and
+inspecting signatures in Mach-O binaries.")
+      (license license:expat))))
+
+(define (make-glibc-without-werror glibc)
+  (package-with-extra-configure-variable glibc "enable_werror" "no"))
+
+(define (make-glibc-with-stack-protector glibc)
+  (package-with-extra-configure-variable glibc "--enable-stack-protector" "all"))
+
+(define (make-glibc-with-bind-now glibc)
+  (package-with-extra-configure-variable glibc "--enable-bind-now" "yes"))
+
+(define-public glibc-2.24
+  (package
+    (inherit glibc-2.31)
+    (version "2.24")
+    (source (origin
+              (method git-fetch)
+              (uri (git-reference
+                    (url "https://sourceware.org/git/glibc.git")
+                    (commit "0d7f1ed30969886c8dde62fbf7d2c79967d4bace")))
+              (file-name (git-file-name "glibc" "0d7f1ed30969886c8dde62fbf7d2c79967d4bace"))
+              (sha256
+               (base32
+                "0g5hryia5v1k0qx97qffgwzrz4lr4jw3s5kj04yllhswsxyjbic3"))
+              (patches (search-our-patches "glibc-ldd-x86_64.patch"
+                                           "glibc-versioned-locpath.patch"
+                                           "glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch"
+                                           "glibc-2.24-no-build-time-cxx-header-run.patch"
+                                           "glibc-2.24-fcommon.patch"
+                                           "glibc-2.24-guix-prefix.patch")))))))
+
+(define-public glibc-2.27/bitcoin-patched
+  (package
+    (inherit glibc-2.31)
+    (version "2.27")
+    (source (origin
+              (method git-fetch)
+              (uri (git-reference
+                    (url "https://sourceware.org/git/glibc.git")
+                    (commit "23158b08a0908f381459f273a984c6fd328363cb")))
+              (file-name (git-file-name "glibc" "23158b08a0908f381459f273a984c6fd328363cb"))
+              (sha256
+               (base32
+                "1b2n1gxv9f4fd5yy68qjbnarhf8mf4vmlxk10i3328c1w5pmp0ca"))
+              (patches (search-our-patches "glibc-ldd-x86_64.patch"
+                                           "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
+                                           "glibc-2.27-dont-redefine-nss-database.patch"
+                                           "glibc-2.27-guix-prefix.patch")))))))
+
+(define (fix-ppc64-nx-default lief)
+  (package-with-extra-patches lief
+    (search-our-patches "lief-fix-ppc64-nx-default.patch")))
+
+(define-public lief
+  (package
+    (name "python-lief")
+    (version "0.12.1")
+    (source
+     (origin
+       (method git-fetch)
+       (uri (git-reference
+             (url "https://github.com/lief-project/LIEF.git")
+             (commit version)))
+       (file-name (git-file-name name version))
+       (sha256
+        (base32
+         "1xzbh3bxy4rw1yamnx68da1v5s56ay4g081cyamv67256g0qy2i1"))))
+    (build-system python-build-system)
+    (arguments
+     `(#:phases
+       (modify-phases %standard-phases
+         (add-after 'unpack 'parallel-jobs
+           ;; build with multiple cores
+           (lambda _
+             (substitute* "setup.py" (("self.parallel if self.parallel else 1") (number->string (parallel-job-count)))))))))
+    (native-inputs
+     `(("cmake" ,cmake)))
+    (home-page "https://github.com/lief-project/LIEF")
+    (synopsis "Library to Instrument Executable Formats")
+    (description "Python library to provide a cross-platform library which can
+parse, modify and abstract ELF, PE and MachO formats.")
+    (license license:asl2.0)))
 
 (packages->manifest
  (append
  (list ;; The Basics
        bash-minimal
        which
-        coreutils
+        coreutils-minimal
        util-linux
        ;; File(system) inspection
        file
@@ -168,33 +598,45 @@ chain for " target " development."))
        patch
        gawk
        sed
+        moreutils
        ;; Compression and archiving
        tar
        bzip2
        gzip
        xz
        zlib
+        (list zlib "static")
        ;; Build tools
        gnu-make
-        libtool
-        autoconf
+        libtool-2.4.7
+        autoconf-2.71
        automake
        pkg-config
        bison
+        ;; Native GCC 10 toolchain
+        gcc-toolchain-10
+        (list gcc-toolchain-10 "static")
        ;; Scripting
        perl
-        python-3.7
+        python-minimal ;; (3.9)
        ;; Git
        git
-
;; Native gcc 9 toolchain targeting glibc 2.27 - (make-gcc-toolchain gcc-9 glibc-2.27)) + ;; Tests + (fix-ppc64-nx-default lief)) (let ((target (getenv "HOST"))) (cond ((string-suffix? "-mingw32" target) ;; Windows - (list zip (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") nsis-x86_64)) - ((string-contains target "riscv64-linux-") - (list (make-bitcoin-cross-toolchain "riscv64-linux-gnu" - #:base-gcc-for-libc gcc-7))) + (list zip + (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") + (make-nsis-for-gcc-10 nsis-x86_64) + osslsigncode)) ((string-contains target "-linux-") - (list (make-bitcoin-cross-toolchain target))) + (list (cond ((string-contains target "riscv64-") + (make-bitcoin-cross-toolchain target + #:base-libc (make-glibc-with-stack-protector + (make-glibc-with-bind-now (make-glibc-without-werror glibc-2.27/bitcoin-patched))))) + (else + (make-bitcoin-cross-toolchain target))))) + ((string-contains target "darwin") + (list clang-toolchain-10 binutils cmake-minimal xorriso python-signapple)) (else '()))))) diff --git a/contrib/guix/patches/elfsteem-value-error-python-39.patch b/contrib/guix/patches/elfsteem-value-error-python-39.patch new file mode 100644 index 0000000000000..21e1228afd83c --- /dev/null +++ b/contrib/guix/patches/elfsteem-value-error-python-39.patch @@ -0,0 +1,13 @@ +diff --git a/examples/otool.py b/examples/otool.py +index 2b8efc0..d797b2e 100755 +--- a/examples/otool.py ++++ b/examples/otool.py +@@ -342,7 +342,7 @@ if __name__ == '__main__': + try: + e = macho_init.MACHO(raw, + parseSymbols = False) +- except ValueError, err: ++ except ValueError as err: + print("%s:" %file) + print(" %s" % err) + continue diff --git a/contrib/guix/patches/gcc-10-remap-guix-store.patch b/contrib/guix/patches/gcc-10-remap-guix-store.patch new file mode 100644 index 0000000000000..a47ef7a2df130 --- /dev/null +++ b/contrib/guix/patches/gcc-10-remap-guix-store.patch @@ -0,0 +1,25 @@ +From aad25427e74f387412e8bc9a9d7bbc6c496c792f Mon Sep 17 00:00:00 2001 +From: Andrew Chow +Date: Wed, 6 Jul 2022 16:49:41 -0400 +Subject: [PATCH] guix: remap guix store paths to /usr + +--- + libgcc/Makefile.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/libgcc/Makefile.in b/libgcc/Makefile.in +index 851e7657d07..476c2becd1c 100644 +--- a/libgcc/Makefile.in ++++ b/libgcc/Makefile.in +@@ -854,7 +854,7 @@ endif + # libgcc_eh.a, only LIB2ADDEH matters. If we do, only LIB2ADDEHSTATIC and + # LIB2ADDEHSHARED matter. (Usually all three are identical.) + +-c_flags := -fexceptions ++c_flags := -fexceptions $(shell find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) + + ifeq ($(enable_shared),yes) + +-- +2.37.0 + diff --git a/contrib/guix/patches/gcc-broken-longjmp.patch b/contrib/guix/patches/gcc-broken-longjmp.patch new file mode 100644 index 0000000000000..1cfc0918b090a --- /dev/null +++ b/contrib/guix/patches/gcc-broken-longjmp.patch @@ -0,0 +1,68 @@ +commit eb5698897c52702498938592d7f76e67d126451f +Author: Eric Botcazou +Date: Wed May 5 22:48:51 2021 +0200 + + Fix PR target/100402 + + This is a regression for 64-bit Windows present from mainline down to the 9 + branch and introduced by the fix for PR target/99234. Again SEH, but with + a twist related to the way MinGW implements setjmp/longjmp, which turns out + to be piggybacked on SEH with recent versions of MinGW, i.e. 
the longjmp + performs a bona-fide unwinding of the stack, because it calls RtlUnwindEx + with the second argument initially passed to setjmp, which is the result of + __builtin_frame_address (0) in the MinGW header file: + + define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) + + This means that we directly expose the frame pointer to the SEH machinery + here (unlike with regular exception handling where we use an intermediate + CFA) and thus that we cannot do whatever we want with it. The old code + would leave it unaligned, i.e. not multiple of 16, whereas the new code + aligns it, but this breaks for some reason; at least it appears that a + .seh_setframe directive with 0 as second argument always works, so the + fix aligns it this way. + + gcc/ + PR target/100402 + * config/i386/i386.c (ix86_compute_frame_layout): For a SEH target, + always return the establisher frame for __builtin_frame_address (0). + gcc/testsuite/ + * gcc.c-torture/execute/20210505-1.c: New test. + +diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c +index 2f838840e96..06ad1b2274e 100644 +--- a/gcc/config/i386/i386.c ++++ b/gcc/config/i386/i386.c +@@ -6356,12 +6356,29 @@ ix86_compute_frame_layout (void) + area, see the SEH code in config/i386/winnt.c for the rationale. */ + frame->hard_frame_pointer_offset = frame->sse_reg_save_offset; + +- /* If we can leave the frame pointer where it is, do so. Also, return ++ /* If we can leave the frame pointer where it is, do so; however return + the establisher frame for __builtin_frame_address (0) or else if the +- frame overflows the SEH maximum frame size. */ ++ frame overflows the SEH maximum frame size. ++ ++ Note that the value returned by __builtin_frame_address (0) is quite ++ constrained, because setjmp is piggybacked on the SEH machinery with ++ recent versions of MinGW: ++ ++ # elif defined(__SEH__) ++ # if defined(__aarch64__) || defined(_ARM64_) ++ # define setjmp(BUF) _setjmp((BUF), __builtin_sponentry()) ++ # elif (__MINGW_GCC_VERSION < 40702) ++ # define setjmp(BUF) _setjmp((BUF), mingw_getsp()) ++ # else ++ # define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) ++ # endif ++ ++ and the second argument passed to _setjmp, if not null, is forwarded ++ to the TargetFrame parameter of RtlUnwindEx by longjmp (after it has ++ built an ExceptionRecord on the fly describing the setjmp buffer). */ + const HOST_WIDE_INT diff + = frame->stack_pointer_offset - frame->hard_frame_pointer_offset; +- if (diff <= 255) ++ if (diff <= 255 && !crtl->accesses_prior_frames) + { + /* The resulting diff will be a multiple of 16 lower than 255, + i.e. at most 240 as required by the unwind data structure. */ diff --git a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch new file mode 100644 index 0000000000000..5c4d0c6ebe196 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch @@ -0,0 +1,62 @@ +https://sourceware.org/git/?p=glibc.git;a=commit;h=a68ba2f3cd3cbe32c1f31e13c20ed13487727b32 + +commit 6b02af31e9a721bb15a11380cd22d53b621711f8 +Author: Szabolcs Nagy +Date: Wed Oct 18 17:26:23 2017 +0100 + + [AARCH64] Rewrite elf_machine_load_address using _DYNAMIC symbol + + This patch rewrites aarch64 elf_machine_load_address to use special _DYNAMIC + symbol instead of _dl_start. + + The static address of _DYNAMIC symbol is stored in the first GOT entry. 
+ Here is the change which makes this solution work (part of binutils 2.24): + https://sourceware.org/ml/binutils/2013-06/msg00248.html + + i386, x86_64 targets use the same method to do this as well. + + The original implementation relies on a trick that R_AARCH64_ABS32 relocation + being resolved at link time and the static address fits in the 32bits. + However, in LP64, normally, the address is defined to be 64 bit. + + Here is the C version one which should be portable in all cases. + + * sysdeps/aarch64/dl-machine.h (elf_machine_load_address): Use + _DYNAMIC symbol to calculate load address. + +diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h +index e86d8b5b63..5a5b8a5de5 100644 +--- a/sysdeps/aarch64/dl-machine.h ++++ b/sysdeps/aarch64/dl-machine.h +@@ -49,26 +49,11 @@ elf_machine_load_address (void) + /* To figure out the load address we use the definition that for any symbol: + dynamic_addr(symbol) = static_addr(symbol) + load_addr + +- The choice of symbol is arbitrary. The static address we obtain +- by constructing a non GOT reference to the symbol, the dynamic +- address of the symbol we compute using adrp/add to compute the +- symbol's address relative to the PC. +- This depends on 32bit relocations being resolved at link time +- and that the static address fits in the 32bits. */ +- +- ElfW(Addr) static_addr; +- ElfW(Addr) dynamic_addr; +- +- asm (" \n" +-" adrp %1, _dl_start; \n" +-" add %1, %1, #:lo12:_dl_start \n" +-" ldr %w0, 1f \n" +-" b 2f \n" +-"1: \n" +-" .word _dl_start \n" +-"2: \n" +- : "=r" (static_addr), "=r" (dynamic_addr)); +- return dynamic_addr - static_addr; ++ _DYNAMIC symbol is used here as its link-time address is stored in ++ the special unrelocated first GOT entry. */ ++ ++ extern ElfW(Dyn) _DYNAMIC[] attribute_hidden; ++ return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic (); + } + + /* Set up the loaded object described by L so its unrelocated PLT diff --git a/contrib/guix/patches/glibc-2.24-fcommon.patch b/contrib/guix/patches/glibc-2.24-fcommon.patch new file mode 100644 index 0000000000000..2bc32ede90560 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-fcommon.patch @@ -0,0 +1,32 @@ +commit 264a4a0dbe1f4369db315080034b500bed66016c +Author: fanquake +Date: Fri May 6 11:03:04 2022 +0100 + + build: use -fcommon to retain legacy behaviour with GCC 10 + + GCC 10 started using -fno-common by default, which causes issues with + the powerpc builds using glibc 2.24. A patch was committed to glibc to fix + the issue, 18363b4f010da9ba459b13310b113ac0647c2fcc but is non-trivial + to backport, and was broken in at least one way, see the followup in + commit 7650321ce037302bfc2f026aa19e0213b8d02fe6. + + For now, retain the legacy GCC behaviour by passing -fcommon when + building glibc. + + https://gcc.gnu.org/onlinedocs/gcc/Code-Gen-Options.html. + https://sourceware.org/git/?p=glibc.git;a=commit;h=18363b4f010da9ba459b13310b113ac0647c2fcc + https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6 + +diff --git a/Makeconfig b/Makeconfig +index ee379f5852..63c4a2f234 100644 +--- a/Makeconfig ++++ b/Makeconfig +@@ -824,7 +824,7 @@ ifeq "$(strip $(+cflags))" "" + +cflags := $(default_cflags) + endif # $(+cflags) == "" + +-+cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) +++cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) -fcommon + +gcc-nowarn := -w + + # Don't duplicate options if we inherited variables from the parent.
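For context, a minimal illustration (not part of the patch set): under GCC 10's new -fno-common default, duplicate tentative definitions that GCC 9 silently merged become hard link errors, which is the failure mode the -fcommon workaround avoids. A two-file sketch:

    $ cat > a.c <<'EOF'
    int counter;                              /* tentative definition */
    int get_counter(void) { return counter; }
    EOF
    $ cat > b.c <<'EOF'
    int counter;                              /* duplicate tentative definition */
    int get_counter(void);
    int main(void) { return get_counter(); }
    EOF
    $ gcc-10 a.c b.c            # ld: multiple definition of 'counter'
    $ gcc-10 -fcommon a.c b.c   # links; the duplicates merge into one common symbol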
diff --git a/contrib/guix/patches/glibc-2.24-guix-prefix.patch b/contrib/guix/patches/glibc-2.24-guix-prefix.patch new file mode 100644 index 0000000000000..875e8cd611914 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-guix-prefix.patch @@ -0,0 +1,25 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +We might be able to drop this in favour of using --with-nonshared-cflags +when we begin using newer versions of glibc. + +--- a/Makeconfig ++++ b/Makeconfig +@@ -950,6 +950,10 @@ object-suffixes-for-libc += .oS + # shared objects. We don't want to use CFLAGS-os because users may, for + # example, make that processor-specific. + CFLAGS-.oS = $(CFLAGS-.o) $(PIC-ccflag) ++ ++# Map Guix store paths to /usr ++CFLAGS-.oS += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;` ++ + CPPFLAGS-.oS = $(CPPFLAGS-.o) -DPIC -DLIBC_NONSHARED=1 + libtype.oS = lib%_nonshared.a + endif +-- +2.35.1 + diff --git a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch new file mode 100644 index 0000000000000..11fe7fdc99c05 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch @@ -0,0 +1,100 @@ +https://sourceware.org/git/?p=glibc.git;a=commit;h=fc3e1337be1c6935ab58bd13520f97a535cf70cc + +commit dc23a45db566095e83ff0b7a57afc87fb5ca89a1 +Author: Florian Weimer +Date: Wed Sep 21 10:45:32 2016 +0200 + + Avoid running $(CXX) during build to obtain header file paths + + This reduces the build time somewhat and is particularly noticeable + during rebuilds with few code changes. + +diff --git a/Makerules b/Makerules +index 7e4077ee50..c338850de5 100644 +--- a/Makerules ++++ b/Makerules +@@ -121,14 +121,10 @@ ifneq (,$(CXX)) + # will be used instead of /usr/include/stdlib.h and /usr/include/math.h. + before-compile := $(common-objpfx)cstdlib $(common-objpfx)cmath \ + $(before-compile) +-cstdlib=$(shell echo "\#include <cstdlib>" | $(CXX) -M -MP -x c++ - \ +- | sed -n "/cstdlib:/{s/:$$//;p}") +-$(common-objpfx)cstdlib: $(cstdlib) ++$(common-objpfx)cstdlib: $(c++-cstdlib-header) + $(INSTALL_DATA) $< $@T + $(move-if-change) $@T $@ +-cmath=$(shell echo "\#include <cmath>" | $(CXX) -M -MP -x c++ - \ +- | sed -n "/cmath:/{s/:$$//;p}") +-$(common-objpfx)cmath: $(cmath) ++$(common-objpfx)cmath: $(c++-cmath-header) + $(INSTALL_DATA) $< $@T + $(move-if-change) $@T $@ + endif +diff --git a/config.make.in b/config.make.in +index 95c6f36876..04a8b3ed7f 100644 +--- a/config.make.in ++++ b/config.make.in +@@ -45,6 +45,8 @@ defines = @DEFINES@ + sysheaders = @sysheaders@ + sysincludes = @SYSINCLUDES@ + c++-sysincludes = @CXX_SYSINCLUDES@ ++c++-cstdlib-header = @CXX_CSTDLIB_HEADER@ ++c++-cmath-header = @CXX_CMATH_HEADER@ + all-warnings = @all_warnings@ + enable-werror = @enable_werror@ + +diff --git a/configure b/configure +index 17625e1041..6ff252744b 100755 +--- a/configure ++++ b/configure +@@ -635,6 +635,8 @@ BISON + INSTALL_INFO + PERL + BASH_SHELL ++CXX_CMATH_HEADER ++CXX_CSTDLIB_HEADER + CXX_SYSINCLUDES + SYSINCLUDES + AUTOCONF +@@ -5054,6 +5056,18 @@ fi + + + ++# Obtain some C++ header file paths. This is used to make a local ++# copy of those headers in Makerules.
++if test -n "$CXX"; then ++ find_cxx_header () { ++ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}" ++ } ++ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)" ++ CXX_CMATH_HEADER="$(find_cxx_header cmath)" ++fi ++ ++ ++ + # Test if LD_LIBRARY_PATH contains the notation for the current directory + # since this would lead to problems installing/building glibc. + # LD_LIBRARY_PATH contains the current directory if one of the following +diff --git a/configure.ac b/configure.ac +index 33bcd62180..9938ab0dc2 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1039,6 +1039,18 @@ fi + AC_SUBST(SYSINCLUDES) + AC_SUBST(CXX_SYSINCLUDES) + ++# Obtain some C++ header file paths. This is used to make a local ++# copy of those headers in Makerules. ++if test -n "$CXX"; then ++ find_cxx_header () { ++ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}" ++ } ++ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)" ++ CXX_CMATH_HEADER="$(find_cxx_header cmath)" ++fi ++AC_SUBST(CXX_CSTDLIB_HEADER) ++AC_SUBST(CXX_CMATH_HEADER) ++ + # Test if LD_LIBRARY_PATH contains the notation for the current directory + # since this would lead to problems installing/building glibc. + # LD_LIBRARY_PATH contains the current directory if one of the following diff --git a/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch b/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch new file mode 100644 index 0000000000000..16a595d613c11 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch @@ -0,0 +1,87 @@ +commit 78a90c2f74a2012dd3eff302189e47ff6779a757 +Author: Andreas Schwab +Date: Fri Mar 2 23:07:14 2018 +0100 + + Fix multiple definitions of __nss_*_database (bug 22918) + + (cherry picked from commit eaf6753f8aac33a36deb98c1031d1bad7b593d2d) + +diff --git a/nscd/gai.c b/nscd/gai.c +index d081747797..576fd0045b 100644 +--- a/nscd/gai.c ++++ b/nscd/gai.c +@@ -45,3 +45,6 @@ + #ifdef HAVE_LIBIDN + # include + #endif ++ ++/* Some variables normally defined in libc. */ ++service_user *__nss_hosts_database attribute_hidden; +diff --git a/nss/nsswitch.c b/nss/nsswitch.c +index d5e655974f..b0f0c11a3e 100644 +--- a/nss/nsswitch.c ++++ b/nss/nsswitch.c +@@ -62,7 +62,7 @@ static service_library *nss_new_service (name_database *database, + + /* Declare external database variables. */ + #define DEFINE_DATABASE(name) \ +- extern service_user *__nss_##name##_database attribute_hidden; \ ++ service_user *__nss_##name##_database attribute_hidden; \ + weak_extern (__nss_##name##_database) + #include "databases.def" + #undef DEFINE_DATABASE +diff --git a/nss/nsswitch.h b/nss/nsswitch.h +index eccb535ef5..63573b9ebc 100644 +--- a/nss/nsswitch.h ++++ b/nss/nsswitch.h +@@ -226,10 +226,10 @@ libc_hidden_proto (__nss_hostname_digits_dots) + #define MAX_NR_ADDRS 48 + + /* Prototypes for __nss_*_lookup2 functions. 
*/ +-#define DEFINE_DATABASE(arg) \ +- service_user *__nss_##arg##_database attribute_hidden; \ +- int __nss_##arg##_lookup2 (service_user **, const char *, \ +- const char *, void **); \ ++#define DEFINE_DATABASE(arg) \ ++ extern service_user *__nss_##arg##_database attribute_hidden; \ ++ int __nss_##arg##_lookup2 (service_user **, const char *, \ ++ const char *, void **); \ + libc_hidden_proto (__nss_##arg##_lookup2) + #include "databases.def" + #undef DEFINE_DATABASE +diff --git a/posix/tst-rfc3484-2.c b/posix/tst-rfc3484-2.c +index f509534ca9..8c64ac59ff 100644 +--- a/posix/tst-rfc3484-2.c ++++ b/posix/tst-rfc3484-2.c +@@ -58,6 +58,7 @@ _res_hconf_init (void) + #undef USE_NSCD + #include "../sysdeps/posix/getaddrinfo.c" + ++service_user *__nss_hosts_database attribute_hidden; + + /* This is the beginning of the real test code. The above defines + (among other things) the function rfc3484_sort. */ +diff --git a/posix/tst-rfc3484-3.c b/posix/tst-rfc3484-3.c +index ae44087a10..1c61aaf844 100644 +--- a/posix/tst-rfc3484-3.c ++++ b/posix/tst-rfc3484-3.c +@@ -58,6 +58,7 @@ _res_hconf_init (void) + #undef USE_NSCD + #include "../sysdeps/posix/getaddrinfo.c" + ++service_user *__nss_hosts_database attribute_hidden; + + /* This is the beginning of the real test code. The above defines + (among other things) the function rfc3484_sort. */ +diff --git a/posix/tst-rfc3484.c b/posix/tst-rfc3484.c +index 7f191abbbc..8f45848e44 100644 +--- a/posix/tst-rfc3484.c ++++ b/posix/tst-rfc3484.c +@@ -58,6 +58,7 @@ _res_hconf_init (void) + #undef USE_NSCD + #include "../sysdeps/posix/getaddrinfo.c" + ++service_user *__nss_hosts_database attribute_hidden; + + /* This is the beginning of the real test code. The above defines + (among other things) the function rfc3484_sort. */ diff --git a/contrib/guix/patches/glibc-2.27-guix-prefix.patch b/contrib/guix/patches/glibc-2.27-guix-prefix.patch new file mode 100644 index 0000000000000..d777af74f0188 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-guix-prefix.patch @@ -0,0 +1,25 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +We might be able to drop this in favour of using --with-nonshared-cflags +when we begin using newer versions of glibc. + +--- a/Makeconfig ++++ b/Makeconfig +@@ -992,6 +992,10 @@ object-suffixes := + CPPFLAGS-.o = $(pic-default) + # libc.a must be compiled with -fPIE/-fpie for static PIE. + CFLAGS-.o = $(filter %frame-pointer,$(+cflags)) $(pie-default) ++ ++# Map Guix store paths to /usr ++CFLAGS-.o += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;` ++ + libtype.o := lib%.a + object-suffixes += .o + ifeq (yes,$(build-shared)) +-- +2.35.1 + diff --git a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch new file mode 100644 index 0000000000000..c0f8495c41de1 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch @@ -0,0 +1,76 @@ +Note that this has been modified from the original commit, to use __has_include +instead of __has_include__, as the latter was causing build failures with GCC 10.
+See also: http://lists.busybox.net/pipermail/buildroot/2020-July/590376.html. + +https://sourceware.org/git/?p=glibc.git;a=commit;h=0b9c84906f653978fb8768c7ebd0ee14a47e662e + +From 562c52cc81a4e456a62e6455feb32732049e9070 Mon Sep 17 00:00:00 2001 +From: "H.J. Lu" +Date: Mon, 31 Dec 2018 09:26:42 -0800 +Subject: [PATCH] riscv: Use __has_include__ to include <asm/syscalls.h> [BZ + #24022] + +<asm/syscalls.h> has been removed by + +commit 27f8899d6002e11a6e2d995e29b8deab5aa9cc25 +Author: David Abdurachmanov +Date: Thu Nov 8 20:02:39 2018 +0100 + + riscv: add asm/unistd.h UAPI header + + Marcin Juszkiewicz reported issues while generating syscall table for riscv + using 4.20-rc1. The patch refactors our unistd.h files to match some other + architectures. + + - Add asm/unistd.h UAPI header, which has __ARCH_WANT_NEW_STAT only for 64-bit + - Remove asm/syscalls.h UAPI header and merge to asm/unistd.h + - Adjust kernel asm/unistd.h + + So now asm/unistd.h UAPI header should show all syscalls for riscv. + +<asm/syscalls.h> may be restored by + +Subject: [PATCH] riscv: restore asm/syscalls.h UAPI header +Date: Tue, 11 Dec 2018 09:09:35 +0100 + +UAPI header asm/syscalls.h was merged into UAPI asm/unistd.h header, +which did resolve issue with missing syscalls macros resulting in +glibc (2.28) build failure. It also broke glibc in a different way: +asm/syscalls.h is being used by glibc. I noticed this while doing +Fedora 30/Rawhide mass rebuild. + +The patch returns asm/syscalls.h header and incl. it into asm/unistd.h. +I plan to send a patch to glibc to use asm/unistd.h instead of +asm/syscalls.h + +In the meantime, we use __has_include__, which was added to GCC 5, to +check if <asm/syscalls.h> exists before including it. Tested with +build-many-glibcs.py for riscv against kernel 4.19.12 and 4.20-rc7. + + [BZ #24022] + * sysdeps/unix/sysv/linux/riscv/flush-icache.c: Check if <asm/syscalls.h> + exists with __has_include__ before including it. +--- + sysdeps/unix/sysv/linux/riscv/flush-icache.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/sysdeps/unix/sysv/linux/riscv/flush-icache.c b/sysdeps/unix/sysv/linux/riscv/flush-icache.c +index d612ef4c6c..0b2042620b 100644 +--- a/sysdeps/unix/sysv/linux/riscv/flush-icache.c ++++ b/sysdeps/unix/sysv/linux/riscv/flush-icache.c +@@ -21,7 +21,11 @@ + #include + #include + #include +-#include <asm/syscalls.h> ++#if __has_include (<asm/syscalls.h>) ++# include <asm/syscalls.h> ++#else ++# include <asm/unistd.h> ++#endif + + typedef int (*func_type) (void *, void *, unsigned long int); + +-- +2.31.1 + diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch new file mode 100644 index 0000000000000..b1b6d5a54863c --- /dev/null +++ b/contrib/guix/patches/glibc-ldd-x86_64.patch @@ -0,0 +1,10 @@ +By default, 'RTLDLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas +it's in 'lib/' for us. This patch fixes that.
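To make the rewrite below concrete, assume (illustrative input) that glibc's build substitutes RTLDLIST=/lib64/ld-linux-x86-64.so.2 into the generated ldd script. The stock sed rule expands that to

    RTLDLIST="/lib/ld-linux.so.2 /lib64/ld-linux-x86-64.so.2 /libx32/ld-linux-x32.so.2"

whereas the patched rule drops the 64 infix from the x86-64 loader entry, so ldd looks for it under lib/:

    RTLDLIST="/lib/ld-linux.so.2 /lib/ld-linux-x86-64.so.2 /libx32/ld-linux-x32.so.2"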
+--- glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2012-12-25 04:02:13.000000000 +0100 ++++ glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2013-09-15 23:08:03.000000000 +0200 +@@ -1,3 +1,3 @@ + /LD_TRACE_LOADED_OBJECTS=1/a\ + add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out" +-s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \264\4-x86-64\6 \2x32\4-x32\6"_ ++s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \2\4-x86-64\6 \2x32\4-x32\6"_ diff --git a/contrib/guix/patches/glibc-versioned-locpath.patch b/contrib/guix/patches/glibc-versioned-locpath.patch new file mode 100644 index 0000000000000..bc7652127fa5a --- /dev/null +++ b/contrib/guix/patches/glibc-versioned-locpath.patch @@ -0,0 +1,240 @@ +The format of locale data can be incompatible between libc versions, and +loading incompatible data can lead to 'setlocale' returning EINVAL at best +or triggering an assertion failure at worst. See +https://lists.gnu.org/archive/html/guix-devel/2015-09/msg00717.html +for background information. + +To address that, this patch changes libc to honor a new 'GUIX_LOCPATH' +variable, and to look for locale data in version-specific sub-directories of +that variable. So, if GUIX_LOCPATH=/foo:/bar, locale data is searched for in +/foo/X.Y and /bar/X.Y, where X.Y is the libc version number. + +That way, a single 'GUIX_LOCPATH' setting can work even if different libc +versions coexist on the system. + +--- a/locale/newlocale.c ++++ b/locale/newlocale.c +@@ -30,6 +30,7 @@ + /* Lock for protecting global data. */ + __libc_rwlock_define (extern , __libc_setlocale_lock attribute_hidden) + ++extern error_t compute_locale_search_path (char **, size_t *); + + /* Use this when we come along an error. */ + #define ERROR_RETURN \ +@@ -48,7 +49,6 @@ __newlocale (int category_mask, const char *locale, __locale_t base) + __locale_t result_ptr; + char *locale_path; + size_t locale_path_len; +- const char *locpath_var; + int cnt; + size_t names_len; + +@@ -102,17 +102,8 @@ __newlocale (int category_mask, const char *locale, __locale_t base) + locale_path = NULL; + locale_path_len = 0; + +- locpath_var = getenv ("LOCPATH"); +- if (locpath_var != NULL && locpath_var[0] != '\0') +- { +- if (__argz_create_sep (locpath_var, ':', +- &locale_path, &locale_path_len) != 0) +- return NULL; +- +- if (__argz_add_sep (&locale_path, &locale_path_len, +- _nl_default_locale_path, ':') != 0) +- return NULL; +- } ++ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0) ++ return NULL; + + /* Get the names for the locales we are interested in. We either + allow a composite name or a single name. */ +diff --git a/locale/setlocale.c b/locale/setlocale.c +index ead030d..0c0e314 100644 +--- a/locale/setlocale.c ++++ b/locale/setlocale.c +@@ -215,12 +215,65 @@ setdata (int category, struct __locale_data *data) + } + } + ++/* Return in *LOCALE_PATH and *LOCALE_PATH_LEN the locale data search path as ++ a colon-separated list. Return ENOMEM on error, zero otherwise. */ ++error_t ++compute_locale_search_path (char **locale_path, size_t *locale_path_len) ++{ ++ char* guix_locpath_var = getenv ("GUIX_LOCPATH"); ++ char *locpath_var = getenv ("LOCPATH"); ++ ++ if (guix_locpath_var != NULL && guix_locpath_var[0] != '\0') ++ { ++ /* Entries in 'GUIX_LOCPATH' take precedence over 'LOCPATH'. These ++ entries are systematically prefixed with "/X.Y" where "X.Y" is the ++ libc version.
*/ ++ if (__argz_create_sep (guix_locpath_var, ':', ++ locale_path, locale_path_len) != 0 ++ || __argz_suffix_entries (locale_path, locale_path_len, ++ "/" VERSION) != 0) ++ goto bail_out; ++ } ++ ++ if (locpath_var != NULL && locpath_var[0] != '\0') ++ { ++ char *reg_locale_path = NULL; ++ size_t reg_locale_path_len = 0; ++ ++ if (__argz_create_sep (locpath_var, ':', ++ &reg_locale_path, &reg_locale_path_len) != 0) ++ goto bail_out; ++ ++ if (__argz_append (locale_path, locale_path_len, ++ reg_locale_path, reg_locale_path_len) != 0) ++ goto bail_out; ++ ++ free (reg_locale_path); ++ } ++ ++ if (*locale_path != NULL) ++ { ++ /* Append the system default locale directory. */ ++ if (__argz_add_sep (locale_path, locale_path_len, ++ _nl_default_locale_path, ':') != 0) ++ goto bail_out; ++ } ++ ++ return 0; ++ ++ bail_out: ++ free (*locale_path); ++ *locale_path = NULL; ++ *locale_path_len = 0; ++ ++ return ENOMEM; ++} ++ + char * + setlocale (int category, const char *locale) + { + char *locale_path; + size_t locale_path_len; +- const char *locpath_var; + char *composite; + + /* Sanity check for CATEGORY argument. */ +@@ -251,17 +304,10 @@ setlocale (int category, const char *locale) + locale_path = NULL; + locale_path_len = 0; + +- locpath_var = getenv ("LOCPATH"); +- if (locpath_var != NULL && locpath_var[0] != '\0') ++ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0) + { +- if (__argz_create_sep (locpath_var, ':', +- &locale_path, &locale_path_len) != 0 +- || __argz_add_sep (&locale_path, &locale_path_len, +- _nl_default_locale_path, ':') != 0) +- { +- __libc_rwlock_unlock (__libc_setlocale_lock); +- return NULL; +- } ++ __libc_rwlock_unlock (__libc_setlocale_lock); ++ return NULL; + } + + if (category == LC_ALL) +diff --git a/string/Makefile b/string/Makefile +index 8424a61..f925503 100644 +--- a/string/Makefile ++++ b/string/Makefile +@@ -38,7 +38,7 @@ routines := strcat strchr strcmp strcoll strcpy strcspn \ + swab strfry memfrob memmem rawmemchr strchrnul \ + $(addprefix argz-,append count create ctsep next \ + delete extract insert stringify \ +- addsep replace) \ ++ addsep replace suffix) \ + envz basename \ + strcoll_l strxfrm_l string-inlines memrchr \ + xpg-strerror strerror_l +diff --git a/string/argz-suffix.c b/string/argz-suffix.c +new file mode 100644 +index 0000000..505b0f2 +--- /dev/null ++++ b/string/argz-suffix.c +@@ -0,0 +1,56 @@ ++/* Copyright (C) 2015 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ludovic Courtès . ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. 
*/ ++ ++#include <argz.h> ++#include <errno.h> ++#include <stdlib.h> ++#include <string.h> ++ ++ ++error_t ++__argz_suffix_entries (char **argz, size_t *argz_len, const char *suffix) ++ ++{ ++ size_t suffix_len = strlen (suffix); ++ size_t count = __argz_count (*argz, *argz_len); ++ size_t new_argz_len = *argz_len + count * suffix_len; ++ char *new_argz = malloc (new_argz_len); ++ ++ if (new_argz) ++ { ++ char *p = new_argz, *entry; ++ ++ for (entry = *argz; ++ entry != NULL; ++ entry = argz_next (*argz, *argz_len, entry)) ++ { ++ p = stpcpy (p, entry); ++ p = stpcpy (p, suffix); ++ p++; ++ } ++ ++ free (*argz); ++ *argz = new_argz; ++ *argz_len = new_argz_len; ++ ++ return 0; ++ } ++ else ++ return ENOMEM; ++} ++weak_alias (__argz_suffix_entries, argz_suffix_entries) +diff --git a/string/argz.h b/string/argz.h +index bb62a31..d276a35 100644 +--- a/string/argz.h ++++ b/string/argz.h +@@ -134,6 +134,16 @@ extern error_t argz_replace (char **__restrict __argz, + const char *__restrict __str, + const char *__restrict __with, + unsigned int *__restrict __replace_count); ++ ++/* Suffix each entry of ARGZ & ARGZ_LEN with SUFFIX. Return 0 on success, ++ and ENOMEM if memory cannot be allocated. */ ++extern error_t __argz_suffix_entries (char **__restrict __argz, ++ size_t *__restrict __argz_len, ++ const char *__restrict __suffix); ++extern error_t argz_suffix_entries (char **__restrict __argz, ++ size_t *__restrict __argz_len, ++ const char *__restrict __suffix); ++ + + /* Returns the next entry in ARGZ & ARGZ_LEN after ENTRY, or NULL if there + are no more. If entry is NULL, then the first entry is returned. This diff --git a/contrib/guix/patches/lief-fix-ppc64-nx-default.patch b/contrib/guix/patches/lief-fix-ppc64-nx-default.patch new file mode 100644 index 0000000000000..101bc1ddc0cfe --- /dev/null +++ b/contrib/guix/patches/lief-fix-ppc64-nx-default.patch @@ -0,0 +1,29 @@ +Correct default for Binary::has_nx on ppc64 + +From the Linux kernel source: + + * This is the default if a program doesn't have a PT_GNU_STACK + * program header entry. The PPC64 ELF ABI has a non executable stack + * stack by default, so in the absence of a PT_GNU_STACK program header + * we turn execute permission off. + +This patch can be dropped the next time we update LIEF. + +diff --git a/src/ELF/Binary.cpp b/src/ELF/Binary.cpp +index a90be1ab..fd2d9764 100644 +--- a/src/ELF/Binary.cpp ++++ b/src/ELF/Binary.cpp +@@ -1084,7 +1084,12 @@ bool Binary::has_nx() const { + return segment->type() == SEGMENT_TYPES::PT_GNU_STACK; + }); + if (it_stack == std::end(segments_)) { +- return false; ++ if (header().machine_type() == ARCH::EM_PPC64) { ++ // The PPC64 ELF ABI has a non-executable stack by default.
++ return true; ++ } else { ++ return false; ++ } + } + + return !(*it_stack)->has(ELF_SEGMENT_FLAGS::PF_X); diff --git a/contrib/guix/patches/nsis-gcc-10-memmove.patch b/contrib/guix/patches/nsis-gcc-10-memmove.patch new file mode 100644 index 0000000000000..a1aadfd4f3618 --- /dev/null +++ b/contrib/guix/patches/nsis-gcc-10-memmove.patch @@ -0,0 +1,23 @@ +commit f6df41524e703dc471e283e566a48e05a735b7f2 +Author: Anders +Date: Sat Jun 27 23:18:45 2020 +0000 + + Don't let GCC 10 generate memmove calls (bug #1248) + + git-svn-id: https://svn.code.sf.net/p/nsis/code/NSIS/trunk@7189 212acab6-be3b-0410-9dea-997c60f758d6 + +diff --git a/SCons/Config/gnu b/SCons/Config/gnu +index bfcb362d..21fa446b 100644 +--- a/SCons/Config/gnu ++++ b/SCons/Config/gnu +@@ -103,6 +103,10 @@ stub_env.Append(LINKFLAGS = ['$NODEFLIBS_FLAG']) # no standard libraries + stub_env.Append(LINKFLAGS = ['$ALIGN_FLAG']) # 512 bytes align + stub_env.Append(LINKFLAGS = ['$MAP_FLAG']) # generate map file + ++conf = FlagsConfigure(stub_env) ++conf.CheckCompileFlag('-fno-tree-loop-distribute-patterns') # GCC 10: Don't generate msvcrt!memmove calls (bug #1248) ++conf.Finish() ++ + stub_uenv = stub_env.Clone() + stub_uenv.Append(CPPDEFINES = ['_UNICODE', 'UNICODE']) + diff --git a/contrib/guix/patches/oscrypto-hard-code-openssl.patch b/contrib/guix/patches/oscrypto-hard-code-openssl.patch new file mode 100644 index 0000000000000..32027f2d09af1 --- /dev/null +++ b/contrib/guix/patches/oscrypto-hard-code-openssl.patch @@ -0,0 +1,13 @@ +diff --git a/oscrypto/__init__.py b/oscrypto/__init__.py +index eb27313..371ab24 100644 +--- a/oscrypto/__init__.py ++++ b/oscrypto/__init__.py +@@ -302,3 +302,8 @@ def load_order(): + 'oscrypto._win.tls', + 'oscrypto.tls', + ] ++ ++ ++paths = '@GUIX_OSCRYPTO_USE_OPENSSL@'.split(',') ++assert len(paths) == 2, 'Value for OSCRYPTO_USE_OPENSSL env var must be two paths separated by a comma' ++use_openssl(*paths) diff --git a/contrib/guix/patches/vmov-alignment.patch b/contrib/guix/patches/vmov-alignment.patch new file mode 100644 index 0000000000000..072f76eafd38f --- /dev/null +++ b/contrib/guix/patches/vmov-alignment.patch @@ -0,0 +1,267 @@ +Description: Use unaligned VMOV instructions +Author: Stephen Kitt +Bug-Debian: https://bugs.debian.org/939559 + +Based on a patch originally by Claude Heiland-Allen + +--- a/gcc/config/i386/sse.md ++++ b/gcc/config/i386/sse.md +@@ -1058,17 +1058,11 @@ + { + if (FLOAT_MODE_P (GET_MODE_INNER (mode))) + { +- if (misaligned_operand (operands[1], mode)) +- return "vmovu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; +- else +- return "vmova\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; ++ return "vmovu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; + } + else + { +- if (misaligned_operand (operands[1], mode)) +- return "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; +- else +- return "vmovdqa\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; ++ return "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; + } + } + [(set_attr "type" "ssemov") +@@ -1184,17 +1178,11 @@ + { + if (FLOAT_MODE_P (GET_MODE_INNER (mode))) + { +- if (misaligned_operand (operands[0], mode)) +- return "vmovu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; +- else +- return "vmova\t{%1, %0%{%2%}|%0%{%2%}, %1}"; ++ return "vmovu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; + } + else + { +- if (misaligned_operand (operands[0], mode)) +- return "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; +- else +- return "vmovdqa\t{%1, %0%{%2%}|%0%{%2%}, %1}"; ++ return "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; + } + } + [(set_attr "type" "ssemov") +@@ -7806,7 +7794,7 @@ + "TARGET_SSE && !(MEM_P 
(operands[0]) && MEM_P (operands[1]))" + "@ + %vmovlps\t{%1, %0|%q0, %1} +- %vmovaps\t{%1, %0|%0, %1} ++ %vmovups\t{%1, %0|%0, %1} + %vmovlps\t{%1, %d0|%d0, %q1}" + [(set_attr "type" "ssemov") + (set_attr "prefix" "maybe_vex") +@@ -13997,29 +13985,15 @@ + switch (mode) + { + case E_V8DFmode: +- if (misaligned_operand (operands[2], mode)) +- return "vmovupd\t{%2, %x0|%x0, %2}"; +- else +- return "vmovapd\t{%2, %x0|%x0, %2}"; ++ return "vmovupd\t{%2, %x0|%x0, %2}"; + case E_V16SFmode: +- if (misaligned_operand (operands[2], mode)) +- return "vmovups\t{%2, %x0|%x0, %2}"; +- else +- return "vmovaps\t{%2, %x0|%x0, %2}"; ++ return "vmovups\t{%2, %x0|%x0, %2}"; + case E_V8DImode: +- if (misaligned_operand (operands[2], mode)) +- return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}" ++ return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}" + : "vmovdqu\t{%2, %x0|%x0, %2}"; +- else +- return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}" +- : "vmovdqa\t{%2, %x0|%x0, %2}"; + case E_V16SImode: +- if (misaligned_operand (operands[2], mode)) +- return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}" ++ return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}" + : "vmovdqu\t{%2, %x0|%x0, %2}"; +- else +- return which_alternative == 2 ? "vmovdqa32\t{%2, %x0|%x0, %2}" +- : "vmovdqa\t{%2, %x0|%x0, %2}"; + default: + gcc_unreachable (); + } +@@ -21225,63 +21199,27 @@ + switch (get_attr_mode (insn)) + { + case MODE_V16SF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovups\t{%1, %t0|%t0, %1}"; +- else +- return "vmovaps\t{%1, %t0|%t0, %1}"; ++ return "vmovups\t{%1, %t0|%t0, %1}"; + case MODE_V8DF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovupd\t{%1, %t0|%t0, %1}"; +- else +- return "vmovapd\t{%1, %t0|%t0, %1}"; ++ return "vmovupd\t{%1, %t0|%t0, %1}"; + case MODE_V8SF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovups\t{%1, %x0|%x0, %1}"; +- else +- return "vmovaps\t{%1, %x0|%x0, %1}"; ++ return "vmovups\t{%1, %x0|%x0, %1}"; + case MODE_V4DF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovupd\t{%1, %x0|%x0, %1}"; +- else +- return "vmovapd\t{%1, %x0|%x0, %1}"; ++ return "vmovupd\t{%1, %x0|%x0, %1}"; + case MODE_XI: +- if (misaligned_operand (operands[1], mode)) +- { +- if (which_alternative == 2) +- return "vmovdqu\t{%1, %t0|%t0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqu64\t{%1, %t0|%t0, %1}"; +- else +- return "vmovdqu32\t{%1, %t0|%t0, %1}"; +- } ++ if (which_alternative == 2) ++ return "vmovdqu\t{%1, %t0|%t0, %1}"; ++ else if (GET_MODE_SIZE (mode) == 8) ++ return "vmovdqu64\t{%1, %t0|%t0, %1}"; + else +- { +- if (which_alternative == 2) +- return "vmovdqa\t{%1, %t0|%t0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqa64\t{%1, %t0|%t0, %1}"; +- else +- return "vmovdqa32\t{%1, %t0|%t0, %1}"; +- } ++ return "vmovdqu32\t{%1, %t0|%t0, %1}"; + case MODE_OI: +- if (misaligned_operand (operands[1], mode)) +- { +- if (which_alternative == 2) +- return "vmovdqu\t{%1, %x0|%x0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqu64\t{%1, %x0|%x0, %1}"; +- else +- return "vmovdqu32\t{%1, %x0|%x0, %1}"; +- } ++ if (which_alternative == 2) ++ return "vmovdqu\t{%1, %x0|%x0, %1}"; ++ else if (GET_MODE_SIZE (mode) == 8) ++ return "vmovdqu64\t{%1, %x0|%x0, %1}"; + else +- { +- if (which_alternative == 2) +- return "vmovdqa\t{%1, %x0|%x0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqa64\t{%1, %x0|%x0, %1}"; +- else +- return "vmovdqa32\t{%1, %x0|%x0, %1}"; +- } 
++ return "vmovdqu32\t{%1, %x0|%x0, %1}"; + default: + gcc_unreachable (); + } +--- a/gcc/config/i386/i386.c ++++ b/gcc/config/i386/i386.c +@@ -4981,13 +4981,13 @@ + switch (type) + { + case opcode_int: +- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32"; ++ opcode = "vmovdqu32"; + break; + case opcode_float: +- opcode = misaligned_p ? "vmovups" : "vmovaps"; ++ opcode = "vmovups"; + break; + case opcode_double: +- opcode = misaligned_p ? "vmovupd" : "vmovapd"; ++ opcode = "vmovupd"; + break; + } + } +@@ -4996,16 +4996,16 @@ + switch (scalar_mode) + { + case E_SFmode: +- opcode = misaligned_p ? "%vmovups" : "%vmovaps"; ++ opcode = "%vmovups"; + break; + case E_DFmode: +- opcode = misaligned_p ? "%vmovupd" : "%vmovapd"; ++ opcode = "%vmovupd"; + break; + case E_TFmode: + if (evex_reg_p) +- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64"; ++ opcode = "vmovdqu64"; + else +- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa"; ++ opcode = "%vmovdqu"; + break; + default: + gcc_unreachable (); +@@ -5017,48 +5017,32 @@ + { + case E_QImode: + if (evex_reg_p) +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu8" +- : "vmovdqu64") +- : "vmovdqa64"); ++ opcode = TARGET_AVX512BW ? "vmovdqu8" : "vmovdqu64"; + else +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu8" +- : "%vmovdqu") +- : "%vmovdqa"); ++ opcode = TARGET_AVX512BW ? "vmovdqu8" : "%vmovdqu"; + break; + case E_HImode: + if (evex_reg_p) +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu16" +- : "vmovdqu64") +- : "vmovdqa64"); ++ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64"; + else +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu16" +- : "%vmovdqu") +- : "%vmovdqa"); ++ opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu"; + break; + case E_SImode: + if (evex_reg_p) +- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32"; ++ opcode = "vmovdqu32"; + else +- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa"; ++ opcode = "%vmovdqu"; + break; + case E_DImode: + case E_TImode: + case E_OImode: + if (evex_reg_p) +- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64"; ++ opcode = "vmovdqu64"; + else +- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa"; ++ opcode = "%vmovdqu"; + break; + case E_XImode: +- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64"; ++ opcode = "vmovdqu64"; + break; + default: + gcc_unreachable (); diff --git a/contrib/init/dashd.service b/contrib/init/dashd.service index 8bdb2ae207b6f..5dfd7c0c4dc4f 100644 --- a/contrib/init/dashd.service +++ b/contrib/init/dashd.service @@ -5,8 +5,9 @@ # See "man systemd.service" for details. # Note that almost all daemon options could be specified in -# /etc/dash/dash.conf, except for those explicitly specified as arguments -# in ExecStart= +# /etc/dash/dash.conf, but keep in mind those explicitly +# specified as arguments in ExecStart= will override those in the +# config file. [Unit] Description=Dash daemon @@ -18,6 +19,10 @@ ExecStart=/usr/bin/dashd -daemon \ -conf=/etc/dash/dash.conf \ -datadir=/var/lib/dashd +# Make sure the config directory is readable by the service user +PermissionsStartOnly=true +ExecStartPre=/bin/chgrp dashcore /etc/dash + # Process management #################### @@ -54,6 +59,9 @@ PrivateTmp=true # Mount /usr, /boot/ and /etc read-only for the process. ProtectSystem=full +# Deny access to /home, /root and /run/user +ProtectHome=true + # Disallow the process and all of its children to gain # new privileges through execve(). 
NoNewPrivileges=true diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh new file mode 100755 index 0000000000000..81c615d93deea --- /dev/null +++ b/contrib/install_db4.sh @@ -0,0 +1,251 @@ +#!/bin/sh +# Copyright (c) 2017-2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# Install libdb4.8 (Berkeley DB). + +export LC_ALL=C +set -e + +if [ -z "${1}" ]; then + echo "Usage: $0 [ ...]" + echo + echo "Must specify a single argument: the directory in which db4 will be built." + echo "This is probably \`pwd\` if you're at the root of the dash repository." + exit 1 +fi + +expand_path() { + cd "${1}" && pwd -P +} + +BDB_PREFIX="$(expand_path ${1})/db4"; shift; +BDB_VERSION='db-4.8.30.NC' +BDB_HASH='12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef' +BDB_URL="https://download.oracle.com/berkeley-db/${BDB_VERSION}.tar.gz" + +check_exists() { + command -v "$1" >/dev/null +} + +sha256_check() { + # Args: + # + if check_exists sha256sum; then + echo "${1} ${2}" | sha256sum -c + elif check_exists sha256; then + if [ "$(uname)" = "FreeBSD" ]; then + sha256 -c "${1}" "${2}" + else + echo "${1} ${2}" | sha256 -c + fi + else + echo "${1} ${2}" | shasum -a 256 -c + fi +} + +http_get() { + # Args: + # + # It's acceptable that we don't require SSL here because we manually verify + # content hashes below. + # + if [ -f "${2}" ]; then + echo "File ${2} already exists; not downloading again" + elif check_exists curl; then + curl --insecure --retry 5 "${1}" -o "${2}" + else + wget --no-check-certificate "${1}" -O "${2}" + fi + + sha256_check "${3}" "${2}" +} + +mkdir -p "${BDB_PREFIX}" +http_get "${BDB_URL}" "${BDB_VERSION}.tar.gz" "${BDB_HASH}" +tar -xzvf ${BDB_VERSION}.tar.gz -C "$BDB_PREFIX" +cd "${BDB_PREFIX}/${BDB_VERSION}/" + +# Apply a patch necessary when building with clang and c++11 (see https://community.oracle.com/thread/3952592) +patch --ignore-whitespace -p1 << 'EOF' +commit 3311d68f11d1697565401eee6efc85c34f022ea7 +Author: fanquake +Date: Mon Aug 17 20:03:56 2020 +0800 + + Fix C++11 compatibility + +diff --git a/dbinc/atomic.h b/dbinc/atomic.h +index 0034dcc..7c11d4a 100644 +--- a/dbinc/atomic.h ++++ b/dbinc/atomic.h +@@ -70,7 +70,7 @@ typedef struct { + * These have no memory barriers; the caller must include them when necessary. + */ + #define atomic_read(p) ((p)->value) +-#define atomic_init(p, val) ((p)->value = (val)) ++#define atomic_init_db(p, val) ((p)->value = (val)) + + #ifdef HAVE_ATOMIC_SUPPORT + +@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val; + #define atomic_inc(env, p) __atomic_inc(p) + #define atomic_dec(env, p) __atomic_dec(p) + #define atomic_compare_exchange(env, p, o, n) \ +- __atomic_compare_exchange((p), (o), (n)) ++ __atomic_compare_exchange_db((p), (o), (n)) + static inline int __atomic_inc(db_atomic_t *p) + { + int temp; +@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p) + * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html + * which configure could be changed to use. 
+ */ +-static inline int __atomic_compare_exchange( ++static inline int __atomic_compare_exchange_db( + db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval) + { + atomic_value_t was; +@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange( + #define atomic_dec(env, p) (--(p)->value) + #define atomic_compare_exchange(env, p, oldval, newval) \ + (DB_ASSERT(env, atomic_read(p) == (oldval)), \ +- atomic_init(p, (newval)), 1) ++ atomic_init_db(p, (newval)), 1) + #else + #define atomic_inc(env, p) __atomic_inc(env, p) + #define atomic_dec(env, p) __atomic_dec(env, p) +diff --git a/mp/mp_fget.c b/mp/mp_fget.c +index 5fdee5a..0b75f57 100644 +--- a/mp/mp_fget.c ++++ b/mp/mp_fget.c +@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */ + + /* Initialize enough so we can call __memp_bhfree. */ + alloc_bhp->flags = 0; +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + #ifdef DIAGNOSTIC + if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) { + __db_errx(env, +@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */ + MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize, + PROT_READ); + +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + MUTEX_LOCK(env, alloc_bhp->mtx_buf); + alloc_bhp->priority = bhp->priority; + alloc_bhp->pgno = bhp->pgno; +diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c +index 34467d2..f05aa0c 100644 +--- a/mp/mp_mvcc.c ++++ b/mp/mp_mvcc.c +@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp) + #else + memcpy(frozen_bhp, bhp, SSZA(BH, buf)); + #endif +- atomic_init(&frozen_bhp->ref, 0); ++ atomic_init_db(&frozen_bhp->ref, 0); + if (mutex != MUTEX_INVALID) + frozen_bhp->mtx_buf = mutex; + else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH, +@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp) + #endif + alloc_bhp->mtx_buf = mutex; + MUTEX_LOCK(env, alloc_bhp->mtx_buf); +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + F_CLR(alloc_bhp, BH_FROZEN); + } + +diff --git a/mp/mp_region.c b/mp/mp_region.c +index e6cece9..ddbe906 100644 +--- a/mp/mp_region.c ++++ b/mp/mp_region.c +@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0) + return (ret); + SH_TAILQ_INIT(&htab[i].hash_bucket); +- atomic_init(&htab[i].hash_page_dirty, 0); ++ atomic_init_db(&htab[i].hash_page_dirty, 0); + } + + /* +@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? 
MUTEX_INVALID : + mtx_base + i; + SH_TAILQ_INIT(&hp->hash_bucket); +- atomic_init(&hp->hash_page_dirty, 0); ++ atomic_init_db(&hp->hash_page_dirty, 0); + #ifdef HAVE_STATISTICS + hp->hash_io_wait = 0; + hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0; +diff --git a/mutex/mut_method.c b/mutex/mut_method.c +index 2588763..5c6d516 100644 +--- a/mutex/mut_method.c ++++ b/mutex/mut_method.c +@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval) + MUTEX_LOCK(env, mtx); + ret = atomic_read(v) == oldval; + if (ret) +- atomic_init(v, newval); ++ atomic_init_db(v, newval); + MUTEX_UNLOCK(env, mtx); + + return (ret); +diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c +index f3922e0..e40fcdf 100644 +--- a/mutex/mut_tas.c ++++ b/mutex/mut_tas.c +@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags) + + #ifdef HAVE_SHARED_LATCHES + if (F_ISSET(mutexp, DB_MUTEX_SHARED)) +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + else + #endif + if (MUTEX_INIT(&mutexp->tas)) { +@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex) + F_CLR(mutexp, DB_MUTEX_LOCKED); + /* Flush flag update before zeroing count */ + MEMBAR_EXIT(); +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + } else { + DB_ASSERT(env, sharecount > 0); + MEMBAR_EXIT(); +EOF + +# The packaged config.guess and config.sub are ancient (2009) and can cause build issues. +# Replace them with modern versions. +# See https://github.com/bitcoin/bitcoin/issues/16064 +CONFIG_GUESS_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f' +CONFIG_GUESS_HASH='c8f530e01840719871748a8071113435bdfdf75b74c57e78e47898edea8754ae' +CONFIG_SUB_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f' +CONFIG_SUB_HASH='3969f7d5f6967ccc6f792401b8ef3916a1d1b1d0f0de5a4e354c95addb8b800e' + +rm -f "dist/config.guess" +rm -f "dist/config.sub" + +http_get "${CONFIG_GUESS_URL}" dist/config.guess "${CONFIG_GUESS_HASH}" +http_get "${CONFIG_SUB_URL}" dist/config.sub "${CONFIG_SUB_HASH}" + +cd build_unix/ + +"${BDB_PREFIX}/${BDB_VERSION}/dist/configure" \ + --enable-cxx --disable-shared --disable-replication --with-pic --prefix="${BDB_PREFIX}" \ + "${@}" + +make install + +echo +echo "db4 build complete." +echo +# shellcheck disable=SC2016 +echo 'When compiling dashd, run `./configure` in the following way:' +echo +echo " export BDB_PREFIX='${BDB_PREFIX}'" +# shellcheck disable=SC2016 +echo ' ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" ...' 
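Putting the final echo lines above into practice, a typical invocation from the root of a dash checkout looks like this (the db4 path is simply what the script creates under the directory you pass it):

    $ ./contrib/install_db4.sh "$(pwd)"
    ...
    db4 build complete.
    $ export BDB_PREFIX="$(pwd)/db4"
    $ ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
                  BDB_CFLAGS="-I${BDB_PREFIX}/include"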
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index ef489cbf639ff..848b0a4f333c5 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -48,17 +48,7 @@ def wordreverse(in_buf): return b''.join(out_words) def calc_hdr_hash(blk_hdr): - #hash1 = hashlib.sha256() - #hash1.update(blk_hdr) - #hash1_o = hash1.digest() - - #hash2 = hashlib.sha256() - #hash2.update(hash1_o) - #hash2_o = hash2.digest() - - #return hash2_o - pow_hash = dash_hash.getPoWHash(blk_hdr) - return pow_hash + return dash_hash.getPoWHash(blk_hdr) def calc_hash_str(blk_hdr): hash = calc_hdr_hash(blk_hdr) diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md index 73cbd71e8c5d4..3c38ad62b592b 100644 --- a/contrib/macdeploy/README.md +++ b/contrib/macdeploy/README.md @@ -6,10 +6,6 @@ The `macdeployqtplus` script should not be run manually. Instead, after building make deploy ``` -During the deployment process, the disk image window will pop up briefly -when the fancy settings are applied. This is normal, please do not interfere, -the process will unmount the DMG and cleanup before finishing. - When complete, it will have produced `Dash-Qt.dmg`. ## SDK Extraction @@ -17,13 +13,13 @@ When complete, it will have produced `Dash-Qt.dmg`. ### Step 1: Obtaining `Xcode.app` Our current macOS SDK -(`Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`) can be +(`Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`) can be extracted from -[Xcode_12.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip). +[Xcode_12.2.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip). Alternatively, after logging in to your account go to 'Downloads', then 'More' -and look for [`Xcode_12.1`](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip). +and look for [`Xcode_12.2`](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip). An Apple ID and cookies enabled for the hostname are needed to download this. -The `sha256sum` of the archive should be `612443b1894b39368a596ea1607f30cbb0481ad44d5e29c75edb71a6d2cf050f`. +The `sha256sum` of the archive should be `28d352f8c14a43d9b8a082ac6338dc173cb153f964c6e8fb6ba389e5be528bd0`. After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip` archive. This makes the SDK less-trivial to extract on non-macOS machines. One @@ -34,25 +30,25 @@ approach (tested on Debian Buster) is outlined below: apt install cpio git clone https://github.com/bitcoin-core/apple-sdk-tools.git -# Unpack Xcode_12.1.xip and place the resulting Xcode.app in your current +# Unpack Xcode_12.2.xip and place the resulting Xcode.app in your current # working directory -python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.1.xip | cpio -d -i +python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.2.xip | cpio -d -i ``` On macOS the process is more straightforward: ```bash -xip -x Xcode_12.1.xip +xip -x Xcode_12.2.xip ``` -### Step 2: Generating `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app` +### Step 2: Generating `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app` -To generate `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`, run +To generate `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`, run the script [`gen-sdk`](./gen-sdk) with the path to `Xcode.app` (extracted in the previous stage) as the first argument. 
```bash -# Generate a Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz from +# Generate a Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz from # the supplied Xcode.app ./contrib/macdeploy/gen-sdk '/path/to/Xcode.app' ``` @@ -83,7 +79,7 @@ and its `libLTO.so` rather than those from `llvmgcc`, as it was originally done To complicate things further, all builds must target an Apple SDK. These SDKs are free to download, but not redistributable. To obtain it, register for an Apple Developer Account, -then download [Xcode_12.1](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip). +then download [Xcode_12.2](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip). This file is many gigabytes in size, but most (but not all) of what we need is contained only in a single directory: @@ -110,7 +106,7 @@ broken. Only the compression feature is currently used. Ideally, the creation co and `xorrisofs` would no longer be necessary. Background images and other features can be added to DMG files by inserting a -`.DS_Store` before creation. This is generated by the script `contrib/macdeploy/custom_dsstore.py`. +`.DS_Store` during creation. As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in order to satisfy the new Gatekeeper requirements. Because this private key cannot be diff --git a/contrib/macdeploy/background.svg b/contrib/macdeploy/background.svg deleted file mode 100644 index 9c330af45142e..0000000000000 --- a/contrib/macdeploy/background.svg +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - PACKAGE_NAME - - - - - diff --git a/contrib/macdeploy/background.tiff b/contrib/macdeploy/background.tiff new file mode 100644 index 0000000000000..1fb088c8374ac Binary files /dev/null and b/contrib/macdeploy/background.tiff differ diff --git a/contrib/macdeploy/custom_dsstore.py b/contrib/macdeploy/custom_dsstore.py deleted file mode 100755 index d7af07e9ed698..0000000000000 --- a/contrib/macdeploy/custom_dsstore.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2013-2015 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-from ds_store import DSStore -from mac_alias import Alias -import sys - -output_file = sys.argv[1] -package_name_ns = sys.argv[2] - -ds = DSStore.open(output_file, 'w+') -ds['.']['bwsp'] = { - 'ShowStatusBar': False, - 'WindowBounds': '{{300, 280}, {500, 343}}', - 'ContainerShowSidebar': False, - 'SidebarWidth': 0, - 'ShowTabView': False, - 'PreviewPaneVisibility': False, - 'ShowToolbar': False, - 'ShowSidebar': False, - 'ShowPathbar': True -} - -icvp = { - 'gridOffsetX': 0.0, - 'textSize': 12.0, - 'viewOptionsVersion': 1, - 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', - 'backgroundColorBlue': 1.0, - 'iconSize': 96.0, - 'backgroundColorGreen': 1.0, - 'arrangeBy': 'none', - 'showIconPreview': True, - 'gridSpacing': 100.0, - 'gridOffsetY': 0.0, - 'showItemInfo': False, - 'labelOnBottom': True, - 'backgroundType': 2, - 'backgroundColorRed': 1.0 -} -alias = Alias.from_bytes(icvp['backgroundImageAlias']) -alias.volume.name = package_name_ns -alias.volume.posix_path = '/Volumes/' + package_name_ns -alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg' -alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00dashcoreuser:\x00Documents:\x00dashcore:\x00dashcore:\x00' + package_name_ns + '.temp.dmg' -alias.volume.disk_image_alias.target.posix_path = 'Users/dashcoreuser/Documents/dashcore/dashcore/' + package_name_ns + '.temp.dmg' -alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff' -icvp['backgroundImageAlias'] = alias.to_bytes() -ds['.']['icvp'] = icvp - -ds['.']['vSrn'] = ('long', 1) - -ds['Applications']['Iloc'] = (370, 156) -ds['Dash-Qt.app']['Iloc'] = (128, 156) - -ds.flush() -ds.close() diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh index 
de46d046eaafa..cb33ac4c8e302 100755 --- a/contrib/macdeploy/detached-sig-create.sh +++ b/contrib/macdeploy/detached-sig-create.sh @@ -8,9 +8,11 @@ set -e ROOTDIR=dist BUNDLE="${ROOTDIR}/Dash-Qt.app" +BINARY="${BUNDLE}/Contents/MacOS/Dash-Qt" SIGNAPPLE=signapple TEMPDIR=sign.temp -OUT=signature-osx.tar.gz +ARCH=$(${SIGNAPPLE} info ${BINARY} | head -n 1 | cut -d " " -f 1) +OUT="signature-osx-${ARCH}.tar.gz" OUTROOT=osx/dist if [ -z "$1" ]; then diff --git a/contrib/macdeploy/fancy.plist b/contrib/macdeploy/fancy.plist deleted file mode 100644 index 90071abeaba4d..0000000000000 --- a/contrib/macdeploy/fancy.plist +++ /dev/null @@ -1,32 +0,0 @@ - - - - - window_bounds - - 300 - 300 - 800 - 620 - - background_picture - background.tiff - icon_size - 96 - applications_symlink - - items_position - - Applications - - 370 - 156 - - Dash-Qt.app - - 128 - 156 - - - - diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 8142bc37b0d6d..249ce9c1876a4 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -16,9 +16,14 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. # -import subprocess, sys, re, os, shutil, stat, os.path, time -from string import Template +import plistlib +import sys, re, os, shutil, stat, os.path from argparse import ArgumentParser +from ds_store import DSStore +from mac_alias import Alias +from pathlib import Path +from subprocess import PIPE, run +from typing import List, Optional # This is ported from the original macdeployqt with modifications @@ -48,28 +53,18 @@ class FrameworkInfo(object): return False def __str__(self): - return """ Framework name: %s - Framework directory: %s - Framework path: %s - Binary name: %s - Binary directory: %s - Binary path: %s - Version: %s - Install name: %s - Deployed install name: %s - Source file Path: %s - Deployed Directory (relative to bundle): %s -""" % (self.frameworkName, - self.frameworkDirectory, - self.frameworkPath, - self.binaryName, - self.binaryDirectory, - self.binaryPath, - self.version, - self.installName, - self.deployedInstallName, - self.sourceFilePath, - self.destinationDirectory) + return f""" Framework name: {self.frameworkName} + Framework directory: {self.frameworkDirectory} + Framework path: {self.frameworkPath} + Binary name: {self.binaryName} + Binary directory: {self.binaryDirectory} + Binary path: {self.binaryPath} + Version: {self.version} + Install name: {self.installName} + Deployed install name: {self.deployedInstallName} + Source file Path: {self.sourceFilePath} + Deployed Directory (relative to bundle): {self.destinationDirectory} +""" def isDylib(self): return self.frameworkName.endswith(".dylib") @@ -85,7 +80,7 @@ class FrameworkInfo(object): bundleBinaryDirectory = "Contents/MacOS" @classmethod - def fromOtoolLibraryLine(cls, line): + def fromOtoolLibraryLine(cls, line: str) -> Optional['FrameworkInfo']: # Note: line must be trimmed if line == "": return None @@ -96,7 +91,7 @@ class FrameworkInfo(object): m = cls.reOLine.match(line) if m is None: - raise RuntimeError("otool line could not be parsed: " + line) + raise RuntimeError(f"otool line could not be parsed: {line}") path = m.group(1) @@ -116,7 +111,7 @@ class FrameworkInfo(object): info.version = "-" info.installName = path - info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName + info.deployedInstallName = f"@executable_path/../Frameworks/{info.binaryName}" info.sourceFilePath = path info.destinationDirectory = cls.bundleFrameworkDirectory else: @@ -128,7 +123,7 @@
class FrameworkInfo(object): break i += 1 if i == len(parts): - raise RuntimeError("Could not find .framework or .dylib in otool line: " + line) + raise RuntimeError(f"Could not find .framework or .dylib in otool line: {line}") info.frameworkName = parts[i] info.frameworkDirectory = "/".join(parts[:i]) @@ -139,25 +134,24 @@ class FrameworkInfo(object): info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName) info.version = parts[i+2] - info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join(info.frameworkName, info.binaryPath) + info.deployedInstallName = f"@executable_path/../Frameworks/{os.path.join(info.frameworkName, info.binaryPath)}" info.destinationDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory) info.sourceResourcesDirectory = os.path.join(info.frameworkPath, "Resources") info.sourceContentsDirectory = os.path.join(info.frameworkPath, "Contents") info.sourceVersionContentsDirectory = os.path.join(info.frameworkPath, "Versions", info.version, "Contents") info.destinationResourcesDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Resources") - info.destinationContentsDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Contents") info.destinationVersionContentsDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Versions", info.version, "Contents") return info class ApplicationBundleInfo(object): - def __init__(self, path): + def __init__(self, path: str): self.path = path - appName = "Dash-Qt" - self.binaryPath = os.path.join(path, "Contents", "MacOS", appName) + # for backwards compatibility reasons, this must remain as Dash-Qt + self.binaryPath = os.path.join(path, "Contents", "MacOS", "Dash-Qt") if not os.path.exists(self.binaryPath): - raise RuntimeError("Could not find bundle binary for " + path) + raise RuntimeError(f"Could not find bundle binary for {path}") self.resourcesPath = os.path.join(path, "Contents", "Resources") self.pluginPath = os.path.join(path, "Contents", "PlugIns") @@ -167,7 +161,7 @@ class DeploymentInfo(object): self.pluginPath = None self.deployedFrameworks = [] - def detectQtPath(self, frameworkDirectory): + def detectQtPath(self, frameworkDirectory: str): parentDir = os.path.dirname(frameworkDirectory) if os.path.exists(os.path.join(parentDir, "translations")): # Classic layout, e.g. "/usr/local/Trolltech/Qt-4.x.x" @@ -180,31 +174,27 @@ class DeploymentInfo(object): if os.path.exists(pluginPath): self.pluginPath = pluginPath - def usesFramework(self, name): - nameDot = "%s." % name - libNameDot = "lib%s." 
% name + def usesFramework(self, name: str) -> bool: for framework in self.deployedFrameworks: if framework.endswith(".framework"): - if framework.startswith(nameDot): + if framework.startswith(f"{name}."): return True elif framework.endswith(".dylib"): - if framework.startswith(libNameDot): + if framework.startswith(f"lib{name}."): return True return False -def getFrameworks(binaryPath, verbose): - if verbose >= 3: - print("Inspecting with otool: " + binaryPath) +def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]: + if verbose: + print(f"Inspecting with otool: {binaryPath}") otoolbin=os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - o_stdout, o_stderr = otool.communicate() + otool = run([otoolbin, "-L", binaryPath], stdout=PIPE, stderr=PIPE, universal_newlines=True) if otool.returncode != 0: - if verbose >= 1: - sys.stderr.write(o_stderr) - sys.stderr.flush() - raise RuntimeError("otool failed with return code %d" % otool.returncode) + sys.stderr.write(otool.stderr) + sys.stderr.flush() + raise RuntimeError(f"otool failed with return code {otool.returncode}") - otoolLines = o_stdout.split("\n") + otoolLines = otool.stdout.split("\n") otoolLines.pop(0) # First line is the inspected binary if ".framework" in binaryPath or binaryPath.endswith(".dylib"): otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency. @@ -214,50 +204,50 @@ def getFrameworks(binaryPath, verbose): line = line.replace("@loader_path", os.path.dirname(binaryPath)) info = FrameworkInfo.fromOtoolLibraryLine(line.strip()) if info is not None: - if verbose >= 3: + if verbose: print("Found framework:") print(info) libraries.append(info) return libraries -def runInstallNameTool(action, *args): +def runInstallNameTool(action: str, *args): installnametoolbin=os.getenv("INSTALLNAMETOOL", "install_name_tool") - subprocess.check_call([installnametoolbin, "-"+action] + list(args)) + run([installnametoolbin, "-"+action] + list(args), check=True) -def changeInstallName(oldName, newName, binaryPath, verbose): - if verbose >= 3: +def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int): + if verbose: print("Using install_name_tool:") print(" in", binaryPath) print(" change reference", oldName) print(" to", newName) runInstallNameTool("change", oldName, newName, binaryPath) -def changeIdentification(id, binaryPath, verbose): - if verbose >= 3: +def changeIdentification(id: str, binaryPath: str, verbose: int): + if verbose: print("Using install_name_tool:") print(" change identification in", binaryPath) print(" to", id) runInstallNameTool("id", id, binaryPath) -def runStrip(binaryPath, verbose): +def runStrip(binaryPath: str, verbose: int): stripbin=os.getenv("STRIP", "strip") - if verbose >= 3: + if verbose: print("Using strip:") print(" stripped", binaryPath) - subprocess.check_call([stripbin, "-x", binaryPath]) + run([stripbin, "-x", binaryPath], check=True) -def copyFramework(framework, path, verbose): +def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional[str]: if framework.sourceFilePath.startswith("Qt"): #standard place for Nokia Qt installer's frameworks - fromPath = "/Library/Frameworks/" + framework.sourceFilePath + fromPath = f"/Library/Frameworks/{framework.sourceFilePath}" else: fromPath = framework.sourceFilePath toDir = os.path.join(path, framework.destinationDirectory) toPath = os.path.join(toDir, framework.binaryName) if not 
os.path.exists(fromPath): - raise RuntimeError("No file at " + fromPath) + raise RuntimeError(f"No file at {fromPath}") if os.path.exists(toPath): return None # Already there @@ -266,7 +256,7 @@ def copyFramework(framework, path, verbose): os.makedirs(toDir) shutil.copy2(fromPath, toPath) - if verbose >= 3: + if verbose: print("Copied:", fromPath) print(" to:", toPath) @@ -280,13 +270,12 @@ def copyFramework(framework, path, verbose): linkto = framework.version if not os.path.exists(linkfrom): os.symlink(linkto, linkfrom) - if verbose >= 2: - print("Linked:", linkfrom, "->", linkto) + print("Linked:", linkfrom, "->", linkto) fromResourcesDir = framework.sourceResourcesDirectory if os.path.exists(fromResourcesDir): toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory) shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True) - if verbose >= 3: + if verbose: print("Copied resources:", fromResourcesDir) print(" to:", toResourcesDir) fromContentsDir = framework.sourceVersionContentsDirectory @@ -295,7 +284,7 @@ def copyFramework(framework, path, verbose): if os.path.exists(fromContentsDir): toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory) shutil.copytree(fromContentsDir, toContentsDir, symlinks=True) - if verbose >= 3: + if verbose: print("Copied Contents:", fromContentsDir) print(" to:", toContentsDir) elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout) @@ -303,13 +292,13 @@ def copyFramework(framework, path, verbose): qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib") if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath): shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True) - if verbose >= 3: + if verbose: print("Copied for libQtGui:", qtMenuNibSourcePath) print(" to:", qtMenuNibDestinationPath) return toPath -def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploymentInfo=None): +def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: if deploymentInfo is None: deploymentInfo = DeploymentInfo() @@ -317,16 +306,14 @@ def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploym framework = frameworks.pop(0) deploymentInfo.deployedFrameworks.append(framework.frameworkName) - if verbose >= 2: - print("Processing", framework.frameworkName, "...") + print("Processing", framework.frameworkName, "...") # Get the Qt path from one of the Qt frameworks if deploymentInfo.qtPath is None and framework.isQtFramework(): deploymentInfo.detectQtPath(framework.frameworkDirectory) - + if framework.installName.startswith("@executable_path") or framework.installName.startswith(bundlePath): - if verbose >= 2: - print(framework.frameworkName, "already deployed, skipping.") + print(framework.frameworkName, "already deployed, skipping.") continue # install_name_tool the new id into the binary @@ -355,15 +342,15 @@ def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploym return deploymentInfo -def deployFrameworksForAppBundle(applicationBundle, strip, verbose): +def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int) -> DeploymentInfo: frameworks = getFrameworks(applicationBundle.binaryPath, verbose) - if len(frameworks) == 0 and verbose >= 1: - print("Warning: Could not 
find any external frameworks to deploy in %s." % (applicationBundle.path)) + if len(frameworks) == 0: + print(f"Warning: Could not find any external frameworks to deploy in {applicationBundle.path}.") return DeploymentInfo() else: return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose) -def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): +def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: DeploymentInfo, strip: bool, verbose: int): # Lookup available plugins, exclude unneeded plugins = [] if deploymentInfo.pluginPath is None: @@ -373,10 +360,12 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): if pluginDirectory == "designer": # Skip designer plugins continue - elif pluginDirectory == "phonon" or pluginDirectory == "phonon_backend": - # Deploy the phonon plugins only if phonon is in use - if not deploymentInfo.usesFramework("phonon"): - continue + elif pluginDirectory == "printsupport": + # Skip printsupport plugins + continue + elif pluginDirectory == "imageformats": + # Skip imageformats plugins + continue elif pluginDirectory == "sqldrivers": # Deploy the sql plugins only if QtSql is in use if not deploymentInfo.usesFramework("QtSql"): @@ -409,6 +398,42 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): # Deploy the mediaservice plugins only if QtMultimediaWidgets is in use if not deploymentInfo.usesFramework("QtMultimediaWidgets"): continue + elif pluginDirectory == "canbus": + # Deploy the canbus plugins only if QtSerialBus is in use + if not deploymentInfo.usesFramework("QtSerialBus"): + continue + elif pluginDirectory == "webview": + # Deploy the webview plugins only if QtWebView is in use + if not deploymentInfo.usesFramework("QtWebView"): + continue + elif pluginDirectory == "gamepads": + # Deploy the gamepads plugins only if QtGamepad is in use + if not deploymentInfo.usesFramework("QtGamepad"): + continue + elif pluginDirectory == "geoservices": + # Deploy the geoservices plugins only if QtLocation is in use + if not deploymentInfo.usesFramework("QtLocation"): + continue + elif pluginDirectory == "texttospeech": + # Deploy the texttospeech plugins only if QtTextToSpeech is in use + if not deploymentInfo.usesFramework("QtTextToSpeech"): + continue + elif pluginDirectory == "virtualkeyboard": + # Deploy the virtualkeyboard plugins only if QtVirtualKeyboard is in use + if not deploymentInfo.usesFramework("QtVirtualKeyboard"): + continue + elif pluginDirectory == "sceneparsers": + # Deploy the sceneparsers plugins only if Qt3DCore is in use + if not deploymentInfo.usesFramework("Qt3DCore"): + continue + elif pluginDirectory == "renderplugins": + # Deploy the renderplugins plugins only if Qt3DCore is in use + if not deploymentInfo.usesFramework("Qt3DCore"): + continue + elif pluginDirectory == "geometryloaders": + # Deploy the geometryloaders plugins only if Qt3DCore is in use + if not deploymentInfo.usesFramework("Qt3DCore"): + continue for pluginName in filenames: pluginPath = os.path.join(pluginDirectory, pluginName) @@ -431,12 +456,15 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): # Deploy the accessible qtquick plugin only if QtQuick is in use if not deploymentInfo.usesFramework("QtQuick"): continue + elif pluginPath == "platforminputcontexts/libqtvirtualkeyboardplugin.dylib": + # Deploy the virtualkeyboardplugin plugin only if QtVirtualKeyboard is in use + if not deploymentInfo.usesFramework("QtVirtualKeyboard"): + continue
plugins.append((pluginDirectory, pluginName)) for pluginDirectory, pluginName in plugins: - if verbose >= 2: - print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") + print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") sourcePath = os.path.join(deploymentInfo.pluginPath, pluginDirectory, pluginName) destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory) @@ -445,7 +473,7 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): destinationPath = os.path.join(destinationDirectory, pluginName) shutil.copy2(sourcePath, destinationPath) - if verbose >= 3: + if verbose: print("Copied:", sourcePath) print(" to:", destinationPath) @@ -461,147 +489,50 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): if dependency.frameworkName not in deploymentInfo.deployedFrameworks: deployFrameworks([dependency], appBundleInfo.path, destinationPath, strip, verbose, deploymentInfo) -qt_conf="""[Paths] -Translations=Resources -Plugins=PlugIns -""" - ap = ArgumentParser(description="""Improved version of macdeployqt. Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. Note, that the "dist" folder will be deleted before deploying on each run. -Optionally, Qt translation files (.qm) and additional resources can be added to the bundle. - -Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments -to the codesign tool. -E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""") +Optionally, Qt translation files (.qm) can be added to the bundle.""") ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", help="application bundle to be deployed") -ap.add_argument("-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug") +ap.add_argument("appname", nargs=1, metavar="appname", help="name of the app being deployed") +ap.add_argument("-verbose", nargs="?", const=True, help="Output additional debugging information") ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment") ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries") -ap.add_argument("-sign", dest="sign", action="store_true", default=False, help="sign .app bundle with codesign tool") -ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used") -ap.add_argument("-fancy", nargs=1, metavar="plist", default=[], help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work") -ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's resources; the language list must be separated with commas, not with whitespace") -ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translation files") -ap.add_argument("-add-resources", nargs="+", metavar="path", default=[], help="list of additional files or folders to be copied into the bundle's resources; must be the last argument") -ap.add_argument("-volname", nargs=1, metavar="volname", default=[], help="custom volume name for dmg") +ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image") 
+ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translations. Base translations will automatically be added to the bundle's resources.") config = ap.parse_args() -verbose = config.verbose[0] +verbose = config.verbose # ------------------------------------------------ app_bundle = config.app_bundle[0] +appname = config.appname[0] if not os.path.exists(app_bundle): - if verbose >= 1: - sys.stderr.write("Error: Could not find app bundle \"%s\"\n" % (app_bundle)) + sys.stderr.write(f"Error: Could not find app bundle \"{app_bundle}\"\n") sys.exit(1) -app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0] - -# ------------------------------------------------ -translations_dir = None -if config.translations_dir and config.translations_dir[0]: - if os.path.exists(config.translations_dir[0]): - translations_dir = config.translations_dir[0] - else: - if verbose >= 1: - sys.stderr.write("Error: Could not find translation dir \"%s\"\n" % (translations_dir)) - sys.exit(1) -# ------------------------------------------------ - -for p in config.add_resources: - if verbose >= 3: - print("Checking for \"%s\"..." % p) - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find additional resource file \"%s\"\n" % (p)) - sys.exit(1) - -# ------------------------------------------------ - -if len(config.fancy) == 1: - if verbose >= 3: - print("Fancy: Importing plistlib...") - try: - import plistlib - except ImportError: - if verbose >= 1: - sys.stderr.write("Error: Could not import plistlib which is required for fancy disk images.\n") - sys.exit(1) - - p = config.fancy[0] - if verbose >= 3: - print("Fancy: Loading \"%s\"..." % p) - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find fancy disk image plist at \"%s\"\n" % (p)) - sys.exit(1) - - try: - with open(p, 'rb') as fp: - fancy = plistlib.load(fp, fmt=plistlib.FMT_XML) - except: - if verbose >= 1: - sys.stderr.write("Error: Could not parse fancy disk image plist at \"%s\"\n" % (p)) - sys.exit(1) - - try: - assert "window_bounds" not in fancy or (isinstance(fancy["window_bounds"], list) and len(fancy["window_bounds"]) == 4) - assert "background_picture" not in fancy or isinstance(fancy["background_picture"], str) - assert "icon_size" not in fancy or isinstance(fancy["icon_size"], int) - assert "applications_symlink" not in fancy or isinstance(fancy["applications_symlink"], bool) - if "items_position" in fancy: - assert isinstance(fancy["items_position"], dict) - for key, value in fancy["items_position"].items(): - assert isinstance(value, list) and len(value) == 2 and isinstance(value[0], int) and isinstance(value[1], int) - except: - if verbose >= 1: - sys.stderr.write("Error: Bad format of fancy disk image plist at \"%s\"\n" % (p)) - sys.exit(1) - - if "background_picture" in fancy: - bp = fancy["background_picture"] - if verbose >= 3: - print("Fancy: Resolving background picture \"%s\"..." 
% bp) - if not os.path.exists(bp): - bp = os.path.join(os.path.dirname(p), bp) - if not os.path.exists(bp): - if verbose >= 1: - sys.stderr.write("Error: Could not find background picture at \"%s\" or \"%s\"\n" % (fancy["background_picture"], bp)) - sys.exit(1) - else: - fancy["background_picture"] = bp -else: - fancy = None - # ------------------------------------------------ if os.path.exists("dist"): - if verbose >= 2: - print("+ Removing old dist folder +") - + print("+ Removing existing dist folder +") shutil.rmtree("dist") -# ------------------------------------------------ - -if len(config.volname) == 1: - volname = config.volname[0] -else: - volname = app_bundle_name +if os.path.exists(appname + ".dmg"): + print("+ Removing existing DMG +") + os.unlink(appname + ".dmg") # ------------------------------------------------ target = os.path.join("dist", "Dash-Qt.app") -if verbose >= 2: - print("+ Copying source bundle +") -if verbose >= 3: +print("+ Copying source bundle +") +if verbose: print(app_bundle, "->", target) os.mkdir("dist") @@ -611,257 +542,154 @@ applicationBundle = ApplicationBundleInfo(target) # ------------------------------------------------ -if verbose >= 2: - print("+ Deploying frameworks +") +print("+ Deploying frameworks +") try: deploymentInfo = deployFrameworksForAppBundle(applicationBundle, config.strip, verbose) if deploymentInfo.qtPath is None: deploymentInfo.qtPath = os.getenv("QTDIR", None) if deploymentInfo.qtPath is None: - if verbose >= 1: - sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n") + sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n") config.plugins = False except RuntimeError as e: - if verbose >= 1: - sys.stderr.write("Error: %s\n" % str(e)) + sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ if config.plugins: - if verbose >= 2: - print("+ Deploying plugins +") + print("+ Deploying plugins +") try: deployPlugins(applicationBundle, deploymentInfo, config.strip, verbose) except RuntimeError as e: - if verbose >= 1: - sys.stderr.write("Error: %s\n" % str(e)) + sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ -if len(config.add_qt_tr) == 0: - add_qt_tr = [] -else: - if translations_dir is not None: - qt_tr_dir = translations_dir - else: - if deploymentInfo.qtPath is not None: - qt_tr_dir = os.path.join(deploymentInfo.qtPath, "translations") - else: - sys.stderr.write("Error: Could not find Qt translation path\n") - sys.exit(1) - add_qt_tr = ["qt_%s.qm" % lng for lng in config.add_qt_tr[0].split(",")] - for lng_file in add_qt_tr: - p = os.path.join(qt_tr_dir, lng_file) - if verbose >= 3: - print("Checking for \"%s\"..." 
% p) - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find Qt translation file \"%s\"\n" % (lng_file)) - sys.exit(1) +if config.translations_dir: + if not Path(config.translations_dir[0]).exists(): + sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n") + sys.exit(1) + +print("+ Adding Qt translations +") + +translations = Path(config.translations_dir[0]) + +regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') + +lang_files = [x for x in translations.iterdir() if regex.match(x.name)] + +for file in lang_files: + if verbose: + print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) + shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) # ------------------------------------------------ -if verbose >= 2: - print("+ Installing qt.conf +") +print("+ Installing qt.conf +") + +qt_conf="""[Paths] +Translations=Resources +Plugins=PlugIns +""" with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: f.write(qt_conf.encode()) # ------------------------------------------------ -if len(add_qt_tr) > 0 and verbose >= 2: - print("+ Adding Qt translations +") - -for lng_file in add_qt_tr: - if verbose >= 3: - print(os.path.join(qt_tr_dir, lng_file), "->", os.path.join(applicationBundle.resourcesPath, lng_file)) - shutil.copy2(os.path.join(qt_tr_dir, lng_file), os.path.join(applicationBundle.resourcesPath, lng_file)) +print("+ Generating .DS_Store +") + +output_file = os.path.join("dist", ".DS_Store") + +ds = DSStore.open(output_file, 'w+') + +ds['.']['bwsp'] = { + 'WindowBounds': '{{300, 280}, {500, 343}}', + 'PreviewPaneVisibility': False, +} + +icvp = { + 'gridOffsetX': 0.0, + 'textSize': 12.0, + 'viewOptionsVersion': 1, + 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 
\x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', + 'backgroundColorBlue': 1.0, + 'iconSize': 96.0, + 'backgroundColorGreen': 1.0, + 'arrangeBy': 'none', + 'showIconPreview': True, + 'gridSpacing': 100.0, + 'gridOffsetY': 0.0, + 'showItemInfo': False, + 'labelOnBottom': True, + 'backgroundType': 2, + 'backgroundColorRed': 1.0 +} +alias = Alias().from_bytes(icvp['backgroundImageAlias']) +alias.volume.name = appname +alias.volume.posix_path = '/Volumes/' + appname +icvp['backgroundImageAlias'] = alias.to_bytes() +ds['.']['icvp'] = icvp + +ds['.']['vSrn'] = ('long', 1) + +ds['Applications']['Iloc'] = (370, 156) +ds['Dash-Qt.app']['Iloc'] = (128, 156) + +ds.flush() +ds.close() # ------------------------------------------------ -if len(config.add_resources) > 0 and verbose >= 2: - print("+ Adding additional resources +") +if config.dmg is not None: -for p in config.add_resources: - t = os.path.join(applicationBundle.resourcesPath, os.path.basename(p)) - if verbose >= 3: - print(p, "->", t) - if os.path.isdir(p): - shutil.copytree(p, t, symlinks=True) - else: - shutil.copy2(p, t) + print("+ Preparing .dmg disk image +") -# ------------------------------------------------ + if verbose: + print("Determining size of \"dist\"...") + size = 0 + for path, dirs, files in os.walk("dist"): + for file in files: + size += os.path.getsize(os.path.join(path, file)) + size += int(size * 0.15) -if config.sign and 'CODESIGNARGS' not in os.environ: - print("You must set the CODESIGNARGS environment variable. Skipping signing.") -elif config.sign: - if verbose >= 1: - print("Code-signing app bundle %s"%(target,)) - subprocess.check_call("codesign --force %s %s"%(os.environ['CODESIGNARGS'], target), shell=True) + if verbose: + print("Creating temp image for modification...") -# ------------------------------------------------ + tempname: str = appname + ".temp.dmg" -if config.dmg is not None: + run(["hdiutil", "create", tempname, "-srcfolder", "dist", "-format", "UDRW", "-size", str(size), "-volname", appname], check=True, universal_newlines=True) - def runHDIUtil(verb, image_basename, **kwargs): - hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"] - if "capture_stdout" in kwargs: - del kwargs["capture_stdout"] - run = subprocess.check_output - else: - if verbose < 2: - hdiutil_args.append("-quiet") - elif verbose >= 3: - hdiutil_args.append("-verbose") - run = subprocess.check_call - - for key, value in kwargs.items(): - hdiutil_args.append("-" + key) - if not value is True: - hdiutil_args.append(str(value)) - - return run(hdiutil_args, universal_newlines=True) - if verbose >= 2: - if fancy is None: - print("+ Creating .dmg disk image +") - else: - print("+ Preparing .dmg disk image +") + if verbose: + print("Attaching temp image...") + output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, universal_newlines=True, stdout=PIPE).stdout - if config.dmg != "": - dmg_name = config.dmg - else: - spl = app_bundle_name.split(" ") - dmg_name = spl[0] + "".join(p.capitalize() for p in spl[1:]) - - if fancy is None: - try: - runHDIUtil("create", dmg_name, srcfolder="dist", format="UDBZ", volname=volname, ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - else: - if verbose >= 3: - print("Determining size of \"dist\"...") - size = 0 - for path, dirs, files in os.walk("dist"): - for file in files: - size += os.path.getsize(os.path.join(path, file)) - size += int(size * 0.15) - - if verbose >= 3: - print("Creating temp image for 
modification...") - try: - runHDIUtil("create", dmg_name + ".temp", srcfolder="dist", format="UDRW", size=size, volname=volname, ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - if verbose >= 3: - print("Attaching temp image...") - try: - output = runHDIUtil("attach", dmg_name + ".temp", readwrite=True, noverify=True, noautoopen=True, capture_stdout=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - m = re.search(r"/Volumes/(.+$)", output) - disk_root = m.group(0) - disk_name = m.group(1) - - if verbose >= 2: - print("+ Applying fancy settings +") - - if "background_picture" in fancy: - bg_path = os.path.join(disk_root, ".background", os.path.basename(fancy["background_picture"])) - os.mkdir(os.path.dirname(bg_path)) - if verbose >= 3: - print(fancy["background_picture"], "->", bg_path) - shutil.copy2(fancy["background_picture"], bg_path) - else: - bg_path = None - - if fancy.get("applications_symlink", False): - os.symlink("/Applications", os.path.join(disk_root, "Applications")) - - # The Python appscript package broke with OSX 10.8 and isn't being fixed. - # So we now build up an AppleScript string and use the osascript command - # to make the .dmg file pretty: - appscript = Template( """ - on run argv - tell application "Finder" - tell disk "$disk" - open - set current view of container window to icon view - set toolbar visible of container window to false - set statusbar visible of container window to false - set the bounds of container window to {$window_bounds} - set theViewOptions to the icon view options of container window - set arrangement of theViewOptions to not arranged - set icon size of theViewOptions to $icon_size - $background_commands - $items_positions - close -- close/reopen works around a bug... - open - update without registering applications - delay 5 - eject - end tell - end tell - end run - """) - - itemscript = Template('set position of item "${item}" of container window to {${position}}') - items_positions = [] - if "items_position" in fancy: - for name, position in fancy["items_position"].items(): - params = { "item" : name, "position" : ",".join([str(p) for p in position]) } - items_positions.append(itemscript.substitute(params)) - - params = { - "disk" : volname, - "window_bounds" : "300,300,800,620", - "icon_size" : "96", - "background_commands" : "", - "items_positions" : "\n ".join(items_positions) - } - if "window_bounds" in fancy: - params["window_bounds"] = ",".join([str(p) for p in fancy["window_bounds"]]) - if "icon_size" in fancy: - params["icon_size"] = str(fancy["icon_size"]) - if bg_path is not None: - # Set background file, then call SetFile to make it invisible. - # (note: making it invisible first makes set background picture fail) - bgscript = Template("""set background picture of theViewOptions to file ".background:$bgpic" - do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """) - params["background_commands"] = bgscript.substitute({"bgpic" : os.path.basename(bg_path), "disk" : params["disk"]}) - - s = appscript.substitute(params) - if verbose >= 2: - print("Running AppleScript:") - print(s) - - time.sleep(2) # fixes '112:116: execution error: Finder got an error: Can’t get disk "Dash-Core". 
(-1728)' - p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE) - p.communicate(input=s.encode('utf-8')) - if p.returncode: - print("Error running osascript.") - - if verbose >= 2: - print("+ Finalizing .dmg disk image +") - time.sleep(5) - - try: - runHDIUtil("convert", dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - os.unlink(dmg_name + ".temp.dmg") + m = re.search(r"/Volumes/(.+$)", output) + disk_root = m.group(0) + + print("+ Applying fancy settings +") + + bg_path = os.path.join(disk_root, ".background", os.path.basename('background.tiff')) + os.mkdir(os.path.dirname(bg_path)) + if verbose: + print('background.tiff', "->", bg_path) + shutil.copy2('contrib/macdeploy/background.tiff', bg_path) + + os.symlink("/Applications", os.path.join(disk_root, "Applications")) + + print("+ Finalizing .dmg disk image +") + + run(["hdiutil", "detach", f"/Volumes/{appname}"], universal_newlines=True) + + run(["hdiutil", "convert", tempname, "-format", "UDZO", "-o", appname, "-imagekey", "zlib-level=9"], check=True, universal_newlines=True) + + os.unlink(tempname) # ------------------------------------------------ -if verbose >= 2: - print("+ Done +") +print("+ Done +") sys.exit(0) diff --git a/contrib/shell/git-utils.bash b/contrib/shell/git-utils.bash new file mode 100644 index 0000000000000..37bac1f38d8f0 --- /dev/null +++ b/contrib/shell/git-utils.bash @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +git_root() { + git rev-parse --show-toplevel 2> /dev/null +} + +git_head_version() { + local recent_tag + if recent_tag="$(git describe --exact-match HEAD 2> /dev/null)"; then + echo "${recent_tag#v}" + else + git rev-parse --short=12 HEAD + fi +} diff --git a/contrib/shell/realpath.bash b/contrib/shell/realpath.bash new file mode 100644 index 0000000000000..389b77b56266d --- /dev/null +++ b/contrib/shell/realpath.bash @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# Based on realpath.sh written by Michael Kropat +# Found at: https://github.com/mkropat/sh-realpath/blob/65512368b8155b176b67122aa395ac580d9acc5b/realpath.sh + +bash_realpath() { + canonicalize_path "$(resolve_symlinks "$1")" +} + +resolve_symlinks() { + _resolve_symlinks "$1" +} + +_resolve_symlinks() { + _assert_no_path_cycles "$@" || return + + local dir_context path + if path=$(readlink -- "$1"); then + dir_context=$(dirname -- "$1") + _resolve_symlinks "$(_prepend_dir_context_if_necessary "$dir_context" "$path")" "$@" + else + printf '%s\n' "$1" + fi +} + +_prepend_dir_context_if_necessary() { + if [ "$1" = . ]; then + printf '%s\n' "$2" + else + _prepend_path_if_relative "$1" "$2" + fi +} + +_prepend_path_if_relative() { + case "$2" in + /* ) printf '%s\n' "$2" ;; + * ) printf '%s\n' "$1/$2" ;; + esac +} + +_assert_no_path_cycles() { + local target path + + target=$1 + shift + + for path in "$@"; do + if [ "$path" = "$target" ]; then + return 1 + fi + done +} + +canonicalize_path() { + if [ -d "$1" ]; then + _canonicalize_dir_path "$1" + else + _canonicalize_file_path "$1" + fi +} + +_canonicalize_dir_path() { + (cd "$1" 2>/dev/null && pwd -P) +} + +_canonicalize_file_path() { + local dir file + dir=$(dirname -- "$1") + file=$(basename -- "$1") + (cd "$dir" 2>/dev/null && printf '%s/%s\n' "$(pwd -P)" "$file") +} diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp index 06df5636b5844..d2c904743d44e 100644 --- a/contrib/valgrind.supp +++ b/contrib/valgrind.supp @@ -1,7 +1,5 @@ -# Valgrind suppressions file for Dash. 
-# -# Includes known Valgrind warnings in our dependencies that cannot be fixed -# in-tree. +# This valgrind suppressions file includes known Valgrind warnings in our +# dependencies that cannot be fixed in-tree. # # Example use: # $ valgrind --suppressions=contrib/valgrind.supp src/test/test_dash @@ -14,6 +12,9 @@ # --error-limit=no src/test/test_dash # # Note that suppressions may depend on OS and/or library versions. +# Tested on: +# * aarch64 (Ubuntu 20.04 system libs, without gui) +# * x86_64 (Ubuntu 18.04 system libs, without gui) { Suppress libstdc++ warning - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65434 Memcheck:Leak @@ -30,8 +31,6 @@ Memcheck:Cond obj:*/libdb_cxx-*.so fun:__log_put - obj:*/libdb_cxx-*.so - fun:__log_put_record } { Suppress libdb warning @@ -39,8 +38,50 @@ pwrite64(buf) fun:pwrite fun:__os_io +} +{ + Suppress libdb warning + Memcheck:Cond + fun:__log_putr.isra.1 +} +{ + Suppress libdb warning + Memcheck:Param + pwrite64(buf) + ... + obj:*/libdb_cxx-*.so +} +{ + Suppress uninitialized bytes warning in compat code + Memcheck:Param + ioctl(TCSET{S,SW,SF}) + fun:tcsetattr +} +{ + Suppress libdb warning + Memcheck:Leak + fun:malloc + ... obj:*/libdb_cxx-*.so } +{ + Suppress leaks on init + Memcheck:Leak + ... + fun:_Z11AppInitMainR11NodeContext +} +{ + Suppress leaks on shutdown + Memcheck:Leak + ... + fun:_Z8ShutdownR11NodeContext +} +{ + Ignore GUI warning + Memcheck:Leak + ... + obj:/usr/lib64/libgdk-3.so.0.2404.7 +} { Suppress leveldb warning (leveldb::InitModule()) - https://github.com/google/leveldb/issues/113 Memcheck:Leak @@ -56,17 +97,48 @@ ... fun:_ZN7leveldbL14InitDefaultEnvEv } +{ + Suppress leveldb leak + Memcheck:Leak + match-leak-kinds: reachable + fun:_Znwm + ... + fun:_ZN7leveldb6DBImpl14BackgroundCallEv +} +{ + Suppress leveldb leak + Memcheck:Leak + fun:_Znwm + ... + fun:GetCoin +} { Suppress wcsnrtombs glibc SSE4 warning (could be related: https://stroika.atlassian.net/browse/STK-626) Memcheck:Addr16 fun:__wcsnlen_sse4_1 fun:wcsnrtombs } +{ + Suppress wcsnrtombs warning (remove after removing boost::fs) + Memcheck:Cond + ... + fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE +} +{ + Suppress boost warning + Memcheck:Leak + fun:_Znwm + ... + fun:_ZN5boost9unit_test9framework5state17execute_test_treeEmjPKNS2_23random_generator_helperE + fun:_ZN5boost9unit_test9framework3runEmb + fun:_ZN5boost9unit_test14unit_test_mainEPFbvEiPPc + fun:main +} { Suppress boost::filesystem warning (fixed in boost 1.70: https://github.com/boostorg/filesystem/commit/bbe9d1771e5d679b3f10c42a58fc81f7e8c024a9) Memcheck:Cond fun:_ZN5boost10filesystem6detail28directory_iterator_incrementERNS0_18directory_iteratorEPNS_6system10error_codeE - fun:_ZN5boost10filesystem6detail28directory_iterator_constructERNS0_18directory_iteratorERKNS0_4pathEPNS_6system10error_codeE + ... obj:*/libboost_filesystem.so.* } { @@ -74,6 +146,7 @@ Memcheck:Leak match-leak-kinds: reachable fun:_Znwm + ... fun:_ZN5boost10filesystem8absoluteERKNS0_4pathES3_ } { diff --git a/contrib/verify-commits/pre-push-hook.sh b/contrib/verify-commits/pre-push-hook.sh index 367d59328cdff..204f52f176cc2 100755 --- a/contrib/verify-commits/pre-push-hook.sh +++ b/contrib/verify-commits/pre-push-hook.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2015 The Bitcoin Core developers +# Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
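The new `contrib/valgrind.supp` entries above lean on Valgrind's `...` frame wildcard, which matches any number of intermediate stack frames; that is what lets a single entry such as the `AppInitMain` or `Shutdown` leak suppression stay stable across library and compiler versions. Candidate entries for new warnings can be captured with the generator flags already quoted in the file's own header (a sketch; assumes `src/test/test_dash` has been built):

    $ valgrind --suppressions=contrib/valgrind.supp --gen-suppressions=all \
          --error-limit=no src/test/test_dash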
diff --git a/contrib/windeploy/detached-sig-create.sh b/contrib/windeploy/detached-sig-create.sh index cc42422b23f43..ca7ba21fa774e 100755 --- a/contrib/windeploy/detached-sig-create.sh +++ b/contrib/windeploy/detached-sig-create.sh @@ -25,7 +25,7 @@ CERTFILE="win-codesign.cert" mkdir -p "${OUTSUBDIR}" basename -a $(ls -1 "${SRCDIR}"/*-unsigned.exe) | while read UNSIGNED; do echo Signing "${UNSIGNED}" - "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@" + "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -h sha256 -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@" "${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}" done diff --git a/depends/.gitignore b/depends/.gitignore index 72734102c51fc..19c506ce5447d 100644 --- a/depends/.gitignore +++ b/depends/.gitignore @@ -8,5 +8,7 @@ i686* mips* arm* aarch64* +powerpc* riscv32* riscv64* +s390x* diff --git a/depends/Makefile b/depends/Makefile index b71be217658c1..dc429d492037f 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -1,8 +1,8 @@ .NOTPARALLEL : # Pattern rule to print variables, e.g. make print-top_srcdir -print-%: - @echo $* = $($*) +print-%: FORCE + @echo '$*'='$($*)' # When invoking a sub-make, keep only the command line variable definitions # matching the pattern in the filter function. @@ -98,10 +98,6 @@ host_prefix=$($(host_arch)_$(host_os)_prefix) build_prefix=$(host_prefix)/native build_host=$(build) -AT_$(V):= -AT_:=@ -AT:=$(AT_$(V)) - all: install include hosts/$(host_os).mk @@ -110,26 +106,31 @@ include builders/$(build_os).mk include builders/default.mk include packages/packages.mk -build_id_string:=$(BUILD_ID_SALT) -build_id_string+=$(shell $(build_CC) --version 2>/dev/null) -build_id_string+=$(shell $(build_AR) --version 2>/dev/null) -build_id_string+=$(shell $(build_CXX) --version 2>/dev/null) -build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) -build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) - -$(host_arch)_$(host_os)_id_string:=$(HOST_ID_SALT) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) - -ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) -# Make sure that cache is invalidated when switching between system and -# depends-managed, pinned clang -build_id_string+=system_clang -$(host_arch)_$(host_os)_id_string+=system_clang -endif +# Previously, we directly invoked the well-known programs using $(shell ...) +# to construct build_id_string. However, that was problematic because: +# +# 1. When invoking a shell, GNU Make special-cases exit code 127 (command not +# found) by not capturing the output but instead passing it through. This is +# not done for any other exit code. +# +# 2. Characters like '#' (from these programs' output) would end up in make +# variables like build_id_string, which would be wrongly interpreted by make +# when these variables were used. +# +# Therefore, we should avoid having arbitrary strings in make variables where +# possible. The gen_id script used here hashes the output to construct a +# "make-safe" id.
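+# (Each id emitted by gen_id is just a SHA256 hex digest of the collected tool output, so it contains no characters that are special to make or to the shell.)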
+# +# Also note that these lines need to be: +# +# 1. After including {hosts,builders}/*.mk, since they rely on the tool +# variables (e.g. build_CC, host_STRIP, etc.) to be set. +# +# 2. Before including packages/*.mk (excluding packages/packages.mk), since +# they rely on the build_id variables +# +build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages) @@ -163,12 +164,12 @@ include funcs.mk final_build_id_long+=$(shell $(build_SHA256SUM) config.site.in) final_build_id+=$(shell echo -n "$(final_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH)) $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages) - $(AT)rm -rf $(@D) - $(AT)mkdir -p $(@D) - $(AT)echo copying packages: $^ - $(AT)echo to: $(@D) - $(AT)cd $(@D); $(foreach package,$^, tar xf $($(package)_cached); ) - $(AT)touch $@ + rm -rf $(@D) + mkdir -p $(@D) + echo copying packages: $^ + echo to: $(@D) + cd $(@D); $(foreach package,$^, tar xf $($(package)_cached); ) + touch $@ # $PATH is not preserved between ./configure and make by convention. Its # modification and overriding at ./configure time is (as I understand it) @@ -195,8 +196,8 @@ $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages) # we expect them to be available in $PATH at all times, more specificity does # not hurt. $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_build_id) - $(AT)@mkdir -p $(@D) - $(AT)sed -e 's|@HOST@|$(host)|' \ + @mkdir -p $(@D) + sed -e 's|@HOST@|$(host)|' \ -e 's|@CC@|$(host_CC)|' \ -e 's|@CXX@|$(host_CXX)|' \ -e 's|@AR@|$(host_AR)|' \ @@ -217,7 +218,7 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_ -e 's|@no_natpmp@|$(NO_NATPMP)|' \ -e 's|@debug@|$(DEBUG)|' \ $< > $@ - $(AT)touch $@ + touch $@ define check_or_remove_cached @@ -244,7 +245,7 @@ $(host_prefix)/share/config.site: check-packages check-packages: check-sources clean-all: clean - @rm -rf $(SOURCES_PATH) x86_64* i686* mips* arm* aarch64* riscv32* riscv64* + @rm -rf $(SOURCES_PATH) x86_64* i686* mips* arm* aarch64* powerpc* riscv32* riscv64* s390x* clean: @rm -rf $(WORK_PATH) $(BASE_CACHE) $(BUILD) @@ -265,3 +266,5 @@ download: download-osx download-linux download-win $(foreach package,$(all_packages),$(eval $(call ext_add_stages,$(package)))) .PHONY: install cached clean clean-all download-one download-osx download-linux download-win download check-packages check-sources +.PHONY: FORCE +$(V).SILENT: diff --git a/depends/README.md b/depends/README.md index 23ee1c5199a7e..6e7227c68051d 100644 --- a/depends/README.md +++ b/depends/README.md @@ -12,23 +12,30 @@ For example: make HOST=x86_64-w64-mingw32 -j4 -A prefix will be generated that's suitable for plugging into Dash's -configure. 
In the above example, a dir named x86_64-w64-mingw32 will be +**Dash Core's configure script by default will ignore the depends output.** In +order for it to pick up libraries, tools, and settings from the depends build, +you must point it at the appropriate `--prefix` directory generated by the +build. In the above example, a prefix dir named x86_64-w64-mingw32 will be created. To use it for Dash: - ./configure --prefix=`pwd`/depends/x86_64-w64-mingw32 + ./configure --prefix=$PWD/depends/x86_64-w64-mingw32 Common `host-platform-triplets` for cross compilation are: +- `i686-pc-linux-gnu` for Linux 32 bit +- `x86_64-pc-linux-gnu` for x86 Linux - `x86_64-w64-mingw32` for Win64 -- `x86_64-apple-darwin19` for macOS +- `x86_64-apple-darwin` for macOS +- `arm64-apple-darwin` for ARM macOS - `arm-linux-gnueabihf` for Linux ARM 32 bit - `aarch64-linux-gnu` for Linux ARM 64 bit +- `powerpc64-linux-gnu` for Linux POWER 64-bit (big endian) +- `powerpc64le-linux-gnu` for Linux POWER 64-bit (little endian) - `riscv32-linux-gnu` for Linux RISC-V 32 bit - `riscv64-linux-gnu` for Linux RISC-V 64 bit +- `s390x-linux-gnu` for Linux S390X - `armv7a-linux-android` for Android ARM 32 bit - `aarch64-linux-android` for Android ARM 64 bit -- `i686-linux-android` for Android x86 32 bit - `x86_64-linux-android` for Android x86 64 bit The paths are automatically configured and no other options are needed unless targeting [Android](#Android). @@ -37,7 +44,7 @@ The paths are automatically configured and no other options are needed unless ta #### For macOS cross compilation - sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python3-setuptools libtinfo5 + sudo apt-get install curl bsdmainutils cmake libcap-dev libz-dev libbz2-dev python3-setuptools libtinfo5 xorriso Note: You must obtain the macOS SDK before proceeding with a cross-compile. Under the depends directory, create a subdirectory named `SDKs`. @@ -62,6 +69,10 @@ For linux AARCH64 cross compilation: sudo apt-get install g++-aarch64-linux-gnu binutils-aarch64-linux-gnu +For linux POWER 64-bit cross compilation (there are no packages for 32-bit): + + sudo apt-get install g++-powerpc64-linux-gnu binutils-powerpc64-linux-gnu g++-powerpc64le-linux-gnu binutils-powerpc64le-linux-gnu + For linux RISC-V 64-bit cross compilation (there are no packages for 32-bit): sudo apt-get install g++-riscv64-linux-gnu binutils-riscv64-linux-gnu @@ -69,6 +80,10 @@ For linux RISC-V 64-bit cross compilation (there are no packages for 32-bit): RISC-V known issue: gcc-7.3.0 and gcc-7.3.1 result in a broken `test_dash` executable (see https://github.com/bitcoin/bitcoin/pull/13543), this is apparently fixed in gcc-8.1.0. 
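Putting the triplet list and the toolchain packages together, a full cross-build of the dependencies plus a matching configure run looks like the following (a sketch using the AArch64 triplet; any other supported `HOST` works the same way, and an autotools checkout with `./autogen.sh` is assumed):

    cd depends
    make HOST=aarch64-linux-gnu -j4
    cd ..
    ./autogen.sh
    ./configure --prefix=$PWD/depends/aarch64-linux-gnu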
+For linux S390X cross compilation: + + sudo apt-get install g++-s390x-linux-gnu binutils-s390x-linux-gnu + ### Dependency Options The following can be set when running make: `make FOO=bar` diff --git a/depends/builders/darwin.mk b/depends/builders/darwin.mk index c07417cfab570..e298d302ca25a 100644 --- a/depends/builders/darwin.mk +++ b/depends/builders/darwin.mk @@ -21,3 +21,8 @@ darwin_NM:=$(shell xcrun -f nm) darwin_INSTALL_NAME_TOOL:=$(shell xcrun -f install_name_tool) darwin_native_binutils= darwin_native_toolchain= + +x86_64_darwin_CFLAGS += -arch x86_64 +x86_64_darwin_CXXFLAGS += -arch x86_64 +aarch64_darwin_CFLAGS += -arch arm64 +aarch64_darwin_CXXFLAGS += -arch arm64 diff --git a/depends/funcs.mk b/depends/funcs.mk index aab9555f2d3b0..3607a9befb9cf 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -51,7 +51,7 @@ define int_get_build_id $(eval $(1)_dependencies += $($(1)_$(host_arch)_$(host_os)_dependencies) $($(1)_$(host_os)_dependencies)) $(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($($(1)_type)_native_binutils) $($(1)_dependencies))) $(foreach dep,$($(1)_all_dependencies),$(eval $(1)_build_id_deps+=$(dep)-$($(dep)_version)-$($(dep)_recipe_hash))) -$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id_string)) +$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id)) $(eval $(1)_build_id:=$(shell echo -n "$($(1)_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH))) final_build_id_long+=$($(package)_build_id_long) @@ -168,53 +168,53 @@ endef define int_add_cmds $($(1)_fetched): - $(AT)mkdir -p $$(@D) $(SOURCES_PATH) - $(AT)rm -f $$@ - $(AT)touch $$@ - $(AT)cd $$(@D); $(call $(1)_fetch_cmds,$(1)) - $(AT)cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) - $(AT)touch $$@ + mkdir -p $$(@D) $(SOURCES_PATH) + rm -f $$@ + touch $$@ + cd $$(@D); $(call $(1)_fetch_cmds,$(1)) + cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) + touch $$@ $($(1)_extracted): | $($(1)_fetched) - $(AT)echo Extracting $(1)... - $(AT)mkdir -p $$(@D) - $(AT)cd $$(@D); $(call $(1)_extract_cmds,$(1)) - $(AT)touch $$@ + echo Extracting $(1)... + mkdir -p $$(@D) + cd $$(@D); $(call $(1)_extract_cmds,$(1)) + touch $$@ $($(1)_preprocessed): | $($(1)_extracted) - $(AT)echo Preprocessing $(1)... - $(AT)mkdir -p $$(@D) $($(1)_patch_dir) - $(AT)$(foreach patch,$($(1)_patches),cd $(PATCHES_PATH)/$(1); cp $(patch) $($(1)_patch_dir) ;) - $(AT)cd $$(@D); $(call $(1)_preprocess_cmds, $(1)) - $(AT)touch $$@ + echo Preprocessing $(1)... + mkdir -p $$(@D) $($(1)_patch_dir) + $(foreach patch,$($(1)_patches),cd $(PATCHES_PATH)/$(1); cp $(patch) $($(1)_patch_dir) ;) + cd $$(@D); $(call $(1)_preprocess_cmds, $(1)) + touch $$@ $($(1)_configured): | $($(1)_dependencies) $($(1)_preprocessed) - $(AT)echo Configuring $(1)... - $(AT)rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), tar --no-same-owner -xf $($(package)_cached); ) - $(AT)mkdir -p $$(@D) - $(AT)+cd $$(@D); $($(1)_config_env) $(call $(1)_config_cmds, $(1)) - $(AT)touch $$@ + echo Configuring $(1)... 
+ rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), tar --no-same-owner -xf $($(package)_cached); ) + mkdir -p $$(@D) + +cd $$(@D); $($(1)_config_env) $(call $(1)_config_cmds, $(1)) + touch $$@ $($(1)_built): | $($(1)_configured) - $(AT)echo Building $(1)... - $(AT)mkdir -p $$(@D) - $(AT)+cd $$(@D); $($(1)_build_env) $(call $(1)_build_cmds, $(1)) - $(AT)touch $$@ + echo Building $(1)... + mkdir -p $$(@D) + +cd $$(@D); $($(1)_build_env) $(call $(1)_build_cmds, $(1)) + touch $$@ $($(1)_staged): | $($(1)_built) - $(AT)echo Staging $(1)... - $(AT)mkdir -p $($(1)_staging_dir)/$(host_prefix) - $(AT)cd $($(1)_build_dir); $($(1)_stage_env) $(call $(1)_stage_cmds, $(1)) - $(AT)rm -rf $($(1)_extract_dir) - $(AT)touch $$@ + echo Staging $(1)... + mkdir -p $($(1)_staging_dir)/$(host_prefix) + cd $($(1)_build_dir); $($(1)_stage_env) $(call $(1)_stage_cmds, $(1)) + rm -rf $($(1)_extract_dir) + touch $$@ $($(1)_postprocessed): | $($(1)_staged) - $(AT)echo Postprocessing $(1)... - $(AT)cd $($(1)_staging_prefix_dir); $(call $(1)_postprocess_cmds) - $(AT)touch $$@ + echo Postprocessing $(1)... + cd $($(1)_staging_prefix_dir); $(call $(1)_postprocess_cmds) + touch $$@ $($(1)_cached): | $($(1)_dependencies) $($(1)_postprocessed) - $(AT)echo Caching $(1)... - $(AT)cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | tar --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T - - $(AT)mkdir -p $$(@D) - $(AT)rm -rf $$(@D) && mkdir -p $$(@D) - $(AT)mv $$($(1)_staging_dir)/$$(@F) $$(@) - $(AT)rm -rf $($(1)_staging_dir) + echo Caching $(1)... + cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | tar --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T - + mkdir -p $$(@D) + rm -rf $$(@D) && mkdir -p $$(@D) + mv $$($(1)_staging_dir)/$$(@F) $$(@) + rm -rf $($(1)_staging_dir) $($(1)_cached_checksum): $($(1)_cached) - $(AT)cd $$(@D); $(build_SHA256SUM) $$(<F) > $$(@) + cd $$(@D); $(build_SHA256SUM) $$(<F) > $$(@) .PHONY: $(1) $(1): | $($(1)_cached_checksum) diff --git a/depends/gen_id b/depends/gen_id new file mode 100755 index 0000000000000..ac69ca7ee1fa2 --- /dev/null +++ b/depends/gen_id @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Usage: env [ CC=... ] [ CXX=... ] [ AR=... ] [ RANLIB=... ] [ STRIP=... ] \ +# [ DEBUG=... ] ./gen_id [ID_SALT]... +# +# Prints to stdout a SHA256 hash representing the current toolset, used by +# depends/Makefile as a build id for caching purposes (detecting when the +# toolset has changed and the cache needs to be invalidated). +# +# If the DEBUG environment variable is non-empty and the system has `tee` +# available in its $PATH, the pre-image to the SHA256 hash will be printed to +# stderr. This is to help developers debug caching issues in depends. + +# This script explicitly does not `set -e` because id determination is mostly +# opportunistic: it is fine that things fail, as long as they fail consistently.
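+ +# For example, to reproduce the id that depends/Makefile computes and to inspect +# the hash preimage on stderr, run something like the following from the repository +# root (a sketch; the tool values are illustrative and any of them may be left blank): +# +# env CC=gcc CXX=g++ AR=ar RANLIB=ranlib STRIP=strip SHA256SUM=sha256sum DEBUG=1 ./depends/gen_id 'salt'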
+ +# Command variables (CC/CXX/AR) which can be blank are invoked with `bash -c`, +# because the "command not found" error message printed by shells often includes +# the line number, like so: +# +# ./depends/gen_id: line 43: --version: command not found +# +# By invoking with `bash -c`, we ensure that the line number is always 1 + +( + # Redirect stderr to stdout + exec 2>&1 + + echo "BEGIN ALL" + + # Include any ID salts supplied via command line + echo "BEGIN ID SALT" + echo "$@" + echo "END ID SALT" + + # GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want + # the information from "-v -E -" as well, so just include both. + echo "BEGIN CC" + bash -c "${CC} -v" + bash -c "${CC} -v -E -xc -o /dev/null - < /dev/null" + bash -c "${CC} -v -E -xobjective-c -o /dev/null - < /dev/null" + echo "END CC" + + echo "BEGIN CXX" + bash -c "${CXX} -v" + bash -c "${CXX} -v -E -xc++ -o /dev/null - < /dev/null" + bash -c "${CXX} -v -E -xobjective-c++ -o /dev/null - < /dev/null" + echo "END CXX" + + echo "BEGIN AR" + bash -c "${AR} --version" + env | grep '^AR_' + echo "ZERO_AR_DATE=${ZERO_AR_DATE}" + echo "END AR" + + echo "BEGIN RANLIB" + bash -c "${RANLIB} --version" + env | grep '^RANLIB_' + echo "END RANLIB" + + echo "BEGIN STRIP" + bash -c "${STRIP} --version" + env | grep '^STRIP_' + echo "END STRIP" + + echo "END ALL" +) | if [ -n "$DEBUG" ] && command -v tee > /dev/null 2>&1; then + # When debugging and `tee` is available, output the preimage to stderr + # in addition to passing through stdin to stdout + tee >(cat 1>&2) + else + # Otherwise, passthrough stdin to stdout + cat + fi | ${SHA256SUM} - | cut -d' ' -f1 diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk index e4b7ca22e260d..1f09b15f1a185 100644 --- a/depends/hosts/darwin.mk +++ b/depends/hosts/darwin.mk @@ -1,7 +1,7 @@ OSX_MIN_VERSION=10.15 -OSX_SDK_VERSION=10.15.6 -XCODE_VERSION=12.1 -XCODE_BUILD_ID=12A7403 +OSX_SDK_VERSION=11.0 +XCODE_VERSION=12.2 +XCODE_BUILD_ID=12B45b LD64_VERSION=609 OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers @@ -17,6 +17,7 @@ darwin_native_toolchain=native_cctools clang_prog=$(build_prefix)/bin/clang clangxx_prog=$(clang_prog)++ +llvm_config_prog=$(build_prefix)/bin/llvm-config clang_resource_dir=$(build_prefix)/lib/clang/$(native_clang_version) else @@ -34,8 +35,10 @@ darwin_native_toolchain= # Source: https://lists.gnu.org/archive/html/bug-make/2017-11/msg00017.html clang_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang") clangxx_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang++") +llvm_config_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-config") clang_resource_dir=$(shell clang -print-resource-dir) +llvm_lib_dir=$(shell $(llvm_config_prog) --libdir) endif cctools_TOOLS=AR RANLIB STRIP NM LIBTOOL OTOOL INSTALL_NAME_TOOL diff --git a/depends/packages.md b/depends/packages.md index 14776be5a053e..4158b46d28d87 100644 --- a/depends/packages.md +++ b/depends/packages.md @@ -181,3 +181,18 @@ For us, it's much easier to just link a static `libsecondary` into a shared static or dynamic `libsecondary`, that's not our concern. With a static `libsecondary`, when we need to link `libprimary` into our executable, there's no dependency chain to worry about as `libprimary` has all the symbols. + +## Build targets: + +To build an individual package (useful for debugging), the following build targets are available.
+ + make ${package} + make ${package}_fetched + make ${package}_extracted + make ${package}_preprocessed + make ${package}_configured + make ${package}_built + make ${package}_staged + make ${package}_postprocessed + make ${package}_cached + make ${package}_cached_checksum diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index 8b499367be242..8f307beb8f52a 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -30,6 +30,7 @@ $(package)_config_libraries=filesystem,thread,test $(package)_cxxflags=-std=c++17 -fvisibility=hidden $(package)_cxxflags_linux=-fPIC $(package)_cxxflags_android=-fPIC +$(package)_cxxflags_x86_64=-fcf-protection=full endef # Fix unused variable in boost_process, can be removed after upgrading to 1.72 diff --git a/depends/packages/dbus.mk b/depends/packages/dbus.mk deleted file mode 100644 index ad10b0fdd7fb6..0000000000000 --- a/depends/packages/dbus.mk +++ /dev/null @@ -1,27 +0,0 @@ -package=dbus -$(package)_version=1.10.18 -$(package)_download_path=https://dbus.freedesktop.org/releases/dbus -$(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=6049ddd5f3f3e2618f615f1faeda0a115104423a7996b7aa73e2f36e38cc514a -$(package)_dependencies=expat - -define $(package)_set_vars - $(package)_config_opts=--disable-tests --disable-doxygen-docs --disable-xml-docs --disable-shared --without-x -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -C dbus libdbus-1.la -endef - -define $(package)_stage_cmds - $(MAKE) -C dbus DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-dbusincludeHEADERS install-nodist_dbusarchincludeHEADERS && \ - $(MAKE) DESTDIR=$($(package)_staging_dir) install-pkgconfigDATA -endef - -define $(package)_postprocess_cmds - rm lib/*.la -endef diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index 1cd5a1749ac88..31d749d9eb851 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -1,14 +1,9 @@ package=libevent -$(package)_version=2.1.11-stable -$(package)_download_path=https://github.com/libevent/libevent/archive/ -$(package)_file_name=release-$($(package)_version).tar.gz -$(package)_sha256_hash=229393ab2bf0dc94694f21836846b424f3532585bac3468738b7bf752c03901e -$(package)_patches=0001-fix-windows-getaddrinfo.patch - -define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/0001-fix-windows-getaddrinfo.patch && \ - ./autogen.sh -endef +$(package)_version=2.1.12-stable +$(package)_download_path=https://github.com/libevent/libevent/releases/download/release-$($(package)_version)/ +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=92e6de1be9ec176428fd2367677e61ceffc2ee1cb119035037a27d346b0403bb +$(package)_patches = glibc_compatibility.patch # When building for Windows, we set _WIN32_WINNT to target the same Windows # version as we do in configure. 
Due to quirks in libevents build system, this @@ -22,6 +17,11 @@ define $(package)_set_vars $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601 endef +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux && \ + patch -p1 -i $($(package)_patch_dir)/glibc_compatibility.patch +endef + define $(package)_config_cmds $($(package)_autoconf) endef diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk index 885207fce9af5..03e9002ecd759 100644 --- a/depends/packages/native_cctools.mk +++ b/depends/packages/native_cctools.mk @@ -7,15 +7,25 @@ $(package)_build_subdir=cctools $(package)_dependencies=native_libtapi define $(package)_set_vars - $(package)_config_opts=--target=$(host) + $(package)_config_opts=--target=$(host) --enable-lto-support + $(package)_config_opts+=--with-llvm-config=$(llvm_config_prog) $(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib - ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) - $(package)_config_opts+=--enable-lto-support --with-llvm-config=$(build_prefix)/bin/llvm-config - endif $(package)_cc=$(clang_prog) $(package)_cxx=$(clangxx_prog) endef +ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) +define $(package)_preprocess_cmds + mkdir -p $($(package)_staging_prefix_dir)/lib && \ + cp $(llvm_lib_dir)/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools +endef +else +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools +endef +endif + define $(package)_config_cmds $($(package)_autoconf) endef diff --git a/depends/packages/native_clang.mk b/depends/packages/native_clang.mk index 245269a9d342a..f2712294ab20e 100644 --- a/depends/packages/native_clang.mk +++ b/depends/packages/native_clang.mk @@ -16,10 +16,13 @@ endef define $(package)_stage_cmds mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include && \ mkdir -p $($(package)_staging_prefix_dir)/bin && \ + mkdir -p $($(package)_staging_prefix_dir)/include/llvm-c && \ cp bin/clang $($(package)_staging_prefix_dir)/bin/ && \ cp -P bin/clang++ $($(package)_staging_prefix_dir)/bin/ && \ cp bin/dsymutil $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \ cp bin/llvm-config $($(package)_staging_prefix_dir)/bin/ && \ + cp include/llvm-c/ExternC.h $($(package)_staging_prefix_dir)/include/llvm-c && \ + cp include/llvm-c/lto.h $($(package)_staging_prefix_dir)/include/llvm-c && \ cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ cp -r lib/clang/$($(package)_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include/ endef diff --git a/depends/packages/native_libdmg-hfsplus.mk b/depends/packages/native_libdmg-hfsplus.mk index c0f0ce74de8b6..3abb74ab91565 100644 --- a/depends/packages/native_libdmg-hfsplus.mk +++ b/depends/packages/native_libdmg-hfsplus.mk @@ -12,7 +12,7 @@ define $(package)_preprocess_cmds endef define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) -DCMAKE_C_FLAGS="-Wl,--build-id=none" .. + cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) -DCMAKE_C_FLAGS="-Wl,--build-id=none" -DCMAKE_SKIP_RPATH="ON" -DCMAKE_EXE_LINKER_FLAGS="-static" -DCMAKE_FIND_LIBRARY_SUFFIXES=".a" .. 
endef define $(package)_build_cmds diff --git a/depends/packages/native_libtapi.mk b/depends/packages/native_libtapi.mk index 1633213a42337..052bb2368933a 100644 --- a/depends/packages/native_libtapi.mk +++ b/depends/packages/native_libtapi.mk @@ -13,7 +13,5 @@ define $(package)_build_cmds endef define $(package)_stage_cmds - ./install.sh && \ - mkdir -p $($(package)_staging_prefix_dir)/include/llvm-c && \ - cp src/llvm/include/llvm-c/lto.h $($(package)_staging_prefix_dir)/include/llvm-c + ./install.sh endef diff --git a/depends/packages/native_mac_alias.mk b/depends/packages/native_mac_alias.mk index e60b99dccc98c..5fe027fb8a51b 100644 --- a/depends/packages/native_mac_alias.mk +++ b/depends/packages/native_mac_alias.mk @@ -1,8 +1,8 @@ package=native_mac_alias -$(package)_version=2.0.7 +$(package)_version=2.1.1 $(package)_download_path=https://github.com/al45tair/mac_alias/archive/ $(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=6f606d3b6bccd2112aeabf1a063f5b5ece87005a5d7e97c8faca23b916e88838 +$(package)_sha256_hash=c0ffceee14f7d04a6eb323fb7b8217dc3f373b346198d2ca42300a8362db7efa $(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages define $(package)_build_cmds diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index 1b9ecd32cb6d8..3452a9359c965 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -1,6 +1,6 @@ packages:=boost libevent gmp backtrace -qt_linux_packages:=qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libxkbcommon +qt_linux_packages:=qt expat libxcb xcb_proto libXau xproto freetype fontconfig libxkbcommon qrencode_linux_packages = qrencode qrencode_android_packages = qrencode diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index d5eae66728afe..b04856b5e2c5d 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -10,13 +10,14 @@ $(package)_linguist_tools = lrelease lupdate lconvert $(package)_patches = qt.pro qttools_src.pro $(package)_patches += fix_qt_pkgconfig.patch mac-qmake.conf fix_no_printer.patch no-xlib.patch $(package)_patches+= fix_android_qmake_conf.patch fix_android_jni_static.patch dont_hardcode_pwd.patch -$(package)_patches+= no_sdk_version_check.patch -$(package)_patches+= fix_qpainter_non_determinism.patch fix_lib_paths.patch fix_android_pch.patch +$(package)_patches += dont_hardcode_x86_64.patch +$(package)_patches+= fix_lib_paths.patch fix_android_pch.patch $(package)_patches+= fix_limits_header.patch $(package)_patches+= fix_montery_include.patch $(package)_patches += glibc_compatibility.patch +$(package)_patches+= qtbase-moc-ignore-gcc-macro.patch +$(package)_patches += fast_fixed_dtoa_no_optimize.patch -# Update OSX_QT_TRANSLATIONS when this is updated $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) $(package)_qttranslations_sha256_hash=577b0668a777eb2b451c61e8d026d79285371597ce9df06b6dee6c814164b7c3 @@ -129,8 +130,10 @@ $(package)_config_opts_darwin += -device-option MAC_TARGET=$(host) $(package)_config_opts_darwin += -device-option XCODE_VERSION=$(XCODE_VERSION) endif -# for macOS on Apple Silicon (ARM) see https://bugreports.qt.io/browse/QTBUG-85279 +ifneq ($(build_arch),$(host_arch)) $(package)_config_opts_aarch64_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=arm64 +$(package)_config_opts_x86_64_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=x86_64 +endif $(package)_config_opts_linux = -qt-xcb $(package)_config_opts_linux += -no-xcb-xlib @@ -150,7 +153,10 @@ else 
$(package)_config_opts_x86_64_linux = -xplatform linux-g++-64 endif $(package)_config_opts_aarch64_linux = -xplatform linux-aarch64-gnu-g++ +$(package)_config_opts_powerpc64_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ +$(package)_config_opts_powerpc64le_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ $(package)_config_opts_riscv64_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ +$(package)_config_opts_s390x_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ $(package)_config_opts_mingw32 = -no-opengl $(package)_config_opts_mingw32 += -no-dbus @@ -181,7 +187,6 @@ $(package)_config_opts_android += -no-feature-vulkan $(package)_config_opts_aarch64_android += -android-arch arm64-v8a $(package)_config_opts_armv7a_android += -android-arch armeabi-v7a $(package)_config_opts_x86_64_android += -android-arch x86_64 -$(package)_config_opts_i686_android += -android-arch i686 endef define $(package)_fetch_cmds @@ -233,12 +238,13 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_android_pch.patch && \ patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \ - patch -p1 -i $($(package)_patch_dir)/fix_qpainter_non_determinism.patch &&\ - patch -p1 -i $($(package)_patch_dir)/no_sdk_version_check.patch && \ + patch -p1 -i $($(package)_patch_dir)/dont_hardcode_x86_64.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_lib_paths.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_limits_header.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_montery_include.patch && \ patch -p1 -i $($(package)_patch_dir)/glibc_compatibility.patch && \ + patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ + patch -p1 -i $($(package)_patch_dir)/fast_fixed_dtoa_no_optimize.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ @@ -256,6 +262,7 @@ endef define $(package)_config_cmds export PKG_CONFIG_SYSROOT_DIR=/ && \ export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \ + export QT_MAC_SDK_NO_VERSION_CHECK=1 && \ cd qtbase && \ ./configure -top-level $($(package)_config_opts) endef diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk index 8f51757c26b24..cbdb5ceb52dae 100644 --- a/depends/packages/sqlite.mk +++ b/depends/packages/sqlite.mk @@ -10,6 +10,10 @@ $(package)_config_opts_linux=--with-pic $(package)_cppflags_linux = -DSQLITE_DISABLE_LFS endef +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . 
+endef + define $(package)_config_cmds $($(package)_autoconf) endef diff --git a/depends/patches/libevent/0001-fix-windows-getaddrinfo.patch b/depends/patches/libevent/0001-fix-windows-getaddrinfo.patch deleted file mode 100644 index a98cd90bd58a2..0000000000000 --- a/depends/patches/libevent/0001-fix-windows-getaddrinfo.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff -ur libevent-2.1.8-stable.orig/configure.ac libevent-2.1.8-stable/configure.ac ---- libevent-2.1.8-stable.orig/configure.ac 2017-01-29 17:51:00.000000000 +0000 -+++ libevent-2.1.8-stable/configure.ac 2020-03-07 01:11:16.311335005 +0000 -@@ -389,6 +389,10 @@ - #ifdef HAVE_NETDB_H - #include - #endif -+#ifdef _WIN32 -+#include -+#include -+#endif - ]], - [[ - getaddrinfo; -Only in libevent-2.1.8-stable: configure.ac~ diff --git a/depends/patches/libevent/glibc_compatibility.patch b/depends/patches/libevent/glibc_compatibility.patch new file mode 100644 index 0000000000000..e27777a671b52 --- /dev/null +++ b/depends/patches/libevent/glibc_compatibility.patch @@ -0,0 +1,14 @@ +Avoid getrandom@GLIBC_2.25 symbol. + +--- old/config.h.in ++++ new/config.h.in +@@ -101,9 +101,6 @@ + /* Define to 1 if you have the `getprotobynumber' function. */ + #undef HAVE_GETPROTOBYNUMBER + +-/* Define to 1 if you have the `getrandom' function. */ +-#undef HAVE_GETRANDOM +- + /* Define to 1 if you have the `getservbyname' function. */ + #undef HAVE_GETSERVBYNAME + diff --git a/depends/patches/qt/dont_hardcode_x86_64.patch b/depends/patches/qt/dont_hardcode_x86_64.patch new file mode 100644 index 0000000000000..0e1ca6acdabbc --- /dev/null +++ b/depends/patches/qt/dont_hardcode_x86_64.patch @@ -0,0 +1,123 @@ +macOS: Don't hard-code x86_64 as the architecture when using qmake + +Upstream commit: + - Qt 6.1: 9082cc8e8d5a6441dabe5e7a95bc0cd9085b95fe + +For other Qt branches see +https://codereview.qt-project.org/q/I70db7e4c27f0d3da5d0af33cb491d72c312d3fa8 + + +--- old/qtbase/configure.json ++++ new/qtbase/configure.json +@@ -208,11 +208,18 @@ + + "testTypeDependencies": { + "linkerSupportsFlag": [ "use_gold_linker" ], +- "verifySpec": [ "shared", "use_gold_linker", "compiler-flags", "qmakeargs", "commit" ], ++ "verifySpec": [ ++ "shared", ++ "use_gold_linker", ++ "compiler-flags", "qmakeargs", ++ "simulator_and_device", ++ "thread", ++ "commit" ], + "compile": [ "verifyspec" ], + "detectPkgConfig": [ "cross_compile", "machineTuple" ], + "library": [ "pkg-config", "compiler-flags" ], +- "getPkgConfigVariable": [ "pkg-config" ] ++ "getPkgConfigVariable": [ "pkg-config" ], ++ "architecture" : [ "verifyspec" ] + }, + + "testTypeAliases": { +@@ -653,7 +660,7 @@ + }, + "architecture": { + "label": "Architecture", +- "output": [ "architecture" ] ++ "output": [ "architecture", "commitConfig" ] + }, + "pkg-config": { + "label": "Using pkg-config", +diff --git a/configure.pri b/configure.pri +index 33c90a8c2f..71767e29d6 100644 + +--- old/qtbase/configure.pri ++++ new/qtbase/configure.pri +@@ -642,6 +642,13 @@ defineTest(qtConfOutput_commitOptions) { + write_file($$QT_BUILD_TREE/mkspecs/qdevice.pri, $${currentConfig}.output.devicePro)|error() + } + ++# Output is written after configuring each Qt module, ++# but some tests within a module might depend on the ++# configuration output of previous tests. 
++defineTest(qtConfOutput_commitConfig) { ++ qtConfProcessOutput() ++} ++ + # type (empty or 'host'), option name, default value + defineTest(processQtPath) { + out_var = config.rel_input.$${2} +diff --git a/mkspecs/common/macx.conf b/mkspecs/common/macx.conf +index 7d4a406134..de96c12fc9 100644 + +--- old/qtbase/mkspecs/common/macx.conf ++++ new/qtbase/mkspecs/common/macx.conf +@@ -6,7 +6,6 @@ QMAKE_PLATFORM += macos osx macx + QMAKE_MAC_SDK = macosx + + QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.12 +-QMAKE_APPLE_DEVICE_ARCHS = x86_64 + + QT_MAC_SDK_VERSION_MIN = 10.13 + QT_MAC_SDK_VERSION_MAX = 11.0 +diff --git a/mkspecs/features/mac/default_post.prf b/mkspecs/features/mac/default_post.prf +index d052808c14..0a89effe87 100644 + +--- old/qtbase/mkspecs/features/mac/default_post.prf ++++ new/qtbase/mkspecs/features/mac/default_post.prf +@@ -89,6 +89,11 @@ app_extension_api_only { + QMAKE_LFLAGS += $$QMAKE_CFLAGS_APPLICATION_EXTENSION + } + ++# Non-universal builds do not set QMAKE_APPLE_DEVICE_ARCHS, ++# so we pick it up from what the arch test resolved instead. ++isEmpty(QMAKE_APPLE_DEVICE_ARCHS): \ ++ QMAKE_APPLE_DEVICE_ARCHS = $$QT_ARCH ++ + macx-xcode { + qmake_pkginfo_typeinfo.name = QMAKE_PKGINFO_TYPEINFO + !isEmpty(QMAKE_PKGINFO_TYPEINFO): \ +@@ -144,9 +149,6 @@ macx-xcode { + simulator: VALID_SIMULATOR_ARCHS = $$QMAKE_APPLE_SIMULATOR_ARCHS + VALID_ARCHS = $$VALID_DEVICE_ARCHS $$VALID_SIMULATOR_ARCHS + +- isEmpty(VALID_ARCHS): \ +- error("QMAKE_APPLE_DEVICE_ARCHS or QMAKE_APPLE_SIMULATOR_ARCHS must contain at least one architecture") +- + single_arch: VALID_ARCHS = $$first(VALID_ARCHS) + + ACTIVE_ARCHS = $(filter $(EXPORT_VALID_ARCHS), $(ARCHS)) +diff --git a/mkspecs/features/toolchain.prf b/mkspecs/features/toolchain.prf +index 5003679bd0..c7c080cb07 100644 + +--- old/qtbase/mkspecs/features/toolchain.prf ++++ new/qtbase/mkspecs/features/toolchain.prf +@@ -182,9 +182,14 @@ isEmpty($${target_prefix}.INCDIRS) { + # UIKit simulator platforms will see the device SDK's sysroot in + # QMAKE_DEFAULT_*DIRS, because they're handled in a single build pass. + darwin { +- # Clang doesn't pick up the architecture from the sysroot, and will +- # default to the host architecture, so we need to manually set it. +- cxx_flags += -arch $$QMAKE_APPLE_DEVICE_ARCHS ++ uikit { ++ # Clang doesn't automatically pick up the architecture, just because ++ # we're passing the iOS sysroot below, and we will end up building the ++ # test for the host architecture, resulting in linker errors when ++ # linking against the iOS libraries. We work around this by passing ++ # the architecture explicitly. ++ cxx_flags += -arch $$first(QMAKE_APPLE_DEVICE_ARCHS) ++ } + + uikit:macx-xcode: \ + cxx_flags += -isysroot $$sdk_path_device.value diff --git a/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch b/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch new file mode 100644 index 0000000000000..d4d6539f56dc4 --- /dev/null +++ b/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch @@ -0,0 +1,20 @@ +Modify the optimisation flags for FastFixedDtoa. +This fixes a non-determinism issue in the asm produced for +this function when cross-compiling on x86_64 and aarch64 for +the arm-linux-gnueabihf HOST. + +--- a/qtbase/src/3rdparty/double-conversion/fixed-dtoa.h ++++ b/qtbase/src/3rdparty/double-conversion/fixed-dtoa.h +@@ -48,9 +48,12 @@ namespace double_conversion { + // + // This method only works for some parameters. If it can't handle the input it + // returns false. The output is null-terminated when the function succeeds. 
++#pragma GCC push_options ++#pragma GCC optimize ("-O1") + bool FastFixedDtoa(double v, int fractional_count, + Vector buffer, int* length, int* decimal_point); + ++#pragma GCC pop_options + } // namespace double_conversion + + #endif // DOUBLE_CONVERSION_FIXED_DTOA_H_ diff --git a/depends/patches/qt/fix_qpainter_non_determinism.patch b/depends/patches/qt/fix_qpainter_non_determinism.patch deleted file mode 100644 index 44c45187c51ef..0000000000000 --- a/depends/patches/qt/fix_qpainter_non_determinism.patch +++ /dev/null @@ -1,63 +0,0 @@ -commit 2a8f7dc6ddfc414a66491522501c1574a1343ee1 -Author: Andrew Chow -Date: Sat Nov 21 01:11:04 2020 -0500 - - build: Fix determinism issue when building with Clang 8 - - When building Qt with LLVM/Clang 8 under -O3 (the default), we run into - a determinism issue in `qt_interset_spans`. The issue has been fixed for - LLVM/Clang 9, see - https://github.com/llvm/llvm-project/commit/db101864bdc938deb1d63fe4f7da761bd38e5cae - and https://reviews.llvm.org/D64601, however this fix was not backported - to 8.x. Once LLVM/Clang 9 is used, this patch can be dropped. - - The particular issue appears to be an optimization done by -O3 which - adds a temporary variable for `spans->y` in `qt_intersect_spans`. When - it does this, sometimes it chooses to use a 32-bit movs instruction - (movswl), and other times it chooses a 64-bit movs instruction (movswq). - By patching `qt_intersect_spans` to always make a temporary variable for - `spans->y`, we are able to sidestep this problem. - -diff --git a/qtbase/src/gui/painting/qpaintengine_raster.cpp b/qtbase/src/gui/painting/qpaintengine_raster.cpp -index 92ab6e8375..f018009e0b 100644 ---- a/qtbase/src/gui/painting/qpaintengine_raster.cpp -+++ b/qtbase/src/gui/painting/qpaintengine_raster.cpp -@@ -4128,22 +4128,23 @@ static const QSpan *qt_intersect_spans(const QClipData *clip, int *currentClip, - const QSpan *clipEnd = clip->m_spans + clip->count; - - while (available && spans < end ) { -+ const short spans_y = spans->y; - if (clipSpans >= clipEnd) { - spans = end; - break; - } -- if (clipSpans->y > spans->y) { -+ if (clipSpans->y > spans_y) { - ++spans; - continue; - } -- if (spans->y != clipSpans->y) { -- if (spans->y < clip->count && clip->m_clipLines[spans->y].spans) -- clipSpans = clip->m_clipLines[spans->y].spans; -+ if (spans_y != clipSpans->y) { -+ if (spans_y < clip->count && clip->m_clipLines[spans_y].spans) -+ clipSpans = clip->m_clipLines[spans_y].spans; - else - ++clipSpans; - continue; - } -- Q_ASSERT(spans->y == clipSpans->y); -+ Q_ASSERT(spans_y == clipSpans->y); - - int sx1 = spans->x; - int sx2 = sx1 + spans->len; -@@ -4162,7 +4163,7 @@ static const QSpan *qt_intersect_spans(const QClipData *clip, int *currentClip, - if (len) { - out->x = qMax(sx1, cx1); - out->len = qMin(sx2, cx2) - out->x; -- out->y = spans->y; -+ out->y = spans_y; - out->coverage = qt_div_255(spans->coverage * clipSpans->coverage); - ++out; - --available; - diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf index 190ab7a160e49..e4bfaa1463e27 100644 --- a/depends/patches/qt/mac-qmake.conf +++ b/depends/patches/qt/mac-qmake.conf @@ -13,7 +13,6 @@ QMAKE_MAC_SDK.macosx.Path = $${MAC_SDK_PATH} QMAKE_MAC_SDK.macosx.platform_name = macosx QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION} QMAKE_MAC_SDK.macosx.PlatformPath = /phony -QMAKE_APPLE_DEVICE_ARCHS=x86_64 !host_build: QMAKE_CFLAGS += -target $${MAC_TARGET} !host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS !host_build: QMAKE_CXXFLAGS += 
$$QMAKE_CFLAGS diff --git a/depends/patches/qt/no_sdk_version_check.patch b/depends/patches/qt/no_sdk_version_check.patch deleted file mode 100644 index b16635b572025..0000000000000 --- a/depends/patches/qt/no_sdk_version_check.patch +++ /dev/null @@ -1,20 +0,0 @@ -commit f5eb142cd04be2bc4ca610ed3b5b7e8ce3520ee3 -Author: fanquake -Date: Tue Jan 5 16:08:49 2021 +0800 - - Don't invoke macOS SDK version checking - - This tries to use xcrun which is not available when cross-compiling. - -diff --git a/qtbase/mkspecs/features/mac/default_post.prf b/qtbase/mkspecs/features/mac/default_post.prf -index 92a9112bca6..447e186eb26 100644 ---- a/qtbase/mkspecs/features/mac/default_post.prf -+++ b/qtbase/mkspecs/features/mac/default_post.prf -@@ -8,7 +8,6 @@ contains(TEMPLATE, .*app) { - !macx-xcode:if(isEmpty(BUILDS)|build_pass) { - # Detect changes to the platform SDK - QMAKE_EXTRA_VARIABLES += QMAKE_MAC_SDK QMAKE_MAC_SDK_VERSION QMAKE_XCODE_DEVELOPER_PATH -- QMAKE_EXTRA_INCLUDES += $$shell_quote($$PWD/sdk.mk) - } - - # Detect incompatible SDK versions diff --git a/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch new file mode 100644 index 0000000000000..0358bea6e9403 --- /dev/null +++ b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch @@ -0,0 +1,17 @@ +The moc executable loops through headers on CPLUS_INCLUDE_PATH and stumbles +on the GCC internal _GLIBCXX_VISIBILITY macro. Tell it to ignore it as it is +not supposed to be looking there to begin with. + +Upstream report: https://bugreports.qt.io/browse/QTBUG-83160 + +diff --git a/qtbase/src/tools/moc/main.cpp b/qtbase/src/tools/moc/main.cpp +--- a/qtbase/src/tools/moc/main.cpp ++++ b/qtbase/src/tools/moc/main.cpp +@@ -188,6 +188,7 @@ int runMoc(int argc, char **argv) + dummyVariadicFunctionMacro.arguments += Symbol(0, PP_IDENTIFIER, "__VA_ARGS__"); + pp.macros["__attribute__"] = dummyVariadicFunctionMacro; + pp.macros["__declspec"] = dummyVariadicFunctionMacro; ++ pp.macros["_GLIBCXX_VISIBILITY"] = dummyVariadicFunctionMacro; + + QString filename; + QString output; diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md index f220d78f02b56..e384fa0413359 100644 --- a/doc/JSON-RPC-interface.md +++ b/doc/JSON-RPC-interface.md @@ -60,7 +60,7 @@ RPC interface will be abused. are sent as clear text that can be read by anyone on your network path. Additionally, the RPC interface has not been hardened to withstand arbitrary Internet traffic, so changing the above settings - to expose it to the Internet (even using something like a Tor hidden + to expose it to the Internet (even using something like a Tor onion service) could expose you to unconsidered vulnerabilities. See `dashd -help` for more information about these settings and other settings described in this document. 
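As a practical rule, keep the RPC interface reachable only from loopback and tunnel in (e.g. over SSH) when remote access is unavoidable. A minimal `dash.conf` sketch, assuming the stock option names that Dash Core inherits from Bitcoin Core:

```
server=1
rpcbind=127.0.0.1
rpcallowip=127.0.0.1
```

With this configuration the RPC port is never exposed to other hosts, which sidesteps the hardening caveats above.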
diff --git a/doc/README.md b/doc/README.md index 4a494b5bdcbb1..ef851a52e0b53 100644 --- a/doc/README.md +++ b/doc/README.md @@ -52,7 +52,6 @@ The Dash Core repo's [root README](/README.md) contains relevant information on - Source Code Documentation ***TODO*** - [Translation Process](translation_process.md) - [Translation Strings Policy](translation_strings_policy.md) -- [Travis CI](travis-ci.md) - [JSON-RPC Interface](JSON-RPC-interface.md) - [Unauthenticated REST Interface](REST-interface.md) - [Shared Libraries](shared-libraries.md) @@ -72,12 +71,13 @@ The Dash Core repo's [root README](/README.md) contains relevant information on - [dash.conf Configuration File](dash-conf.md) - [Files](files.md) - [Fuzz-testing](fuzzing.md) +- [I2P Support](i2p.md) +- [Init Scripts (systemd/upstart/openrc)](init.md) +- [PSBT support](psbt.md) - [Reduce Memory](reduce-memory.md) - [Reduce Traffic](reduce-traffic.md) - [Tor Support](tor.md) -- [Init Scripts (systemd/upstart/openrc)](init.md) - [ZMQ](zmq.md) -- [PSBT support](psbt.md) License --------------------- diff --git a/doc/build-cross.md b/doc/build-cross.md deleted file mode 100644 index 9c46b91785f69..0000000000000 --- a/doc/build-cross.md +++ /dev/null @@ -1,122 +0,0 @@ -Cross-compiliation of Dash Core -=============================== - -Dash Core can be cross-compiled on Linux to all other supported host systems. This is done by changing -the `HOST` parameter when building the dependencies and then specifying another `--prefix` directory when building Dash. - -The following instructions are only tested on Debian Stretch and Ubuntu Bionic. - -macOS Cross-compilation ------------------------- -Cross-compiling to macOS requires a few additional packages to be installed: - -```bash -$ sudo apt-get install python3-setuptools libcap-dev zlib1g-dev libbz2-dev cmake -``` - -Additionally, the Mac OSX SDK must be downloaded and extracted manually: - -```bash -$ mkdir -p depends/sdk-sources -$ mkdir -p depends/SDKs -$ curl https://bitcoincore.org/depends-sources/sdks/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz -o depends/sdk-sources/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz -$ tar -C depends/SDKs -xf depends/sdk-sources/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz -``` - -When building the dependencies, as described in [build-generic](build-generic.md), use - -```bash -$ make HOST=x86_64-apple-darwin19 -j4 -``` - -When building Dash Core, use - -```bash -$ ./configure --prefix=`pwd`/depends/x86_64-apple-darwin19 -``` - -Windows 64bit Cross-compilation -------------------------------- -The steps below can be performed on Ubuntu (including in a VM) or WSL. The depends system -will also work on other Linux distributions, however the commands for -installing the toolchain will be different. - -First, install the general dependencies: - - sudo apt update - sudo apt upgrade - sudo apt install build-essential libtool autotools-dev automake pkg-config bsdmainutils curl git python3 - -A host toolchain (`build-essential`) is necessary because some dependency -packages need to build host utilities that are used in the build process. - -See [dependencies.md](dependencies.md) for a complete overview. 
- -If you want to build the windows installer with `make deploy` you need [NSIS](https://nsis.sourceforge.io/Main_Page): - - sudo apt install nsis - -Acquire the source in the usual way: - - git clone https://github.com/dashpay/dash.git - cd dash - -### Building for 64-bit Windows - -The first step is to install the mingw-w64 cross-compilation tool chain: - - sudo apt install g++-mingw-w64-x86-64 - -Ubuntu Bionic 18.04 [1](#footnote1): - - sudo update-alternatives --config x86_64-w64-mingw32-g++ # Set the default mingw32 g++ compiler option to posix. - -Once the toolchain is installed the build steps are common: - -Note that for WSL the Dash Core source path MUST be somewhere in the default mount file system, for -example /usr/src/dash, AND not under /mnt/d/. If this is not the case the dependency autoconf scripts will fail. -This means you cannot use a directory that is located directly on the host Windows file system to perform the build. - -Build using: - - PATH=$(echo "$PATH" | sed -e 's/:\/mnt.*//g') # strip out problematic Windows %PATH% imported var - cd depends - make HOST=x86_64-w64-mingw32 - cd .. - ./autogen.sh # not required when building from tarball - CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure --prefix=/ - make - -### Depends system - -For further documentation on the depends system see [README.md](../depends/README.md) in the depends directory. - -ARM-Linux Cross-compilation -------------------- -Cross-compiling to ARM-Linux requires a few additional packages to be installed: - -```bash -$ sudo apt-get install g++-arm-linux-gnueabihf -``` - -When building the dependencies, as described in [build-generic](build-generic.md), use - -```bash -$ make HOST=arm-linux-gnueabihf -j4 -``` - -When building Dash Core, use - -```bash -$ ./configure --prefix=`pwd`/depends/arm-linux-gnueabihf -``` - -Footnotes ---------- - -1: Starting from Ubuntu Xenial 16.04, both the 32 and 64 bit Mingw-w64 packages install two different -compiler options to allow a choice between either posix or win32 threads. The default option is win32 threads which is the more -efficient since it will result in binary code that links directly with the Windows kernel32.lib. Unfortunately, the headers -required to support win32 threads conflict with some of the classes in the C++11 standard library, in particular std::mutex. -It's not possible to build the Dash Core code using the win32 version of the Mingw-w64 cross compilers (at least not without -modifying headers in the Dash Core source code). diff --git a/doc/build-freebsd.md b/doc/build-freebsd.md new file mode 100644 index 0000000000000..a61fcd0871c19 --- /dev/null +++ b/doc/build-freebsd.md @@ -0,0 +1,136 @@ +# FreeBSD Build Guide + +**Updated for FreeBSD [12.2](https://www.freebsd.org/releases/12.2R/announce.html)** + +This guide describes how to build dashd, command-line utilities, and GUI on FreeBSD. 
+ +## Dependencies + +The following dependencies are **required**: + + Library | Purpose | Description + ----------------------------------------------------------------------|------------|---------------------- + [autoconf](https://svnweb.freebsd.org/ports/head/devel/autoconf/) | Build | Automatically configure software source code + [automake](https://svnweb.freebsd.org/ports/head/devel/automake/) | Build | Generate makefile (requires autoconf) + [libtool](https://svnweb.freebsd.org/ports/head/devel/libtool/) | Build | Shared library support + [pkgconf](https://svnweb.freebsd.org/ports/head/devel/pkgconf/) | Build | Configure compiler and linker flags + [git](https://svnweb.freebsd.org/ports/head/devel/git/) | Clone | Version control system + [gmake](https://svnweb.freebsd.org/ports/head/devel/gmake/) | Compile | Generate executables + [boost-libs](https://svnweb.freebsd.org/ports/head/devel/boost-libs/) | Utility | Library for threading, data structures, etc + [libevent](https://svnweb.freebsd.org/ports/head/devel/libevent/) | Networking | OS independent asynchronous networking + + +The following dependencies are **optional**: + + Library | Purpose | Description + ---------------------------------------------------------------------------|------------------|---------------------- + [db5](https://svnweb.freebsd.org/ports/head/databases/db5/) | Berkeley DB | Wallet storage (only needed when wallet enabled) + [qt5](https://svnweb.freebsd.org/ports/head/devel/qt5/) | GUI | GUI toolkit (only needed when GUI enabled) + [gmp](https://svnweb.freebsd.org/ports/head/math/gmp/) | Optimized math routines | Arbitrary precision arithmetic library + [libqrencode](https://svnweb.freebsd.org/ports/head/graphics/libqrencode/) | QR codes in GUI | Generating QR codes (only needed when GUI enabled) + [libzmq4](https://svnweb.freebsd.org/ports/head/net/libzmq4/) | ZMQ notification | Allows generating ZMQ notifications (requires ZMQ version >= 4.0.0) + [sqlite3](https://svnweb.freebsd.org/ports/head/databases/sqlite3/) | SQLite DB | Wallet storage (only needed when wallet enabled) + [python3](https://svnweb.freebsd.org/ports/head/lang/python3/) | Testing | Python Interpreter (only needed when running the test suite) + + See [dependencies.md](dependencies.md) for a complete overview. + +## Preparation + +### 1. Install Required Dependencies +Install the required dependencies the usual way you [install software on FreeBSD](https://www.freebsd.org/doc/en/books/handbook/ports.html) - either with `pkg` or via the Ports collection. The example commands below use `pkg` which is usually run as `root` or via `sudo`. If you want to use `sudo`, and you haven't set it up: [use this guide](http://www.freebsdwiki.net/index.php/Sudo%2C_configuring) to setup `sudo` access on FreeBSD. + +```bash +pkg install autoconf automake boost-libs git gmake libevent libtool pkgconf + +``` + +### 2. Clone Dash Repo +Now that `git` and all the required dependencies are installed, let's clone the Dash Core repository to a directory. All build scripts and commands will run from this directory. +``` bash +git clone https://github.com/dashpay/dash.git +``` + +### 3. Install Optional Dependencies + +###### GMP + +```bash +pkg install gmp +``` + +#### Wallet Dependencies +It is not necessary to build wallet functionality to run dashd or the GUI. To enable legacy wallets, you must install `db5`. To enable [descriptor wallets](https://github.com/dashpay/dash/blob/master/doc/descriptors.md), `sqlite3` is required. 
Skip `db5` if you intend to *exclusively* use descriptor wallets. + +###### Legacy Wallet Support +`db5` is required to enable support for legacy wallets. Skip if you don't intend to use legacy wallets. + +```bash +pkg install db5 +``` + +###### Descriptor Wallet Support + +`sqlite3` is required to enable support for descriptor wallets. Skip if you don't intend to use descriptor wallets. +``` bash +pkg install sqlite3 +``` +--- + +#### GUI Dependencies +###### Qt5 + +Dash Core includes a GUI built with the cross-platform Qt Framework. To compile the GUI, we need to install `qt5`. Skip if you don't intend to use the GUI. +```bash +pkg install qt5 +``` +###### libqrencode + +The GUI can encode addresses in a QR Code. To build in QR support for the GUI, install `libqrencode`. Skip if not using the GUI or don't want QR code functionality. +```bash +pkg install libqrencode +``` +--- + +#### Test Suite Dependencies +There is an included test suite that is useful for testing code changes when developing. +To run the test suite (recommended), you will need to have Python 3 installed: + +```bash +pkg install python3 +``` +--- + +## Building Dash Core + +### 1. Configuration + +There are many ways to configure Dash Core; here are a few common examples: +##### Wallet (BDB + SQLite) Support, No GUI: +This explicitly enables legacy wallet support and disables the GUI. If `sqlite3` is installed, then descriptor wallet support will be built. +```bash +./autogen.sh +./configure --with-gui=no --with-incompatible-bdb \ + BDB_LIBS="-ldb_cxx-5" \ + BDB_CFLAGS="-I/usr/local/include/db5" \ + MAKE=gmake +``` + +##### Wallet (only SQLite) and GUI Support: +This explicitly enables the GUI and disables legacy wallet support. If `qt5` is not installed, this will throw an error. If `sqlite3` is installed then descriptor wallet functionality will be built. If `sqlite3` is not installed, then wallet functionality will be disabled. +```bash +./autogen.sh +./configure --without-bdb --with-gui=yes MAKE=gmake +``` +##### No Wallet or GUI +``` bash +./autogen.sh +./configure --without-wallet --with-gui=no MAKE=gmake +``` + +### 2. Compile +**Important**: Use `gmake` (the non-GNU `make` will exit with an error). + +```bash +gmake # use -jX here for parallelism +gmake check # Run tests if Python 3 is available +``` diff --git a/doc/build-generic.md b/doc/build-generic.md deleted file mode 100644 index 1bfb01abb220e..0000000000000 --- a/doc/build-generic.md +++ /dev/null @@ -1,82 +0,0 @@ -GENERIC BUILD NOTES -==================== -Some notes on how to build Dash Core based on the [depends](../depends/README.md) build system. - -Note on old build instructions ------------------------------- -In the past, the build documentation contained instructions on how to build Dash with system-wide installed dependencies -like BerkeleyDB 4.8, boost and Qt. Building this way is considered deprecated and only building with the `depends` prefix -is supported today. - -Required build tools and environment ------------------------------------- -Building the dependencies and Dash Core requires some essential build tools to be installed before. Please see -[build-unix](build-unix.md), [build-osx](build-osx.md) and [build-windows](build-windows.md) for details. - -Building dependencies ---------------------- -Dash inherited the `depends` folder from Bitcoin, which contains all dependencies required to build Dash. These -dependencies must be built before Dash can actually be built.
To do so, perform the following: - -```bash -$ cd depends -$ make -j4 # Choose a good -j value, depending on the number of CPU cores available -$ cd .. -``` - -This will download and build all dependencies required to build Dash Core. Caching of build results will ensure that only -the packages are rebuilt which have changed since the last depends build. - -It is required to re-run the above commands from time to time when dependencies have been updated or added. If this is -not done, build failures might occur when building Dash. - -Please read the [depends](../depends/README.md) documentation for more details on supported hosts and configuration -options. If no host is specified (as in the above example) when calling `make`, the depends system will default to your -local host system. - -See [dependencies.md](dependencies.md) for a complete overview. - -Building Dash Core ---------------------- - -```bash -$ ./autogen.sh -$ ./configure --prefix=`pwd`/depends/ -$ make -$ make install # optional -``` - -Please replace `` with your local system's `host-platform-triplet`. The following triplets are usually valid: -- `i686-pc-linux-gnu` for Linux32 -- `x86_64-pc-linux-gnu` for Linux64 -- `x86_64-w64-mingw32` for Win64 -- `x86_64-apple-darwin19` for macOS -- `arm-linux-gnueabihf` for Linux ARM 32 bit -- `aarch64-linux-gnu` for Linux ARM 64 bit - -If you want to cross-compile for another platform, choose the appropriate `` and make sure to build the -dependencies with the same host before. - -If you want to build for the same host but different distro, add `--enable-glibc-back-compat LDFLAGS=-static-libstdc++` when calling `./configure`. - - -ccache ------- -`./configure` of Dash Core will autodetect the presence of ccache and enable use of it. To disable ccache, use -`./configure --prefix= --disable-ccache`. When installed and enabled, [ccache](https://ccache.samba.org/) will -cache build results on source->object level. - -The default maximum cache size is 5G, which might not be enough to cache multiple builds when switching Git branches -very often. It is advised to increase the maximum cache size: - -```bash -$ ccache -M20G -``` - -Additional Configure Flags --------------------------- -A list of additional configure flags can be displayed with: - -```bash -./configure --help -``` diff --git a/doc/build-netbsd.md b/doc/build-netbsd.md index 3fc9f6d19b378..14235c643b2be 100644 --- a/doc/build-netbsd.md +++ b/doc/build-netbsd.md @@ -19,6 +19,7 @@ automake boost git gmake +gmp libevent libtool pkg-config @@ -39,13 +40,13 @@ from ports, for the same reason as boost above (g++/libstd++ incompatibility). If you have to build it yourself, you can use [the installation script included in contrib/](/contrib/install_db4.sh) like so: -```shell +```bash ./contrib/install_db4.sh `pwd` ``` from the root of the repository. Then set `BDB_PREFIX` for the next section: -```shell +```bash export BDB_PREFIX="$PWD/db4" ``` @@ -54,24 +55,26 @@ export BDB_PREFIX="$PWD/db4" **Important**: Use `gmake` (the non-GNU `make` will exit with an error). 
With wallet: -``` +```bash ./autogen.sh ./configure --with-gui=no CPPFLAGS="-I/usr/pkg/include" \ LDFLAGS="-L/usr/pkg/lib" \ BOOST_CPPFLAGS="-I/usr/pkg/include" \ BOOST_LDFLAGS="-L/usr/pkg/lib" \ BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \ - BDB_CFLAGS="-I${BDB_PREFIX}/include" + BDB_CFLAGS="-I${BDB_PREFIX}/include" \ + MAKE=gmake ``` Without wallet: -``` +```bash ./autogen.sh ./configure --with-gui=no --disable-wallet \ CPPFLAGS="-I/usr/pkg/include" \ LDFLAGS="-L/usr/pkg/lib" \ BOOST_CPPFLAGS="-I/usr/pkg/include" \ - BOOST_LDFLAGS="-L/usr/pkg/lib" + BOOST_LDFLAGS="-L/usr/pkg/lib" \ + MAKE=gmake ``` Build and run the tests: diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md new file mode 100644 index 0000000000000..9ad2dd006b5bf --- /dev/null +++ b/doc/build-openbsd.md @@ -0,0 +1,103 @@ +OpenBSD build guide +====================== +(updated for OpenBSD 6.2) + +This guide describes how to build dashd and command-line utilities on OpenBSD. + +OpenBSD is most commonly used as a server OS, so this guide does not contain instructions for building the GUI. + +Preparation +------------- + +Run the following as root to install the base dependencies for building: + +```bash +pkg_add git gmake libevent libtool +pkg_add autoconf # (select highest version, e.g. 2.69) +pkg_add automake # (select highest version, e.g. 1.15) +pkg_add python # (select highest version, e.g. 3.6) +pkg_add gmp +pkg_add boost + +git clone https://github.com/dashpay/dash.git +``` + +See [dependencies.md](dependencies.md) for a complete overview. + +**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is +part of the base image, and while building it is necessary to make sure that this +compiler is used and not ancient g++ 4.2.1. This is done by appending +`CC=cc CXX=c++` to configuration commands. Mixing different compilers +within the same executable will result in linker errors. + +### Building BerkeleyDB + +BerkeleyDB is only necessary for the wallet functionality. To skip this, pass +`--disable-wallet` to `./configure` and skip to the next section. + +It is recommended to use Berkeley DB 4.8. You cannot use the BerkeleyDB library +from ports, for the same reason as boost above (g++/libstd++ incompatibility). +If you have to build it yourself, you can use [the installation script included +in contrib/](/contrib/install_db4.sh) like so: + +```bash +./contrib/install_db4.sh `pwd` CC=cc CXX=c++ +``` + +from the root of the repository. Then set `BDB_PREFIX` for the next section: + +```bash +export BDB_PREFIX="$PWD/db4" +``` + +### Building Dash Core + +**Important**: Use `gmake` (the non-GNU `make` will exit with an error). + +Preparation: +```bash +export AUTOCONF_VERSION=2.69 # replace this with the autoconf version that you installed +export AUTOMAKE_VERSION=1.15 # replace this with the automake version that you installed +./autogen.sh +``` +Make sure `BDB_PREFIX` is set to the appropriate path from the above steps. + +To configure with wallet: +```bash +./configure --with-gui=no CC=cc CXX=c++ \ + BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \ + BDB_CFLAGS="-I${BDB_PREFIX}/include" \ + MAKE=gmake +``` + +To configure without wallet: +```bash +./configure --disable-wallet --with-gui=no CC=cc CXX=c++ MAKE=gmake +``` + +Build and run the tests: +```bash +gmake # use -jX here for parallelism +gmake check +``` + +Resource limits +------------------- + +If the build runs into out-of-memory errors, the instructions in this section +might help. 
+ +The standard ulimit restrictions in OpenBSD are very strict: + + data(kbytes) 1572864 + +This, unfortunately, is in some cases not enough to compile some `.cpp` files in the project +(see issue [#6658](https://github.com/bitcoin/bitcoin/issues/6658)). +If your user is in the `staff` group the limit can be raised with: + + ulimit -d 3000000 + +The change will only affect the current shell and processes spawned by it. To +make the change system-wide, change `datasize-cur` and `datasize-max` in +`/etc/login.conf`, and reboot. + diff --git a/doc/build-osx.md b/doc/build-osx.md index b0309e187f116..5ac658fd865cf 100644 --- a/doc/build-osx.md +++ b/doc/build-osx.md @@ -17,29 +17,56 @@ When the popup appears, click `Install`. Then install [Homebrew](https://brew.sh). -## Base build dependencies - +## Dependencies ```shell -brew install automake libtool pkg-config libnatpmp sqlite +brew install automake berkeley-db4 libtool boost gmp miniupnpc pkg-config python qt libevent libnatpmp qrencode sqlite ``` +If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting). See [dependencies.md](dependencies.md) for a complete overview. -If you want to build the disk image with `make deploy` (.dmg / optional), you need RSVG: +## Berkeley DB + +It is recommended to use Berkeley DB 4.8. If you have to build it yourself, +you can use [the installation script included in contrib/](contrib/install_db4.sh) +like so: + ```shell -brew install librsvg +./contrib/install_db4.sh . ``` -If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting). +from the root of the repository. -## Building +**Note**: You only need Berkeley DB if the wallet is enabled (see the section *Disable-Wallet mode* below). -It's possible that your `PATH` environment variable contains some problematic strings, run -```shell -export PATH=$(echo "$PATH" | sed -e '/\\/!s/ /\\ /g') # fix whitespaces -``` +## Build Dash Core -Next, follow the instructions in [build-generic](build-generic.md) +1. Clone the Dash Core source code: + ```shell + git clone https://github.com/dashpay/dash + cd dash + ``` + +2. Build Dash Core: + + Configure and build the headless Dash Core binaries as well as the GUI (if Qt is found). + + You can disable the GUI build by passing `--without-gui` to configure. + ```shell + ./autogen.sh + ./configure + make + ``` + +3. It is recommended to build and run the unit tests: + ```shell + make check + ``` + +4. You can also create a `.dmg` that contains the `.app` bundle (optional): + ```shell + make deploy + ``` ## `disable-wallet` mode When the intention is to run only a P2P node without a wallet, Dash Core may be @@ -65,8 +92,7 @@ touch "/Users/${USER}/Library/Application Support/DashCore/dash.conf" chmod 600 "/Users/${USER}/Library/Application Support/DashCore/dash.conf" ``` -The first time you run dashd, it will start downloading the blockchain. This process could -take many hours, or even days on slower than average systems. +The first time you run dashd, it will start downloading the blockchain. This process could take many hours, or even days on slower than average systems. You can monitor the download process by looking at the debug.log file: ```shell @@ -76,7 +102,13 @@ tail -f $HOME/Library/Application\ Support/DashCore/debug.log ``` ## Other commands: ```shell -./src/dashd -daemon # Starts the dash daemon. -./src/dash-cli --help # Outputs a list of command-line options.
-./src/dash-cli help # Outputs a list of RPC commands when the daemon is running. +./src/dashd -daemon # Starts the dash daemon. +./src/dash-cli --help # Outputs a list of command-line options. +./src/dash-cli help # Outputs a list of RPC commands when the daemon is running. ``` + +## Notes + +* Tested on OS X 10.12 Sierra through macOS 10.15 Catalina on 64-bit Intel +processors only. +* Building with downloaded Qt binaries is not officially supported. See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714). diff --git a/doc/build-unix.md b/doc/build-unix.md index dfc6b46c76a9f..819dbf3c7cb29 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -2,49 +2,243 @@ UNIX BUILD NOTES ==================== Some notes on how to build Dash Core in Unix. -(For BSD specific instructions, see [build-openbsd.md](build-openbsd.md) and/or -[build-netbsd.md](build-netbsd.md)) +(For BSD specific instructions, see `build-*bsd.md` in this directory.) -Base build dependencies ------------------------ -Building the dependencies and Dash Core requires some essential build tools and libraries to be installed before. +Note +--------------------- +Always use absolute paths to configure and compile Dash Core and the dependencies. +For example, when specifying the path of the dependency: -Run the following commands to install required packages: + ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=$BDB_PREFIX -##### Debian/Ubuntu: -```bash -$ sudo apt-get install curl build-essential libtool autotools-dev automake pkg-config python3 bsdmainutils bison libsqlite3-dev -``` +Here BDB_PREFIX must be an absolute path - it is defined using $(pwd) which ensures +the usage of the absolute path. -##### Fedora: -```bash -$ sudo dnf install gcc-c++ libtool make autoconf automake python3 libstdc++-static patch sqlite-devel -``` +To Build +--------------------- -##### Arch Linux: ```bash -$ pacman -S base-devel python3 +./autogen.sh +./configure +make +make install # optional ``` -##### Alpine Linux: -```sh -$ sudo apk --update --no-cache add autoconf automake curl g++ gcc libexecinfo-dev libexecinfo-static libtool make perl pkgconfig python3 patch linux-headers -``` +This will build dash-qt as well, if the dependencies are met. -##### FreeBSD/OpenBSD: -```bash -pkg_add gmake libtool -pkg_add autoconf # (select highest version, e.g. 2.69) -pkg_add automake # (select highest version, e.g. 1.15) -pkg_add python # (select highest version, e.g. 
3.5) -``` +Dependencies +--------------------- + +These dependencies are required: + + Library | Purpose | Description + ------------|------------------|---------------------- + libboost | Utility | Library for threading, data structures, etc + libevent | Networking | OS independent asynchronous networking + +Optional dependencies: + + Library | Purpose | Description + ------------|------------------|---------------------- + gmp | Optimized math routines | Arbitrary precision arithmetic library + miniupnpc | UPnP Support | Firewall-jumping support + libnatpmp | NAT-PMP Support | Firewall-jumping support + libdb4.8 | Berkeley DB | Wallet storage (only needed when wallet enabled) + qt | GUI | GUI toolkit (only needed when GUI enabled) + libqrencode | QR codes in GUI | Optional for generating QR codes (only needed when GUI enabled) + libzmq3 | ZMQ notification | Optional, allows generating ZMQ notifications (requires ZMQ version >= 4.0.0) + sqlite3 | SQLite DB | Wallet storage (only needed when wallet enabled) For the versions used, see [dependencies.md](dependencies.md) -Building -------- +Memory Requirements +-------------------- + +C++ compilers are memory-hungry. It is recommended to have at least 1.5 GB of +memory available when compiling Dash Core. On systems with less, gcc can be +tuned to conserve memory with additional CXXFLAGS: + + + ./configure CXXFLAGS="--param ggc-min-expand=1 --param ggc-min-heapsize=32768" + + +## Linux Distribution Specific Instructions + +### Ubuntu & Debian + +#### Dependency Build Instructions + +Build requirements: + + sudo apt-get install build-essential libtool autotools-dev automake pkg-config bsdmainutils bison python3 + +Now, you can either build from self-compiled [depends](/depends/README.md) or install the required dependencies: + + sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev + +Berkeley DB is required for the wallet. + +Ubuntu and Debian have their own `libdb-dev` and `libdb++-dev` packages, but these will install +Berkeley DB 5.1 or later. This will break binary wallet compatibility with the distributed executables, which +are based on BerkeleyDB 4.8. If you do not care about wallet compatibility, +pass `--with-incompatible-bdb` to configure. + +Otherwise, you can build Berkeley DB [yourself](#berkeley-db). + +SQLite is required for the wallet: + + sudo apt install libsqlite3-dev + +To build Dash Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode) + +Optional port mapping libraries (see: `--with-miniupnpc`, `--enable-upnp-default`, and `--with-natpmp`, `--enable-natpmp-default`): + + sudo apt install libminiupnpc-dev libnatpmp-dev + +ZMQ dependencies (provides ZMQ API): + + sudo apt-get install libzmq3-dev + +GMP dependencies (provides platform-optimized routines): + + sudo apt-get install libgmp-dev + +GUI dependencies: + +If you want to build dash-qt, make sure that the required packages for Qt development +are installed. Qt 5 is necessary to build the GUI. +To build without GUI pass `--without-gui`. + +To build with Qt 5 you need the following: + + sudo apt-get install libqt5gui5 libqt5core5a libqt5dbus5 qttools5-dev qttools5-dev-tools + +Additionally, to support Wayland protocol for modern desktop environments: + + sudo apt install qtwayland5 + +libqrencode (optional) can be installed with: + + sudo apt-get install libqrencode-dev + +Once these are installed, they will be found by configure and a dash-qt executable will be +built by default.
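For convenience, the individual Ubuntu/Debian commands above can be combined into a single invocation. A sketch, assuming you want wallet, GUI, ZMQ, GMP and port-mapping support all enabled (remember that the distribution's `libdb-dev`/`libdb++-dev` packages require passing `--with-incompatible-bdb` to configure, as noted above):

    sudo apt-get install build-essential libtool autotools-dev automake pkg-config bsdmainutils bison python3 \
        libevent-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev \
        libsqlite3-dev libdb-dev libdb++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libgmp-dev \
        libqt5gui5 libqt5core5a libqt5dbus5 qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev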
+ + +### Fedora + +#### Dependency Build Instructions + +Build requirements: + + sudo dnf install gcc-c++ libtool make autoconf automake python3 + +Now, you can either build from self-compiled [depends](/depends/README.md) or install the required dependencies: + + sudo dnf install libevent-devel boost-devel + +Berkeley DB is required for the wallet: + + sudo dnf install libdb4-devel libdb4-cxx-devel + +Newer Fedora releases, since Fedora 33, have only `libdb-devel` and `libdb-cxx-devel` packages, but these will install +Berkeley DB 5.3 or later. This will break binary wallet compatibility with the distributed executables, which +are based on Berkeley DB 4.8. If you do not care about wallet compatibility, +pass `--with-incompatible-bdb` to configure. + +Otherwise, you can build Berkeley DB [yourself](#berkeley-db). + +SQLite is required for the wallet: + + sudo dnf install sqlite-devel + +To build Dash Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode) + +Optional port mapping libraries (see: `--with-miniupnpc`, `--enable-upnp-default`, and `--with-natpmp`, `--enable-natpmp-default`): + + sudo dnf install miniupnpc-devel libnatpmp-devel + +ZMQ dependencies (provides ZMQ API): + + sudo dnf install zeromq-devel + +GMP dependencies (provides platform-optimized routines): + + sudo dnf install gmp-devel + +GUI dependencies: + +If you want to build dash-qt, make sure that the required packages for Qt development +are installed. Qt 5 is necessary to build the GUI. +To build without GUI pass `--without-gui`. + +To build with Qt 5 you need the following: + + sudo dnf install qt5-qttools-devel qt5-qtbase-devel + +Additionally, to support Wayland protocol for modern desktop environments: + + sudo dnf install qt5-qtwayland + +libqrencode (optional) can be installed with: + + sudo dnf install qrencode-devel + +Once these are installed, they will be found by configure and a dash-qt executable will be +built by default. + +Notes +----- +The release is built with GCC and then "strip dashd" to strip the debug +symbols, which reduces the executable size by about 90%. + + +miniupnpc +--------- + +[miniupnpc](https://miniupnp.tuxfamily.org) may be used for UPnP port mapping. It can be downloaded from [here]( +https://miniupnp.tuxfamily.org/files/). UPnP support is compiled in and +turned off by default. See the configure options for upnp behavior desired: + + --without-miniupnpc No UPnP support miniupnp not required + --disable-upnp-default (the default) UPnP support turned off by default at runtime + --enable-upnp-default UPnP support turned on by default at runtime + +libnatpmp +--------- + +[libnatpmp](https://miniupnp.tuxfamily.org/libnatpmp.html) may be used for NAT-PMP port mapping. It can be downloaded +from [here](https://miniupnp.tuxfamily.org/files/). NAT-PMP support is compiled in and +turned off by default. See the configure options for NAT-PMP behavior desired: + + --without-natpmp No NAT-PMP support, libnatpmp not required + --disable-natpmp-default (the default) NAT-PMP support turned off by default at runtime + --enable-natpmp-default NAT-PMP support turned on by default at runtime + +Berkeley DB +----------- +It is recommended to use Berkeley DB 4.8. If you have to build it yourself, +you can use [the installation script included in contrib/](contrib/install_db4.sh) +like so: + +```shell +./contrib/install_db4.sh `pwd` +``` + +from the root of the repository. + +Otherwise, you can build Dash Core from self-compiled [depends](/depends/README.md). 
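If you built Berkeley DB with the script above, point `configure` at the resulting prefix. A sketch, assuming the script left its output in `db4/` under the repository root (the same convention the BSD guides in this tree use):

```shell
export BDB_PREFIX="$PWD/db4"
./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include"
```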
+
+**Note**: You only need Berkeley DB if the wallet is enabled (see [*Disable-wallet mode*](#disable-wallet-mode)).
+
+Boost
+-----
+If you need to build Boost yourself:
+
+    sudo su
+    ./bootstrap.sh
+    ./bjam install
 
-Follow the instructions in [build-generic](build-generic.md)
 
 Security
 --------
@@ -54,8 +248,8 @@ This can be disabled with:
 
 Hardening Flags:
 
-    ./configure --prefix= --enable-hardening
-    ./configure --prefix= --disable-hardening
+    ./configure --enable-hardening
+    ./configure --disable-hardening
 
 Hardening enables the following features:
@@ -97,7 +291,7 @@ Disable-wallet mode
 When the intention is to run only a P2P node without a wallet, Dash Core may be
 compiled in disable-wallet mode with:
 
-    ./configure --prefix= --disable-wallet
+    ./configure --disable-wallet
 
 In this case there is no dependency on Berkeley DB 4.8 and SQLite.
 
@@ -109,55 +303,44 @@ A list of additional configure flags can be displayed with:
 
     ./configure --help
 
-Building on FreeBSD
--------------------
-
-(TODO, this is untested, please report if it works and if changes to this documentation are needed)
-Building on FreeBSD is basically the same as on Linux based systems, with the difference that you have to use `gmake`
-instead of `make`.
+Setup and Build Example: Arch Linux
+-----------------------------------
+This example lists the steps necessary to set up and build a command line only, non-wallet distribution of the latest changes on Arch Linux:
 
-*Note on debugging*: The version of `gdb` installed by default is [ancient and considered harmful](https://wiki.freebsd.org/GdbRetirement).
-It is not suitable for debugging a multi-threaded C++ program, not even for getting backtraces. Please install the package `gdb` and
-use the versioned gdb command e.g. `gdb7111`.
+    pacman -S git base-devel boost libevent python
+    git clone https://github.com/dashpay/dash.git
+    cd dash/
+    ./autogen.sh
+    ./configure --disable-wallet --without-gui --without-miniupnpc
+    make check
 
-Building on OpenBSD
--------------------
-
-(TODO, this is untested, please report if it works and if changes to this documentation are needed)
-
-**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is
-part of the base image, and while building it is necessary to make sure that this
-compiler is used and not ancient g++ 4.2.1. This is done by appending
-`CC=cc CXX=c++` to configuration commands. Mixing different compilers
-within the same executable will result in linker errors.
+Note:
+Enabling wallet support requires either compiling against a Berkeley DB newer than 4.8 (package `db`) using `--with-incompatible-bdb`,
+or building and depending on a local version of Berkeley DB 4.8. The readily available Arch Linux packages are currently built using
+`--with-incompatible-bdb` according to the [PKGBUILD](https://projects.archlinux.org/svntogit/community.git/tree/bitcoin/trunk/PKGBUILD).
+As mentioned above, when maintaining portability of the wallet between the standard Dash Core distributions and independently built
+node software is desired, Berkeley DB 4.8 must be used.
 
-```bash
-$ cd depends
-$ make CC=cc CXX=c++
-$ cd ..
-$ export AUTOCONF_VERSION=2.69 # replace this with the autoconf version that you installed
-$ export AUTOMAKE_VERSION=1.15 # replace this with the automake version that you installed
-$ ./autogen.sh
-$ ./configure --prefix= CC=cc CXX=c++
-$ gmake # use -jX here for parallelism
-```
-OpenBSD Resource limits
+ARM Cross-compilation
 -------------------
-If the build runs into out-of-memory errors, the instructions in this section
-might help.
+These steps can be performed on, for example, an Ubuntu VM. The depends system
+will also work on other Linux distributions; however, the commands for
+installing the toolchain will be different.
+
+Make sure you install the build requirements mentioned above.
+Then, install the toolchain and curl:
 
-The standard ulimit restrictions in OpenBSD are very strict:
+    sudo apt-get install g++-arm-linux-gnueabihf curl
 
-    data(kbytes)         1572864
+To build executables for ARM:
 
-This, unfortunately, in some cases not enough to compile some `.cpp` files in the project,
-(see issue [#6658](https://github.com/bitcoin/bitcoin/issues/6658)).
-If your user is in the `staff` group the limit can be raised with:
+    cd depends
+    make HOST=arm-linux-gnueabihf NO_QT=1
+    cd ..
+    ./configure --prefix=$PWD/depends/arm-linux-gnueabihf --enable-glibc-back-compat --enable-reduce-exports LDFLAGS=-static-libstdc++
+    make
 
-    ulimit -d 3000000
 
-The change will only affect the current shell and processes spawned by it. To
-make the change system-wide, change `datasize-cur` and `datasize-max` in
-`/etc/login.conf`, and reboot.
+For further documentation on the depends system see [README.md](../depends/README.md) in the depends directory.
diff --git a/doc/build-windows.md b/doc/build-windows.md
index 2f223936fecbb..2263880e5e5cd 100644
--- a/doc/build-windows.md
+++ b/doc/build-windows.md
@@ -5,7 +5,7 @@ Below are some notes on how to build Dash Core for Windows.
 
 The options known to work for building Dash Core on Windows are:
 
-* On Linux, using the [Mingw-w64](https://mingw-w64.org/doku.php) cross compiler tool chain. Ubuntu Bionic 18.04 is required
+* On Linux, using the [Mingw-w64](https://mingw-w64.org/doku.php) cross compiler tool chain. Ubuntu Focal 20.04 is required
 and is the platform used to build the Dash Core Windows release binaries.
 * On Windows, using [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/windows/wsl/about) and the Mingw-w64 cross compiler tool chain.
 
@@ -38,7 +38,7 @@ To install WSL on Windows 10 with Fall Creators Update installed (version >= 162
   * Enable 'Windows Subsystem for Linux'
   * Click 'OK' and restart if necessary
 2. Install Ubuntu
-  * Open Microsoft Store and search for "Ubuntu 18.04" or use [this link](https://www.microsoft.com/store/productId/9N9TNGVNDL3Q)
+  * Open Microsoft Store and search for "Ubuntu 20.04" or use [this link](https://www.microsoft.com/store/productId/9MTTCL66CPXJ)
   * Click Install
 3. Complete Installation
-  * Open a cmd prompt and type "Ubuntu1804"
+  * Open a cmd prompt and type "Ubuntu2004"
@@ -51,7 +51,58 @@ recommended, but it is possible to compile the 32-bit version.
 
 Cross-compilation
 -------------------
 
-Follow the instructions for Windows in [build-cross](build-cross.md)
+These steps can be performed on, for example, an Ubuntu VM. The depends system
+will also work on other Linux distributions; however, the commands for
+installing the toolchain will be different.
+ +First, install the general dependencies: + + sudo apt-get install build-essential libtool autotools-dev automake pkg-config bsdmainutils bison curl + +A host toolchain (`build-essential`) is necessary because some dependency +packages (such as `protobuf`) need to build host utilities that are used in the +build process. + +## Building for 64-bit Windows + +To build executables for Windows 64-bit, install the following dependencies: + + sudo apt-get install g++-mingw-w64-x86-64 mingw-w64-x86-64-dev + +Then build using: + + cd depends + make HOST=x86_64-w64-mingw32 + cd .. + ./autogen.sh # not required when building from tarball + CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure --prefix=/ + make + +## Building for 32-bit Windows + +To build executables for Windows 32-bit, install the following dependencies: + + sudo apt-get install g++-mingw-w64-i686 mingw-w64-i686-dev + +Additional WSL Note: WSL support for [launching Win32 applications](https://docs.microsoft.com/en-us/archive/blogs/wsl/windows-and-ubuntu-interoperability#launching-win32-applications-from-within-wsl) +results in `Autoconf` configure scripts being able to execute Windows Portable Executable files. This can cause +unexpected behaviour during the build, such as Win32 error dialogs for missing libraries. The recommended approach +is to temporarily disable WSL support for Win32 applications. + +Then build using: + + sudo bash -c "echo 0 > /proc/sys/fs/binfmt_misc/status" # Disable WSL support for Win32 applications. + cd depends + make HOST=i686-w64-mingw32 + cd .. + ./autogen.sh # not required when building from tarball + CONFIG_SITE=$PWD/depends/i686-w64-mingw32/share/config.site ./configure --prefix=/ + make + sudo bash -c "echo 1 > /proc/sys/fs/binfmt_misc/status" # Enable WSL support for Win32 applications. + +## Depends system + +For further documentation on the depends system see [README.md](../depends/README.md) in the depends directory. Installation ------------- diff --git a/doc/dependencies.md b/doc/dependencies.md index cc74b0f4edc76..31fb105ffe862 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -8,20 +8,19 @@ These are the dependencies currently used by Dash Core. 
You can find instruction | Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | | | Boost | [1.73.0](https://www.boost.org/users/download/) | [1.64.0](https://github.com/bitcoin/bitcoin/pull/22320) | No | | | | Clang[ \* ](#note1) | | [5.0+](https://releases.llvm.org/download.html) (C++17 support) | | | | -| D-Bus | [1.10.18](https://cgit.freedesktop.org/dbus/dbus/tree/NEWS?h=dbus-1.10) | | No | Yes | | | Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | | | fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | | | FreeType | [2.7.1](https://download.savannah.gnu.org/releases/freetype) | | No | | [Yes](https://github.com/dashpay/dash/blob/develop/depends/packages/qt.mk) (Android only) | | GCC | | [7+](https://gcc.gnu.org/) (C++17 support) | | | | | glibc | | [2.18](https://www.gnu.org/software/libc/) | | | | | | HarfBuzz-NG | | | | | [Yes](https://github.com/dashpay/dash/blob/develop/depends/packages/qt.mk) | -| libevent | [2.1.11-stable](https://github.com/libevent/libevent/releases) | [2.0.21](https://github.com/bitcoin/bitcoin/pull/18676) | No | | | +| libevent | [2.1.12-stable](https://github.com/libevent/libevent/releases) | [2.0.21](https://github.com/bitcoin/bitcoin/pull/18676) | No | | | | libnatpmp | git commit [4536032...](https://github.com/miniupnp/libnatpmp/tree/4536032ae32268a45c073a4d5e91bbab4534773a) | | No | | | | libpng | | | | | [Yes](https://github.com/dashpay/dash/blob/develop/depends/packages/qt.mk) | -| librsvg | | | | | | +| Linux Kernel | [N/A](https://www.kernel.org/) | 3.2.0 | | | | | MiniUPnPc | [2.2.2](https://miniupnp.tuxfamily.org/files) | | No | | | | PCRE | | | | | [Yes](https://github.com/dashpay/dash/blob/develop/depends/packages/qt.mk) | -| Python (tests) | | [3.5](https://www.python.org/downloads) | | | | +| Python (tests) | | [3.8](https://www.python.org/downloads) | | | | | qrencode | [3.4.4](https://fukuchi.org/works/qrencode) | | No | | | | Qt | [5.12.11](https://download.qt.io/official_releases/qt/) | [5.5.1](https://github.com/bitcoin/bitcoin/issues/13478) | No | | | | SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | | @@ -46,5 +45,4 @@ Some dependencies are not needed in all configurations. The following are some f * ZeroMQ is needed only with the `--with-zmq` option. #### Other -* librsvg is only needed if you need to run `make deploy` on (cross-compilation to) macOS. * Not-Qt-bundled zlib is required to build the [DMG tool](../contrib/macdeploy/README.md#deterministic-macos-dmg-notes) from the libdmg-hfsplus project. diff --git a/doc/descriptors.md b/doc/descriptors.md index 8a2a0f588b605..f8040e823d1f6 100644 --- a/doc/descriptors.md +++ b/doc/descriptors.md @@ -17,6 +17,7 @@ Supporting RPCs are: (`regtest` only, since v0.19). - `utxoupdatepsbt` takes as input descriptors to add information to the psbt (since v0.19). +- `createmultisig` and `addmultisigaddress` return descriptors as well (since v0.20) This document describes the language. For the specifics on usage, see the RPC documentation for the functions mentioned above. @@ -28,15 +29,18 @@ Output descriptors currently support: - Pay-to-pubkey-hash scripts (P2PKH), through the `pkh` function. - Pay-to-script-hash scripts (P2SH), through the `sh` function. - Multisig scripts, through the `multi` function. 
+- Multisig scripts where the public keys are sorted lexicographically, through the `sortedmulti` function.
 - Any type of supported address through the `addr` function.
 - Raw hex scripts through the `raw` function.
 - Public keys (compressed and uncompressed) in hex notation, or BIP32 extended pubkeys with derivation paths.
 
 ## Examples
 
-- `pk(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)` represents a P2PK output.
-- `multi(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)` represents a bare *1-of-2* multisig.
-- `pkh(xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw/1'/2)` refers to a single P2PKH output, using child key *1'/2* of the specified xpub.
+- `pk(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)` describes a P2PK output with the specified public key.
+- `combo(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)` describes any P2PK or P2PKH output with the specified public key.
+- `multi(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)` describes a bare *1-of-2* multisig with the specified public keys.
+- `sortedmulti(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)` describes a bare *1-of-2* multisig with keys sorted lexicographically in the resulting redeemScript.
+- `pkh(xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw/1'/2)` describes a P2PKH output with child key *1'/2* of the specified xpub.
 - `pkh([d34db33f/44'/0'/0']xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/1/*)` describes a set of P2PKH outputs, but additionally specifies that the specified xpub is a child of a master with fingerprint `d34db33f`, and derived using path `44'/0'/0'`.
 
 ## Reference
 
@@ -49,6 +53,7 @@ Descriptors consist of several types of expressions. The top level expression is
 - `sh(SCRIPT)` (top level only): P2SH embed the argument.
 - `combo(KEY)` (top level only): an alias for the collection of `pk(KEY)` and `pkh(KEY)`.
 - `multi(k,KEY_1,KEY_2,...,KEY_n)` (anywhere): k-of-n multisig script.
+- `sortedmulti(k,KEY_1,KEY_2,...,KEY_n)` (anywhere): k-of-n multisig script with keys sorted lexicographically in the resulting script.
 - `addr(ADDR)` (top level only): the script which ADDR expands to.
 - `raw(HEX)` (top level only): the script whose hex encoding is HEX.
 
@@ -91,10 +96,24 @@ not contain "p2" for brevity.
 
 Several pieces of software use multi-signature (multisig) scripts based
 on Bitcoin's OP_CHECKMULTISIG opcode. To support these, we introduce the
-`multi(k,key_1,key_2,...,key_n)` function. It represents a *k-of-n*
+`multi(k,key_1,key_2,...,key_n)` and `sortedmulti(k,key_1,key_2,...,key_n)`
+functions. They represent a *k-of-n*
 multisig policy, where any *k* out of the *n* provided public keys must sign.
 
+Key order is significant for `multi()`. A `multi()` expression describes a multisig script
+with keys in the specified order, and in a search for TXOs, it will not match
+outputs with multisig scriptPubKeys that have the same keys in a different
+order. Also, to prevent a combinatorial explosion of the search space, if more
+than one of the `multi()` key arguments is a BIP32 wildcard path ending in `/*`
+or `*'`, the `multi()` expression only matches multisig scripts with the `i`th
+child key from each wildcard path in lockstep, rather than scripts with any
+combination of child keys from each wildcard path.
+
+Key order does not matter for `sortedmulti()`. `sortedmulti()` behaves in the same way
+as `multi()` does but the keys are reordered in the resulting script such that they
+are lexicographically ordered as described in BIP67.
+
 ### BIP32 derived keys and chains
 
 Most modern wallet software and hardware uses keys that are derived using
@@ -105,7 +124,7 @@ path consists of a sequence of 0 or more integers (in the range *0..2<sup>31</sup>-1*)
 each optionally followed by `'` or `h`, and separated by `/` characters.
 The string may optionally end with the literal `/*` or `/*'` (or `/*h`)
 to refer to all unhardened or hardened
-child keys instead.
+child keys in a configurable range (by default `0-1000`, inclusive).
 
 Whenever a public key is described using a hardened derivation step, the
 script cannot be computed without access to the corresponding private
@@ -150,7 +169,7 @@ steps, or for dumping wallet descriptors including private key material.
 
 In order to easily represent the sets of scripts currently supported by
 existing Dash Core wallets, a convenience function `combo` is
-provided, which takes as input a public key, and constructs the P2PK and
+provided, which takes as input a public key, and describes a set of P2PK and
 P2PKH scripts for that key.
 
 ### Checksums
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 2c14d219af4b4..d48b8ec68a697 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -9,6 +9,7 @@ Developer Notes
 - [Coding Style (C++)](#coding-style-c)
 - [Coding Style (Python)](#coding-style-python)
 - [Coding Style (Doxygen-compatible comments)](#coding-style-doxygen-compatible-comments)
+  - [Generating Documentation](#generating-documentation)
 - [Development tips and tricks](#development-tips-and-tricks)
   - [Compiling for debugging](#compiling-for-debugging)
   - [Show sources in debugging](#show-sources-in-debugging)
@@ -36,10 +37,14 @@ Developer Notes
   - [Source code organization](#source-code-organization)
   - [GUI](#gui)
   - [Subtrees](#subtrees)
+  - [Upgrading LevelDB](#upgrading-leveldb)
+  - [File Descriptor Counts](#file-descriptor-counts)
+  - [Consensus Compatibility](#consensus-compatibility)
 - [Scripted diffs](#scripted-diffs)
   - [Suggestions and examples](#suggestions-and-examples)
 - [Release notes](#release-notes)
 - [RPC interface guidelines](#rpc-interface-guidelines)
+- [Internal interface guidelines](#internal-interface-guidelines)
 
 
 
@@ -149,12 +154,17 @@ For example, to describe a function use:
 
 ```c++
 /**
- * ... text ...
- * @param[in] arg1 A description
- * @param[in] arg2 Another argument description
- * @pre Precondition for function...
+ * ... Description ...
+ *
+ * @param[in] arg1 input description...
+ * @param[in] arg2 input description...
+ * @param[out] arg3 output description...
+ * @return Return cases...
+ * @throws Error type and cases...
+ * @pre Pre-condition for function...
+ * @post Post-condition for function...
  */
-bool function(int arg1, const char *arg2)
+bool function(int arg1, const char *arg2, std::string& arg3)
 ```
 
 A complete list of `@xxx` commands can be found at http://www.stack.nl/~dimitri/doxygen/manual/commands.html.
@@ -169,44 +179,66 @@ To describe a class, use the same construct above the class definition:
  * @see GetWarnings()
  */
 class CAlert
-{
 ```
 
 To describe a member or variable use:
 ```c++
-int var; //!< Detailed description after the member
+//! Description before the member
+int var;
 ```
 
 or
 ```c++
-//! Description before the member
-int var;
+int var; //!< Description after the member
 ```
 
 Also OK:
 ```c++
 ///
-/// ... text ...
+/// ... Description ...
 ///
 bool function2(int arg1, const char *arg2)
 ```
 
-Not OK (used plenty in the current source, but not picked up):
+Not picked up by Doxygen:
 ```c++
 //
-// ... text ...
+// ... Description ...
 //
 ```
 
 A full list of comment syntaxes picked up by Doxygen can be found at https://www.stack.nl/~dimitri/doxygen/manual/docblocks.html,
 but the above styles are favored.
 
-Documentation can be generated with `make docs` and cleaned up with `make clean-docs`. The resulting files are located in `doc/doxygen/html`; open `index.html` to view the homepage.
+Recommendations:
 
-Before running `make docs`, you will need to install dependencies `doxygen` and `dot`. For example, on macOS via Homebrew:
-```
-brew install graphviz doxygen
-```
+- Avoid duplicating type and input/output information in function
+  descriptions.
+
+- Use backticks (``) to refer to `argument` names in function and
+  parameter descriptions.
+
+- Backticks aren't required when referring to functions Doxygen already knows
+  about; it will build hyperlinks for these automatically. See
+  http://www.doxygen.nl/manual/autolink.html for complete info.
+
+- Avoid linking to external documentation; links can break.
+
+- Javadoc and all valid Doxygen comments are stripped from Doxygen source code
+  previews (`STRIP_CODE_COMMENTS = YES` in [Doxyfile.in](doc/Doxyfile.in)). If
+  you want a comment to be preserved, it must instead use `//` or `/* */`.
+
+### Generating Documentation
+
+The documentation can be generated with `make docs` and cleaned up with `make
+clean-docs`. The resulting files are located in `doc/doxygen/html`; open
+`index.html` in that directory to view the homepage.
+
+Before running `make docs`, you'll need to install these dependencies:
+
+Linux: `sudo apt install doxygen graphviz`
+
+macOS: `brew install doxygen graphviz`
 
 Development tips and tricks
 ---------------------------
@@ -1154,7 +1186,133 @@ A few guidelines for introducing and reviewing new RPC interfaces:
   new RPC is replacing a deprecated RPC, to avoid both RPCs confusingly showing
   up in the command list.
 
+- Use *invalid* Dash addresses (e.g. in the constant array `EXAMPLE_ADDRESS`) for
+  `RPCExamples` help documentation.
+
+  - *Rationale*: Prevent accidental transactions by users.
+
 - Use the `UNIX_EPOCH_TIME` constant when describing UNIX epoch time or
   timestamps in the documentation.
 
   - *Rationale*: User-facing consistency.
+
+Internal interface guidelines
+-----------------------------
+
+Internal interfaces between parts of the codebase that are meant to be
+independent (node, wallet, GUI) are defined in
+[`src/interfaces/`](../src/interfaces/). The main interface classes defined
+there are [`interfaces::Chain`](../src/interfaces/chain.h), used by wallet to
+access the node's latest chain state,
+[`interfaces::Node`](../src/interfaces/node.h), used by the GUI to control the
+node, and [`interfaces::Wallet`](../src/interfaces/wallet.h), used by the GUI
+to control an individual wallet. There are also more specialized interface
+types like [`interfaces::Handler`](../src/interfaces/handler.h) and
+[`interfaces::ChainClient`](../src/interfaces/chain.h) passed to and from
+various interface methods.
+
+Interface classes are written in a particular style so node, wallet, and GUI
+code doesn't need to run in the same process, and so the class declarations
+work more easily with tools and libraries supporting interprocess
+communication:
+
+- Interface classes should be abstract and have methods that are [pure
+  virtual](https://en.cppreference.com/w/cpp/language/abstract_class). This
+  allows multiple implementations to inherit from the same interface class,
+  particularly so one implementation can execute functionality in the local
+  process, and other implementations can forward calls to remote processes.
+
+- Interface method definitions should wrap existing functionality instead of
+  implementing new functionality. Any substantial new node or wallet
+  functionality should be implemented in [`src/node/`](../src/node/) or
+  [`src/wallet/`](../src/wallet/) and just exposed in
+  [`src/interfaces/`](../src/interfaces/) instead of being implemented there,
+  so it can be more modular and accessible to unit tests.
+
+- Interface method parameter and return types should either be serializable or
+  be other interface classes. Interface methods shouldn't pass references to
+  objects that can't be serialized or accessed from another process.
+
+  Examples:
+
+  ```c++
+  // Good: takes string argument and returns interface class pointer
+  virtual unique_ptr<Wallet> loadWallet(std::string filename) = 0;
+
+  // Bad: returns CWallet reference that can't be used from another process
+  virtual CWallet& loadWallet(std::string filename) = 0;
+  ```
+
+  ```c++
+  // Good: accepts and returns primitive types
+  virtual bool findBlock(const uint256& hash, int& out_height, int64_t& out_time) = 0;
+
+  // Bad: returns pointer to internal node in a linked list inaccessible to
+  // other processes
+  virtual const CBlockIndex* findBlock(const uint256& hash) = 0;
+  ```
+
+  ```c++
+  // Good: takes plain callback type and returns interface pointer
+  using TipChangedFn = std::function<void(int block_height, int64_t block_time)>;
+  virtual std::unique_ptr<Handler> handleTipChanged(TipChangedFn fn) = 0;
+
+  // Bad: returns boost connection specific to local process
+  using TipChangedFn = std::function<void(int block_height, int64_t block_time)>;
+  virtual boost::signals2::scoped_connection connectTipChanged(TipChangedFn fn) = 0;
+  ```
+
+- For consistency and friendliness to code generation tools, interface method
+  input and inout parameters should be ordered first and output parameters
+  should come last.
+
+  Example:
+
+  ```c++
+  // Good: error output param is last
+  virtual bool broadcastTransaction(const CTransactionRef& tx, CAmount max_fee, std::string& error) = 0;
+
+  // Bad: error output param is between input params
+  virtual bool broadcastTransaction(const CTransactionRef& tx, std::string& error, CAmount max_fee) = 0;
+  ```
+
+- For friendliness to code generation tools, interface methods should not be
+  overloaded:
+
+  Example:
+
+  ```c++
+  // Good: method names are unique
+  virtual bool disconnectByAddress(const CNetAddr& net_addr) = 0;
+  virtual bool disconnectById(NodeId id) = 0;
+
+  // Bad: methods are overloaded by type
+  virtual bool disconnect(const CNetAddr& net_addr) = 0;
+  virtual bool disconnect(NodeId id) = 0;
+  ```
+
+- For consistency and friendliness to code generation tools, interface method
+  names should be `lowerCamelCase` and standalone function names should be
+  `UpperCamelCase`.
+
+  Examples:
+
+  ```c++
+  // Good: lowerCamelCase method name
+  virtual void blockConnected(const CBlock& block, int height) = 0;
+
+  // Bad: uppercase class method
+  virtual void BlockConnected(const CBlock& block, int height) = 0;
+  ```
+
+  ```c++
+  // Good: UpperCamelCase standalone function name
+  std::unique_ptr<Node> MakeNode(LocalInit& init);
+
+  // Bad: lowercase standalone function
+  std::unique_ptr<Node> makeNode(LocalInit& init);
+  ```
+
+  Note: This last convention isn't generally followed outside of
+  [`src/interfaces/`](../src/interfaces/), though it did come up for discussion
+  before in [#14635](https://github.com/bitcoin/bitcoin/pull/14635).
diff --git a/doc/files.md b/doc/files.md
index 99f94e7ae143d..5a517a9088df9 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -8,6 +8,10 @@
 
 - [Multi-wallet environment](#multi-wallet-environment)
 
+  - [Berkeley DB database based wallets](#berkeley-db-database-based-wallets)
+
+  - [SQLite database based wallets](#sqlite-database-based-wallets)
+
 - [GUI settings](#gui-settings)
 
 - [Legacy subdirectories and files](#legacy-subdirectories-and-files)
@@ -61,7 +65,8 @@ Subdirectory | File(s) | Description
 `./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation
 `./` | `guisettings.ini.bak` | Backup of former [GUI settings](#gui-settings) after `-resetguisettings` option is used
 `./` | `mempool.dat` | Dump of the mempool's transactions
-`./` | `onion_v3_private_key` | Cached Tor hidden service private key for `-listenonion` option
+`./` | `onion_v3_private_key` | Cached Tor onion service private key for `-listenonion` option
+`./` | `i2p_private_key` | Private key that corresponds to our I2P address. When `-i2psam=` is specified, the contents of this file are used to identify ourselves for making outgoing connections to I2P peers and possibly accepting incoming ones. Automatically generated if it does not exist.
 `./` | `peers.dat` | Peer IP address database (custom format)
 `./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [dash.conf](dash-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. Path can be specified with `-settings` option
 `./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option
@@ -69,26 +74,36 @@ Subdirectory | File(s) | Description
 
 ## Multi-wallet environment
 
-Wallets are Berkeley DB (BDB) databases:
+Wallets are Berkeley DB (BDB) or SQLite databases.
 
-Subdirectory | File(s) | Description
--------------|-------------------|------------
-`database/` | BDB logging files | Part of BDB environment; created at start and deleted on shutdown; a user *must keep it as safe* as personal wallet `wallet.dat`
-`./` | `db.log` | BDB error file
-`./` | `wallet.dat` | Personal wallet with keys and transactions. May be either a Berkeley DB or SQLite database file.
-`./` | `.walletlock` | Wallet lock file
-`./` | `wallet.dat-journal` | SQLite Rollback Journal file for `wallet.dat`. Usually created at start and deleted on shutdown. A user *must keep it as safe* as the `wallet.dat` file.
-
-1. Each user-defined wallet named "wallet_name" resides in `wallets/wallet_name/` subdirectory.
+1. Each user-defined wallet named "wallet_name" resides in the `wallets/wallet_name/` subdirectory.
 2. The default (unnamed) wallet resides in `wallets/` subdirectory; if the latter does not exist, the wallet resides in the data directory.
-3. A wallet database path can be specified by `-wallet` option.
+3. A wallet database path can be specified with the `-wallet` option.
 4. `wallet.dat` files must not be shared across different node instances, as that can result in key-reuse and double-spends due to the lack of synchronization between instances.
 5. Any copy or backup of the wallet should be done through a `backupwallet` call in order to update and lock the wallet, preventing any file corruption caused by updates during the copy.
+
+
+### Berkeley DB database based wallets
+
+Subdirectory | File(s) | Description
+-------------|-------------------|-------------
+`database/` | BDB logging files | Part of BDB environment; created at start and deleted on shutdown; a user *must keep it as safe* as personal wallet `wallet.dat`
+`./` | `db.log` | BDB error file
+`./` | `wallet.dat` | Personal wallet (a BDB database) with keys and transactions
+`./` | `.walletlock` | BDB wallet lock file
+
+### SQLite database based wallets
+
+Subdirectory | File | Description
+-------------|----------------------|-------------
+`./` | `wallet.dat` | Personal wallet (a SQLite database) with keys and transactions
+`./` | `wallet.dat-journal` | SQLite Rollback Journal file for `wallet.dat`. Usually created at start and deleted on shutdown. A user *must keep it as safe* as the `wallet.dat` file.
+
+
 ## GUI settings
 
 `dash-qt` uses [`QSettings`](https://doc.qt.io/qt-5/qsettings.html) class; this implies platform-specific [locations where application settings are stored](https://doc.qt.io/qt-5/qsettings.html#locations-where-application-settings-are-stored).
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index d12dee863998b..4ebbbdc934d38 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -16,7 +16,7 @@ $ FUZZ=process_message src/test/fuzz/fuzz
 # abort fuzzing using ctrl-c
 ```
 
-## Fuzzing harnesses, fuzzing output and fuzzing corpora
+## Fuzzing harnesses and output
 
 [`process_message`](https://github.com/dashpay/dash/blob/develop/src/test/fuzz/process_message.cpp) is a fuzzing harness for the [`ProcessMessage(...)` function (`net_processing`)](https://github.com/dashpay/dash/blob/develop/src/net_processing.cpp). The available fuzzing harnesses are found in [`src/test/fuzz/`](https://github.com/dashpay/dash/tree/develop/src/test/fuzz).
 
@@ -64,6 +64,8 @@ block^@M-^?M-^?M-^?M-^?M-^?nM-^?M-^?
 In this case the fuzzer managed to create a `block` message which when passed
 to `ProcessMessage(...)` increased coverage.
 
+## Fuzzing corpora
+
 The project's collection of seed corpora is found in the
 [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo.
 
 To fuzz `process_message` using the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) seed corpus:
@@ -81,6 +83,24 @@ INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb
 …
 ```
 
+## Run without sanitizers for increased throughput
+
+Fuzzing on a harness compiled with `--with-sanitizers=address,fuzzer,undefined` is good for finding bugs. However, the very slow execution even under libFuzzer will limit the ability to find new coverage. A good approach is to perform occasional long runs without the additional bug-detectors (configure `--with-sanitizers=fuzzer`) and then merge new inputs into a corpus as described in the qa-assets repo (https://github.com/bitcoin-core/qa-assets/blob/main/.github/PULL_REQUEST_TEMPLATE.md).
Patience is useful; even with improved throughput, libFuzzer may need days and 10s of millions of executions to reach deep/hard targets. + +## Reproduce a fuzzer crash reported by the CI + +- `cd` into the `qa-assets` directory and update it with `git pull qa-assets` +- locate the crash case described in the CI output, e.g. `Test unit written to + ./crash-1bc91feec9fc00b107d97dc225a9f2cdaa078eb6` +- make sure to compile with all sanitizers, if they are needed (fuzzing runs + more slowly with sanitizers enabled, but a crash should be reproducible very + quickly from a crash case) +- run the fuzzer with the case number appended to the seed corpus path: + `FUZZ=process_message src/test/fuzz/fuzz + qa-assets/fuzz_seed_corpus/process_message/1bc91feec9fc00b107d97dc225a9f2cdaa078eb6` + +## Submit improved coverage + If you find coverage increasing inputs when fuzzing you are highly encouraged to submit them for inclusion in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo. Every single pull request submitted against the Dash Core repo is automatically tested against all inputs in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo. Contributing new coverage increasing inputs is an easy way to help make Dash Core more robust. diff --git a/doc/gitian-building.md b/doc/gitian-building.md index c418e59b974f4..24bde926ff80a 100644 --- a/doc/gitian-building.md +++ b/doc/gitian-building.md @@ -331,7 +331,7 @@ Execute the following as user `debian`: ```bash cd gitian-builder -bin/make-base-vm --lxc --arch amd64 --suite bionic +bin/make-base-vm --lxc --arch amd64 --suite focal ``` There will be a lot of warnings printed during the build of the image. These can be ignored. @@ -383,7 +383,7 @@ Output from `gbuild` will look something like Resolving deltas: 100% (41590/41590), done. From https://github.com/dashpay/dash ... 
(new tags, new branch etc) - --- Building for bionic amd64 --- + --- Building for focal amd64 --- Stopping target if it is up Making a new image copy stdin: is not a tty @@ -432,14 +432,14 @@ So, if you use LXC: export PATH="$PATH":/path/to/gitian-builder/libexec export USE_LXC=1 cd /path/to/gitian-builder -./libexec/make-clean-vm --suite bionic --arch amd64 +./libexec/make-clean-vm --suite focal --arch amd64 -LXC_ARCH=amd64 LXC_SUITE=bionic on-target -u root apt-get update -LXC_ARCH=amd64 LXC_SUITE=bionic on-target -u root \ +LXC_ARCH=amd64 LXC_SUITE=focal on-target -u root apt-get update +LXC_ARCH=amd64 LXC_SUITE=focal on-target -u root \ -e DEBIAN_FRONTEND=noninteractive apt-get --no-install-recommends -y install \ $( sed -ne '/^packages:/,/[^-] .*/ {/^- .*/{s/"//g;s/- //;p}}' ../dash/contrib/gitian-descriptors/*|sort|uniq ) -LXC_ARCH=amd64 LXC_SUITE=bionic on-target -u root apt-get -q -y purge grub -LXC_ARCH=amd64 LXC_SUITE=bionic on-target -u root -e DEBIAN_FRONTEND=noninteractive apt-get -y dist-upgrade +LXC_ARCH=amd64 LXC_SUITE=focal on-target -u root apt-get -q -y purge grub +LXC_ARCH=amd64 LXC_SUITE=focal on-target -u root -e DEBIAN_FRONTEND=noninteractive apt-get -y dist-upgrade ``` And then set offline mode for apt-cacher-ng: diff --git a/doc/guix.md b/doc/guix.md new file mode 100644 index 0000000000000..85013bf4b2952 --- /dev/null +++ b/doc/guix.md @@ -0,0 +1,3 @@ +# Bootstrappable Dash Core Builds + +See [contrib/guix/README.md](../contrib/guix/README.md) diff --git a/doc/i2p.md b/doc/i2p.md new file mode 100644 index 0000000000000..2e94577787ba2 --- /dev/null +++ b/doc/i2p.md @@ -0,0 +1,87 @@ +# I2P support in Dash Core + +It is possible to run Dash Core as an +[I2P (Invisible Internet Project)](https://en.wikipedia.org/wiki/I2P) +service and connect to such services. + +This [glossary](https://geti2p.net/en/about/glossary) may be useful to get +started with I2P terminology. + +## Run Dash Core with an I2P router (proxy) + +A running I2P router (proxy) with [SAM](https://geti2p.net/en/docs/api/samv3) +enabled is required (there is an [official one](https://geti2p.net) and +[a few alternatives](https://en.wikipedia.org/wiki/I2P#Routers)). Notice the IP +address and port the SAM proxy is listening to; usually, it is +`127.0.0.1:7656`. Once it is up and running with SAM enabled, use the following +Dash Core options: + +``` +-i2psam= + I2P SAM proxy to reach I2P peers and accept I2P connections (default: + none) + +-i2pacceptincoming + If set and -i2psam is also set then incoming I2P connections are + accepted via the SAM proxy. If this is not set but -i2psam is set + then only outgoing connections will be made to the I2P network. + Ignored if -i2psam is not set. Listening for incoming I2P + connections is done through the SAM proxy, not by binding to a + local address and port (default: 1) +``` + +In a typical situation, this suffices: + +``` +dashd -i2psam=127.0.0.1:7656 +``` + +The first time Dash Core connects to the I2P router, its I2P address (and +corresponding private key) will be automatically generated and saved in a file +named `i2p_private_key` in the Dash Core data directory. + +## Additional configuration options related to I2P + +You may set the `debug=i2p` config logging option to have additional +information in the debug log about your I2P configuration and connections. Run +`dash-cli help logging` for more information. + +It is possible to restrict outgoing connections in the usual way with +`onlynet=i2p`. 
I2P support is relatively new and there may be fewer I2P peers
+than Tor or IP ones. Therefore, using
+`onlynet=i2p` alone (without other `onlynet=`) may make a node more susceptible
+to [Sybil attacks](https://en.bitcoin.it/wiki/Weaknesses#Sybil_attack). Use
+`dash-cli -addrinfo` to see the number of I2P addresses known to your node.
+
+## I2P related information in Dash Core
+
+There are several ways to see your I2P address in Dash Core:
+- in the debug log (grep for `AddLocal`, the I2P address ends in `.b32.i2p`)
+- in the output of the `getnetworkinfo` RPC in the "localaddresses" section
+- in the output of `dash-cli -netinfo` peer connections dashboard
+
+To see which I2P peers your node is connected to, use `dash-cli -netinfo 4`
+or the `getpeerinfo` RPC (e.g. `dash-cli getpeerinfo`).
+
+To see which I2P addresses your node knows, use the `getnodeaddresses 0 i2p`
+RPC.
+
+## Compatibility
+
+Dash Core uses the [SAM v3.1](https://geti2p.net/en/docs/api/samv3) protocol
+to connect to the I2P network. Any I2P router that supports it can be used.
+
+## Ports in I2P and Dash Core
+
+Dash Core uses the [SAM v3.1](https://geti2p.net/en/docs/api/samv3)
+protocol. One particularity of SAM v3.1 is that it does not support ports,
+unlike newer versions of SAM (v3.2 and up) that do support them and default the
+port numbers to 0. From the point of view of peers that use newer versions of
+SAM or other protocols that support ports, a SAM v3.1 peer is connecting to them
+on port 0, from source port 0.
+
+To allow future upgrades to newer versions of SAM, Dash Core sets its
+listening port to 0 when listening for incoming I2P connections and advertises
+its own I2P address with port 0. Furthermore, it will not attempt to connect to
+I2P addresses with a non-zero port number because with SAM v3.1 the destination
+port (`TO_PORT`) is always set to 0 and is not in the control of Dash Core.
diff --git a/doc/init.md b/doc/init.md
index 4b4c5da565218..f5d2891912fff 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -59,11 +59,11 @@ Data directory: `/var/lib/dashd`
 PID file: `/var/run/dashd/dashd.pid` (OpenRC and Upstart) or `/run/dashd/dashd.pid` (systemd)
 Lock file: `/var/lock/subsys/dashd` (CentOS)
 
-The configuration file, PID directory (if applicable) and data directory
-should all be owned by the dashcore user and group. It is advised for security
-reasons to make the configuration file and data directory only readable by the
-dashcore user and group. Access to dash-cli and other dashd rpc clients
-can then be controlled by group membership.
+The PID directory (if applicable) and data directory should both be owned by the
+dashcore user and group. It is advised for security reasons to make the
+configuration file and data directory only readable by the dashcore user and
+group. Access to dash-cli and other dashd rpc clients can then be
+controlled by group membership.
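+
+As a concrete sketch (not needed when the systemd .service file handles this
+automatically, as noted below; the `/etc/dashcore/dash.conf` path is an
+assumption, so substitute your platform's configuration file location and the
+data directory listed above):
+
+    chown dashcore:dashcore /etc/dashcore/dash.conf /var/lib/dashd
+    chmod 0640 /etc/dashcore/dash.conf
+    chmod 0750 /var/lib/dashd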
NOTE: When using the systemd .service file, the creation of the aforementioned directories and the setting of their permissions is automatically handled by diff --git a/doc/productivity.md b/doc/productivity.md index 7997eca073a87..aa949a0ea2c1b 100644 --- a/doc/productivity.md +++ b/doc/productivity.md @@ -12,7 +12,7 @@ Table of Contents * [Multiple working directories with `git worktrees`](#multiple-working-directories-with-git-worktrees) * [Interactive "dummy rebases" for fixups and execs with `git merge-base`](#interactive-dummy-rebases-for-fixups-and-execs-with-git-merge-base) * [Writing code](#writing-code) - * [Format C/C++/Protobuf diffs with `clang-format-diff.py`](#format-ccprotobuf-diffs-with-clang-format-diffpy) + * [Format C/C++ diffs with `clang-format-diff.py`](#format-cc-diffs-with-clang-format-diffpy) * [Format Python diffs with `yapf-diff.py`](#format-python-diffs-with-yapf-diffpy) * [Rebasing/Merging code](#rebasingmerging-code) * [More conflict context with `merge.conflictstyle diff3`](#more-conflict-context-with-mergeconflictstyle-diff3) @@ -119,13 +119,13 @@ You can also set up [upstream refspecs](#reference-prs-easily-with-refspecs) to Writing code ------------ -### Format C/C++/Protobuf diffs with `clang-format-diff.py` +### Format C/C++ diffs with `clang-format-diff.py` See [contrib/devtools/README.md](/contrib/devtools/README.md#clang-format-diff.py). ### Format Python diffs with `yapf-diff.py` -Usage is exactly the same as [`clang-format-diff.py`](#format-ccprotobuf-diffs-with-clang-format-diffpy). You can get it [here](https://github.com/MarcoFalke/yapf-diff). +Usage is exactly the same as [`clang-format-diff.py`](#format-cc-diffs-with-clang-format-diffpy). You can get it [here](https://github.com/MarcoFalke/yapf-diff). Rebasing/Merging code ------------- diff --git a/doc/reduce-traffic.md b/doc/reduce-traffic.md index 1dd733b659a50..5f135b1566f8c 100644 --- a/doc/reduce-traffic.md +++ b/doc/reduce-traffic.md @@ -23,7 +23,7 @@ longer serving historic blocks (blocks older than one week). Keep in mind that new nodes require other nodes that are willing to serve historic blocks. -Whitelisted peers will never be disconnected, although their traffic counts for +Peers with the `download` permission will never be disconnected, although their traffic counts for calculating the target. ## 2. Disable "listening" (`-listen=0`) diff --git a/doc/release-notes-11413.md b/doc/release-notes-11413.md new file mode 100644 index 0000000000000..165fbc8833eb3 --- /dev/null +++ b/doc/release-notes-11413.md @@ -0,0 +1,7 @@ +Updated or changed RPC +---------------------- + +The `fundrawtransaction`, `sendmany`, `sendtoaddress`, and `walletcreatefundedpsbt` +RPC commands have been updated to include two new fee estimation methods "DASH/kB" and "duff/B". +The target is the fee expressed explicitly in the given form. +In addition, the `estimate_mode` parameter is now case insensitive for all of the above RPC commands. diff --git a/doc/release-notes-16060.md b/doc/release-notes-16060.md new file mode 100644 index 0000000000000..7fdf9daf90efe --- /dev/null +++ b/doc/release-notes-16060.md @@ -0,0 +1,15 @@ +Low-level RPC changes +---------------------- + +- Soft fork reporting in the `getblockchaininfo` return object has been + updated. For full details, see the RPC help text. 
In summary:
+  - The `bip9_softforks` sub-object is no longer returned
+  - The `softforks` sub-object now returns an object keyed by soft fork name,
+    rather than an array
+  - Each softfork object in the `softforks` object contains a `type` value which
+    is either `buried` (for soft fork deployments where the activation height is
+    hard-coded into the client implementation), or `bip9` (for soft fork deployments
+    where activation is controlled by BIP 9 signaling).
+
+- `getblocktemplate` no longer returns a `rules` array containing `CSV`
+  (the BIP 9 deployment that is currently in active state).
diff --git a/doc/release-notes-16185.md b/doc/release-notes-16185.md
new file mode 100644
index 0000000000000..2567ebea40270
--- /dev/null
+++ b/doc/release-notes-16185.md
@@ -0,0 +1,6 @@
+RPC changes
+-----------
+The `gettransaction` RPC now accepts a third (boolean) argument `verbose`. If
+set to `true`, a new `decoded` field will be added to the response containing
+the decoded transaction. This field is equivalent to RPC `decoderawtransaction`,
+or RPC `getrawtransaction` when `verbose` is passed.
diff --git a/doc/release-notes-16524.md b/doc/release-notes-16524.md
new file mode 100644
index 0000000000000..7000c9c665883
--- /dev/null
+++ b/doc/release-notes-16524.md
@@ -0,0 +1,8 @@
+
+Low-level changes
+=================
+
+Tests
+---
+
+- `-fallbackfee` was 0 (disabled) by default for the main chain, but 1000 by default for the test chains. Now it is 0 by default for all chains. Testnet and regtest users will have to add fallbackfee=1000 to their configuration if they weren't setting it and they want it to keep working like before. (bitcoin#16524)
diff --git a/doc/release-notes-17004.md b/doc/release-notes-17004.md
new file mode 100644
index 0000000000000..8789daeafe93f
--- /dev/null
+++ b/doc/release-notes-17004.md
@@ -0,0 +1,31 @@
+P2P and network changes
+-----------------------
+
+#### Removal of reject network messages from Dash Core (BIP61)
+
+The command line option to enable BIP61 (`-enablebip61`) has been removed.
+
+This feature has been disabled by default since Dash Core version 0.19.0.
+Nodes on the network cannot generally be trusted to send valid ("reject")
+messages, so this should only ever be used when connected to a trusted node.
+
+Since Dash Core version 0.20.0 there are extra changes:
+
+The removal of BIP61 REJECT message support also has the following minor RPC
+and logging implications:
+
+* `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
+  code when a transaction is not accepted to the mempool. They still return the
+  verbal reject reason.
+
+* Log messages that previously reported the REJECT code when a transaction was
+  not accepted to the mempool now no longer report the REJECT code. The reason
+  for rejection is still reported.
+
+Updated RPCs
+------------
+
+- `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
+  code when a transaction is not accepted to the mempool. See the Section
+  _Removal of reject network messages from Dash Core (BIP61)_ for details on
+  the removal of BIP61 REJECT message support.
diff --git a/doc/release-notes-17056.md b/doc/release-notes-17056.md
new file mode 100644
index 0000000000000..23d5a8c8cdae6
--- /dev/null
+++ b/doc/release-notes-17056.md
@@ -0,0 +1,4 @@
+Low-level RPC Changes
+===
+
+- A new descriptor type `sortedmulti(...)` has been added to support multisig scripts where the public keys are sorted lexicographically in the resulting script.
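+
+  As a quick illustration (a sketch; this assumes the `getdescriptorinfo` RPC is
+  available on the node, and reuses the example keys from descriptors.md), such
+  a descriptor can be checksummed and inspected with:
+
+      dash-cli getdescriptorinfo "sortedmulti(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)"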
diff --git a/doc/release-notes-17437.md b/doc/release-notes-17437.md
new file mode 100644
index 0000000000000..3edfd00a38c8c
--- /dev/null
+++ b/doc/release-notes-17437.md
@@ -0,0 +1,5 @@
+Low-level RPC Changes
+===
+
+- The RPC `gettransaction`, `listtransactions` and `listsinceblock` responses now also
+include the height of the block that contains the wallet transaction, if any.
diff --git a/doc/release-notes-17578.md b/doc/release-notes-17578.md
new file mode 100644
index 0000000000000..664d17fd78dba
--- /dev/null
+++ b/doc/release-notes-17578.md
@@ -0,0 +1,13 @@
+Deprecated or removed RPCs
+--------------------------
+
+- RPC `getaddressinfo` changes:
+
+  - the `label` field has been deprecated in favor of the `labels` field and
+    will be removed in 0.21. It can be re-enabled in the interim by launching
+    with `-deprecatedrpc=label`.
+
+  - the `labels` behavior of returning an array of JSON objects containing name
+    and purpose key/value pairs has been deprecated in favor of an array of
+    label names and will be removed in 0.21. The previous behavior can be
+    re-enabled in the interim by launching with `-deprecatedrpc=labelspurpose`.
diff --git a/doc/release-notes-18466.md b/doc/release-notes-18466.md
new file mode 100644
index 0000000000000..e46d5064a385f
--- /dev/null
+++ b/doc/release-notes-18466.md
@@ -0,0 +1,10 @@
+Low-level RPC changes
+---------------------
+
+- Error codes have been updated to be more accurate for the following error cases (#18466):
+  - `signmessage` now returns RPC_INVALID_ADDRESS_OR_KEY (-5) if the
+    passed address is invalid. Previously returned RPC_TYPE_ERROR (-3).
+  - `verifymessage` now returns RPC_INVALID_ADDRESS_OR_KEY (-5) if the
+    passed address is invalid. Previously returned RPC_TYPE_ERROR (-3).
+  - `verifymessage` now returns RPC_TYPE_ERROR (-3) if the passed signature
+    is malformed. Previously returned RPC_INVALID_ADDRESS_OR_KEY (-5).
diff --git a/doc/release-notes-18594.md b/doc/release-notes-18594.md
new file mode 100644
index 0000000000000..66d8450d86372
--- /dev/null
+++ b/doc/release-notes-18594.md
@@ -0,0 +1,5 @@
+## CLI
+
+The `dash-cli -getinfo` command now displays the wallet name and balance for
+each of the loaded wallets when more than one is loaded (e.g. in multiwallet
+mode) and a wallet is not specified with `-rpcwallet`. (#18594)
diff --git a/doc/release-notes-18733.md b/doc/release-notes-18733.md
new file mode 100644
index 0000000000000..637d3ee496e6a
--- /dev/null
+++ b/doc/release-notes-18733.md
@@ -0,0 +1,41 @@
+P2P and network changes
+-----------------------
+
+Updated RPCs
+------------
+
+Changes to Wallet or GUI related RPCs can be found in the GUI or Wallet section below.
+
+New RPCs
+--------
+
+Build System
+------------
+
+Updated settings
+----------------
+
+Changes to Wallet or GUI related settings can be found in the GUI or Wallet section below.
+
+New settings
+------------
+
+Wallet
+------
+
+#### Wallet RPC changes
+
+- The `upgradewallet` RPC replaces the `-upgradewallet` command line option.
+  (#15761)
+- The `settxfee` RPC will fail if the fee was set higher than the `-maxtxfee`
+  command line setting. The wallet will already fail to create transactions
+  with fees higher than `-maxtxfee`. (#18467)
+
+GUI changes
+-----------
+
+Low-level changes
+=================
+
+Tests
+-----
diff --git a/doc/release-notes-18807.md b/doc/release-notes-18807.md
new file mode 100644
index 0000000000000..e0dcd1ecda96a
--- /dev/null
+++ b/doc/release-notes-18807.md
@@ -0,0 +1,33 @@
+P2P and network changes
+-----------------------
+
+- The mempool now tracks whether transactions submitted via the wallet or RPCs
+  have been successfully broadcast. Every 10-15 minutes, the node will try to
+  announce unbroadcast transactions until a peer requests it via a `getdata`
+  message or the transaction is removed from the mempool for other reasons.
+  The node will not track the broadcast status of transactions submitted to the
+  node using P2P relay. This version reduces the initial broadcast guarantees
+  for wallet transactions submitted via P2P to a node running the wallet. (#18038)
+
+Updated RPCs
+------------
+
+- `getmempoolinfo` now returns an additional `unbroadcastcount` field. The
+  mempool tracks locally submitted transactions until their initial broadcast
+  is acknowledged by a peer. This field returns the count of transactions
+  waiting for acknowledgement.
+
+- Mempool RPCs such as `getmempoolentry` and `getrawmempool` with
+  `verbose=true` now return an additional `unbroadcast` field. This indicates
+  whether initial broadcast of the transaction has been acknowledged by a
+  peer. `getmempoolancestors` and `getmempooldescendants` are also updated.
+
+Wallet
+------
+
+- To improve wallet privacy, the frequency of wallet rebroadcast attempts is
+  reduced from approximately once every 15 minutes to once every 12-36 hours.
+  To maintain a similar level of guarantee for initial broadcast of wallet
+  transactions, the mempool tracks these transactions as a part of the newly
+  introduced unbroadcast set. See the "P2P and network changes" section for
+  more information on the unbroadcast set. (#18038)
diff --git a/doc/release-notes-19191.md b/doc/release-notes-19191.md
new file mode 100644
index 0000000000000..8df29f9931461
--- /dev/null
+++ b/doc/release-notes-19191.md
@@ -0,0 +1,7 @@
+Updated settings
+----------------
+
+- A `download` permission has been extracted from the `noban` permission. For
+  compatibility, `noban` implies the `download` permission, but this may change
+  in future releases. Refer to the help of the affected settings `-whitebind`
+  and `-whitelist` for more details. (#19191)
diff --git a/doc/release-notes-5225.md b/doc/release-notes-5225.md
new file mode 100644
index 0000000000000..e6cdc296796af
--- /dev/null
+++ b/doc/release-notes-5225.md
@@ -0,0 +1,10 @@
+Testnet Breaking Changes
+------------------------
+
+A new testnet-only LLMQ has been added. This LLMQ is of the type LLMQ_25_67 and is only active on testnet.
+This LLMQ will not remove the LLMQ_100_67 from testnet; however, that quorum will (likely) not form and will perform no role.
+See the [diff](https://github.com/dashpay/dash/pull/5225/files#diff-e70a38a3e8c2a63ca0494627301a5c7042141ad301193f78338d97cb1b300ff9R451-R469) for specific parameters of the LLMQ.
+
+This LLMQ will become active at height 847000. **This will be a breaking change and a hard fork for testnet.**
+This LLMQ is not activated with the v19 hard fork; as such, testnet will experience two hard forks: one at height 847000,
+and the other to be determined by the BIP9 hard fork process.
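+
+One way to check whether the new quorum type is forming on a testnet node after
+the fork height is the `quorum list` RPC (a sketch; the exact set of quorum
+types shown depends on the network and the node's sync state):
+
+    dash-cli quorum list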
diff --git a/doc/release-notes-5273.md b/doc/release-notes-5273.md
new file mode 100644
index 0000000000000..16bd0c88dcef0
--- /dev/null
+++ b/doc/release-notes-5273.md
@@ -0,0 +1,5 @@
+Added RPCs
+--------
+
+- `cleardiscouraged` clears all currently discouraged peers.
+
diff --git a/doc/release-notes-5338.md b/doc/release-notes-5338.md
new file mode 100644
index 0000000000000..ba532311b8bb8
--- /dev/null
+++ b/doc/release-notes-5338.md
@@ -0,0 +1,4 @@
+Added RPCs
+--------
+
+- `protx listdiff` RPC returns a full deterministic masternode list diff between two heights.
\ No newline at end of file
diff --git a/doc/release-notes-5377.md b/doc/release-notes-5377.md
new file mode 100644
index 0000000000000..2213a7592f4fa
--- /dev/null
+++ b/doc/release-notes-5377.md
@@ -0,0 +1,25 @@
+Updated RPCs
+--------
+
+- `protx diff` RPC returns a new field `quorumsCLSigs`.
+This field is a list of entries, each containing a ChainLock signature and the list of corresponding quorum indexes in `newQuorums`.
+
+`MNLISTDIFF` P2P message
+--------
+
+Starting with protocol version `70230`, the following fields are added to the `MNLISTDIFF` after `newQuorums`.
+
+| Field              | Type                  | Size     | Description                                                          |
+|--------------------|-----------------------|----------|---------------------------------------------------------------------|
+| quorumsCLSigsCount | compactSize uint      | 1-9      | Number of quorumsCLSigs elements                                     |
+| quorumsCLSigs      | quorumsCLSigsObject[] | variable | CL Sig used to calculate members per quorum indexes (in newQuorums)  |
+
+The content of `quorumsCLSigsObject`:
+
+| Field         | Type             | Size     | Description                                                                                  |
+|---------------|------------------|----------|----------------------------------------------------------------------------------------------|
+| signature     | BLSSig           | 96       | ChainLock signature                                                                          |
+| indexSetCount | compactSize uint | 1-9      | Number of quorum indexes using the same `signature` for their member calculation             |
+| indexSet      | uint16_t[]       | variable | Quorum indexes in `newQuorums` using `signature` for their member calculation                |
+
+Note: The `quorumsCLSigs` field in both RPC and P2P will only be populated after the v20 activation.
\ No newline at end of file
diff --git a/doc/release-notes-5447.md b/doc/release-notes-5447.md
new file mode 100644
index 0000000000000..ac9e37c390881
--- /dev/null
+++ b/doc/release-notes-5447.md
@@ -0,0 +1,6 @@
+Updated RPCs
+--------
+
+- `masternodelist`: New mode `hpmn` filters only HPMNs.
+- `protx list`: New type `hpmn` filters only HPMNs.
+
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 02523da7eabd6..34f1b2fb2a40b 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -115,8 +115,10 @@ See detailed [set of changes](https://github.com/dashpay/dash/compare/v19.1.0...
 
 Thanks to everyone who directly contributed to this release:
 
+- Kittywhiskers Van Gogh (kittywhiskers)
 - Konstantin Akimov (knst)
 - Nathan Marley (nmarley)
+- Odysseas Gabrielides (ogabrielides)
 - PastaPastaPasta
 - thephez
 - UdjinM6
diff --git a/doc/release-process.md b/doc/release-process.md
index ce4bc0010d868..cce8dc728c32c 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -24,18 +24,17 @@ Before every major release:
 
 * Update [`src/chainparams.cpp`](/src/chainparams.cpp) m_assumed_blockchain_size and m_assumed_chain_state_size with the current size plus some overhead (see [this](#how-to-calculate-m_assumed_blockchain_size-and-m_assumed_chain_state_size) for information on how to calculate them).
 * Update `src/chainparams.cpp` chainTxData with statistics about the transaction count and rate. Use the output of the RPC `getchaintxstats`, see [this pull request](https://github.com/bitcoin/bitcoin/pull/12270) for an example. Reviewers can verify the results by running `getchaintxstats <window_block_count> <window_last_block_hash>` with the `window_block_count` and `window_last_block_hash` from your output.
-* Update version of `contrib/gitian-descriptors/*.yml`: usually one'd want to do this on master after branching off the release - but be sure to at least do it before a new major release
 
 ### First time / New builders
 
-If you're using the automated script (found in [contrib/gitian-build.py](/contrib/gitian-build.py)), then at this point you should run it with the "--setup" command. Otherwise ignore this.
+Install Guix using one of the installation methods detailed in
+[contrib/guix/INSTALL.md](/contrib/guix/INSTALL.md).
 
 Check out the source code in the following directory hierarchy.
 
     cd /path/to/your/toplevel/build
-    git clone https://github.com/dashpay/gitian.sigs.git
+    git clone https://github.com/dashpay/guix.sigs.git
     git clone https://github.com/dashpay/dash-detached-sigs.git
-    git clone https://github.com/devrandom/gitian-builder.git
     git clone https://github.com/dashpay/dash.git
 
 ### Dash Core maintainers/release engineers, suggestion for writing release notes
 
@@ -52,114 +51,56 @@ Tag version (or release candidate) in git
 
     git tag -s v(new version, e.g. 0.12.3)
 
-### Setup and perform Gitian builds
+### Setup and perform Guix builds
 
-If you're using the automated script (found in [contrib/gitian-build.py](/contrib/gitian-build.py)), then at this point you should run it with the "--build" command. Otherwise ignore this.
+Check out the Dash Core version you'd like to build:
 
-Setup Gitian descriptors:
-
-    pushd ./dash
-    export SIGNER="(your Gitian key, ie UdjinM6, Pasta, etc)"
-    export VERSION=(new version, e.g. 0.12.3)
-    git fetch
-    git checkout v${VERSION}
-    popd
-
-Ensure your gitian.sigs are up-to-date if you wish to gverify your builds against other Gitian signatures.
-
-    pushd ./gitian.sigs
-    git pull
-    popd
-
-Ensure gitian-builder is up-to-date:
-
-    pushd ./gitian-builder
-    git pull
-    popd
-
-
-### Fetch and create inputs: (first time, or when dependency versions change)
-
-    pushd ./gitian-builder
-    mkdir -p inputs
-    wget -O inputs/osslsigncode-2.0.tar.gz https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz
-    echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c
-    popd
-
-Create the macOS SDK tarball, see the [macOS build instructions](build-osx.md#deterministic-macos-dmg-notes) for details, and copy it into the inputs directory.
-
-### Optional: Seed the Gitian sources cache and offline git repositories
-
-NOTE: Gitian is sometimes unable to download files. If you have errors, try the step below.
-
-By default, Gitian will fetch source files as needed. To cache them ahead of time, make sure you have checked out the tag you want to build in dash, then:
-
-    pushd ./gitian-builder
-    make -C ../dash/depends download SOURCES_PATH=`pwd`/cache/common
-    popd
-
-Only missing files will be fetched, so this is safe to re-run for each build.
-
-NOTE: Offline builds must use the --url flag to ensure Gitian fetches only from local URLs. For example:
-
-    pushd ./gitian-builder
-    ./bin/gbuild --url dash=/path/to/dash,signature=/path/to/sigs {rest of arguments}
-    popd
-
-The gbuild invocations below DO NOT DO THIS by default.
-
-### Build and sign Dash Core for Linux, Windows, and macOS:
-
-    pushd ./gitian-builder
-    ./bin/gbuild --num-make 2 --memory 3000 --commit dash=v${VERSION} ../dash/contrib/gitian-descriptors/gitian-linux.yml
-    ./bin/gsign --signer "$SIGNER" --release ${VERSION}-linux --destination ../gitian.sigs/ ../dash/contrib/gitian-descriptors/gitian-linux.yml
-    mv build/out/dash-*.tar.gz build/out/src/dash-*.tar.gz ../
+```sh
+pushd ./dash
+export SIGNER='(your builder key, ie UdjinM6, Pasta, etc)'
+export VERSION='(new version, e.g. 20.0.0)'
+git fetch origin "v${VERSION}"
+git checkout "v${VERSION}"
+popd
+```
 
-    ./bin/gbuild --num-make 2 --memory 3000 --commit dash=v${VERSION} ../dash/contrib/gitian-descriptors/gitian-win.yml
-    ./bin/gsign --signer "$SIGNER" --release ${VERSION}-win-unsigned --destination ../gitian.sigs/ ../dash/contrib/gitian-descriptors/gitian-win.yml
-    mv build/out/dash-*-win-unsigned.tar.gz inputs/dash-win-unsigned.tar.gz
-    mv build/out/dash-*.zip build/out/dash-*.exe ../
+Ensure your guix.sigs are up-to-date if you wish to `guix-verify` your builds
+against other `guix-attest` signatures.
 
-    ./bin/gbuild --num-make 2 --memory 3000 --commit dash=v${VERSION} ../dash/contrib/gitian-descriptors/gitian-osx.yml
-    ./bin/gsign --signer "$SIGNER" --release ${VERSION}-osx-unsigned --destination ../gitian.sigs/ ../dash/contrib/gitian-descriptors/gitian-osx.yml
-    mv build/out/dash-*-osx-unsigned.tar.gz inputs/dash-osx-unsigned.tar.gz
-    mv build/out/dash-*.tar.gz build/out/dash-*.dmg ../
-    popd
+```sh
+git -C ./guix.sigs pull
+```
 
-Build output expected:
+### Create the macOS SDK tarball: (first time, or when SDK version changes)
 
-    1. source tarball (`dash-${VERSION}.tar.gz`)
-    2. linux 32-bit and 64-bit dist tarballs (`dash-${VERSION}-linux[32|64].tar.gz`)
-    3. windows 32-bit and 64-bit unsigned installers and dist zips (`dash-${VERSION}-win[32|64]-setup-unsigned.exe`, `dash-${VERSION}-win[32|64].zip`)
-    4. macOS unsigned installer and dist tarball (`dash-${VERSION}-osx-unsigned.dmg`, `dash-${VERSION}-osx64.tar.gz`)
-    5. Gitian signatures (in `gitian.sigs/${VERSION}-/(your Gitian key)/`)
+Create the macOS SDK tarball, see the [macOS build
+instructions](build-osx.md#deterministic-macos-dmg-notes) for
+details.
 
-### Verify other gitian builders signatures to your own. (Optional)
+### Build and attest to build outputs:
 
-Add other gitian builders keys to your gpg keyring, and/or refresh keys.
+Follow the relevant Guix README.md sections:
+- [Building](/contrib/guix/README.md#building)
+- [Attesting to build outputs](/contrib/guix/README.md#attesting-to-build-outputs)
 
-    gpg --import dash/contrib/gitian-keys/*.pgp
-    gpg --refresh-keys
+### Verify other builders' signatures to your own. (Optional)
 
-Verify the signatures
+Add other builders' keys to your gpg keyring, and/or refresh keys; see `../dash/contrib/builder-keys/README.md`.
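For reference, a minimal sketch of that import-and-refresh step, mirroring the gitian-keys commands it replaces. The exact filenames under `contrib/builder-keys/` are an assumption here; adjust the glob to match the repository's layout.

```sh
# Hypothetical sketch: import the builder keys shipped in the repo, then
# refresh them from the keyservers so key updates and revocations are picked up.
gpg --import ./dash/contrib/builder-keys/*
gpg --refresh-keys
```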
-    pushd ./gitian-builder
-    ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-linux ../dash/contrib/gitian-descriptors/gitian-linux.yml
-    ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-win-unsigned ../dash/contrib/gitian-descriptors/gitian-win.yml
-    ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-osx-unsigned ../dash/contrib/gitian-descriptors/gitian-osx.yml
-    popd
+Follow the relevant Guix README.md sections:
+- [Verifying build output attestations](/contrib/guix/README.md#verifying-build-output-attestations)
 
 ### Next steps:
 
-Commit your signature to gitian.sigs:
+Commit your signature to guix.sigs:
 
-    pushd gitian.sigs
-    git add ${VERSION}-linux/"${SIGNER}"
-    git add ${VERSION}-win-unsigned/"${SIGNER}"
-    git add ${VERSION}-osx-unsigned/"${SIGNER}"
-    git commit -a
-    git push  # Assuming you can push to the gitian.sigs tree
-    popd
+```sh
+pushd guix.sigs
+git add "${VERSION}/${SIGNER}"/noncodesigned.SHA256SUMS{,.asc}
+git commit -a
+git push # Assuming you can push to the guix.sigs tree
+popd
+```
 
 Codesigner only: Create Windows/macOS detached signatures:
 - Only one person handles codesigning. Everyone else should skip to the next step.
 
@@ -171,7 +112,7 @@ Codesigner only: Sign the macOS binary:
     tar xf dashcore-osx-unsigned.tar.gz
     ./detached-sig-create.sh -s "Key ID" -o runtime
    Enter the keychain password and authorize the signature
-    Move signature-osx.tar.gz back to the gitian host
+    Move signature-osx.tar.gz back to the guix-build host
 
 Codesigner only: Sign the windows binaries:
 
@@ -182,84 +123,67 @@ Codesigner only: Sign the windows binaries:
 
 Codesigner only: Commit the detached codesign payloads:
 
-    cd ~/dashcore-detached-sigs
-    checkout the appropriate branch for this release series
-    rm -rf *
-    tar xf signature-osx.tar.gz
-    tar xf signature-win.tar.gz
-    git add -A
-    git commit -m "point to ${VERSION}"
-    git tag -s v${VERSION} HEAD
-    git push the current branch and new tag
+```sh
+pushd ~/dashcore-detached-sigs
+# checkout the appropriate branch for this release series
+rm -rf *
+tar xf signature-osx.tar.gz
+tar xf signature-win.tar.gz
+git add -A
+git commit -m "point to ${VERSION}"
+git tag -s "v${VERSION}" HEAD
+git push origin HEAD "v${VERSION}" # push the current branch and new tag
+popd
+```
 
 Non-codesigners: wait for Windows/macOS detached signatures:
 
 - Once the Windows/macOS builds each have 3 matching signatures, they will be signed with their respective release keys.
 - Detached signatures will then be committed to the [dash-detached-sigs](https://github.com/dashpay/dash-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries.
-Create (and optionally verify) the signed macOS binary:
-
-    pushd ./gitian-builder
-    ./bin/gbuild -i --commit signature=v${VERSION} ../dash/contrib/gitian-descriptors/gitian-osx-signer.yml
-    ./bin/gsign --signer "$SIGNER" --release ${VERSION}-osx-signed --destination ../gitian.sigs/ ../dash/contrib/gitian-descriptors/gitian-osx-signer.yml
-    ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-osx-signed ../dash/contrib/gitian-descriptors/gitian-osx-signer.yml
-    mv build/out/dash-osx-signed.dmg ../dash-${VERSION}-osx.dmg
-    popd
-
-Create (and optionally verify) the signed Windows binaries:
-
-    pushd ./gitian-builder
-    ./bin/gbuild -i --commit signature=v${VERSION} ../dash/contrib/gitian-descriptors/gitian-win-signer.yml
-    ./bin/gsign --signer "$SIGNER" --release ${VERSION}-win-signed --destination ../gitian.sigs/ ../dash/contrib/gitian-descriptors/gitian-win-signer.yml
-    ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-win-signed ../dash/contrib/gitian-descriptors/gitian-win-signer.yml
-    mv build/out/dash-*win64-setup.exe ../dash-${VERSION}-win64-setup.exe
-    popd
+Create (and optionally verify) the codesigned outputs:
+- [Codesigning](/contrib/guix/README.md#codesigning)
 
 Commit your signature for the signed macOS/Windows binaries:
 
-    pushd gitian.sigs
-    git add ${VERSION}-osx-signed/"${SIGNER}"
-    git add ${VERSION}-win-signed/"${SIGNER}"
-    git commit -m "Add ${SIGNER} ${VERSION} signed binaries signatures"
-    git push  # Assuming you can push to the gitian.sigs tree
-    popd
+```sh
+pushd ./guix.sigs
+git add "${VERSION}/${SIGNER}"/all.SHA256SUMS{,.asc}
+git commit -m "Add ${SIGNER} ${VERSION} signed binaries signatures"
+git push # Assuming you can push to the guix.sigs tree
+popd
+```
 
-### After 3 or more people have gitian-built and their results match:
+### After 3 or more people have guix-built and their results match:
 
-- Create `SHA256SUMS.asc` for the builds, and GPG-sign it:
+Combine the `all.SHA256SUMS.asc` files from all signers into `SHA256SUMS.asc`:
 
 ```bash
-sha256sum * > SHA256SUMS
+cat "$VERSION"/*/all.SHA256SUMS.asc > SHA256SUMS.asc
 ```
 
-The list of files should be:
-```
-dash-${VERSION}-aarch64-linux-gnu.tar.gz
-dash-${VERSION}-riscv64-linux-gnu.tar.gz
-dash-${VERSION}-x86_64-linux-gnu.tar.gz
-dash-${VERSION}-osx64.tar.gz
-dash-${VERSION}-osx.dmg
-dash-${VERSION}.tar.gz
-dash-${VERSION}-win64-setup.exe
-dash-${VERSION}-win64.zip
-```
-The `*-debug*` files generated by the Gitian build contain debug symbols
-for troubleshooting by developers. It is assumed that anyone that is interested
-in debugging can run Gitian to generate the files for themselves. To avoid
-end-user confusion about which file to pick, as well as save storage
-space *do not upload these to the dash.org server*.
+- Upload to the dash.org server:
+  1. The contents of each `./dash/guix-build-${VERSION}/output/${HOST}/` directory, except for
+     `*-debug*` files.
 
-- GPG-sign it, delete the unsigned file:
-```
-gpg --digest-algo sha256 --clearsign SHA256SUMS # outputs SHA256SUMS.asc
-rm SHA256SUMS
-```
-(the digest algorithm is forced to sha256 to avoid confusion of the `Hash:` header that GPG adds with the SHA256 used for the files)
-Note: check that SHA256SUMS itself doesn't end up in SHA256SUMS, which is a spurious/nonsensical entry.
+     Guix will output all of the results into host subdirectories, but the SHA256SUMS
+     file does not include these subdirectories.
+     In order for downloads via torrent
+     to verify without directory structure modification, all of the uploaded files
+     need to be in the same directory as the SHA256SUMS file.
 
-- Upload zips and installers, as well as `SHA256SUMS.asc` from last step, to the dash.org server
+     The `*-debug*` files generated by the guix build contain debug symbols
+     for troubleshooting by developers. It is assumed that anyone that is
+     interested in debugging can run guix to generate the files for
+     themselves. To avoid end-user confusion about which file to pick, as well
+     as save storage space *do not upload these to the dash.org server*.
 
-- Update dash.org
+     ```sh
+     find guix-build-${VERSION}/output/ -maxdepth 2 -type f -not -name "SHA256SUMS.part" -and -not -name "*debug*" -exec scp {} user@dash.org:/var/www/bin/dash-core-${VERSION} \;
+     ```
+
+  2. The `SHA256SUMS` file
+
+  3. The `SHA256SUMS.asc` combined signature file you just created
 
 - Announce the release:
 
@@ -275,6 +199,41 @@ Note: check that SHA256SUMS itself doesn't end up in SHA256SUMS, which is a spur
 
 - Celebrate
 
+### macOS Notarization
+
+#### Prerequisites
+
+Make sure you have the latest Xcode installed on your macOS device. You can download it from the Apple Developer website.
+You should have a valid Apple Developer ID under the team you are using, which is necessary for the notarization process.
+To avoid including your password as cleartext in a notarization script, you can provide a reference to a keychain item. You can add a new keychain item named AC_PASSWORD from the command line using the notarytool utility:
+
+```
+xcrun notarytool store-credentials "AC_PASSWORD" --apple-id "AC_USERNAME" --team-id <TeamID> --password <app-specific password>
+```
+
+#### Notarization
+
+Open Terminal, and navigate to the location of the .dmg file.
+Then, run the following command to notarize the .dmg file:
+
+```
+xcrun notarytool submit dashcore-{version}-osx.dmg --keychain-profile "AC_PASSWORD" --wait
+```
+
+Replace "{version}" with the version you are notarizing. This command uploads the .dmg file to Apple's notary service.
+The `--wait` option makes the command wait to return until the notarization process is complete.
+If the notarization process is successful, the notary service generates a log file URL. Please save this URL, as it contains valuable information regarding the notarization process.
+
+#### Notarization Validation
+
+After successfully notarizing the .dmg file, extract "Dash-Qt.app" from the .dmg.
+To verify that the notarization process was successful, run the following command:
+
+```
+spctl -a -vv -t install Dash-Qt.app
+```
+
+Replace "Dash-Qt.app" with the path to your .app file. This command checks whether your .app file passes Gatekeeper's
+checks. If the app is successfully notarized, the output will include a line stating `source=Notarized Developer ID`.
+
 ### Additional information
 
 #### How to calculate `m_assumed_blockchain_size` and `m_assumed_chain_state_size`
diff --git a/doc/tor.md b/doc/tor.md
index 8e6d6be52555f..5ca1843c1a61d 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -1,6 +1,6 @@
 # TOR SUPPORT IN DASH CORE
 
-It is possible to run Dash Core as a Tor hidden service, and connect to such services.
+It is possible to run Dash Core as a Tor onion service, and connect to such services.
 
 The following directions assume you have a Tor proxy running on port 9050. Many distributions
 default to having a SOCKS proxy listening on port 9050, but others
@@ -16,14 +16,19 @@ outgoing connections, but more is possible.
 
 -proxy=ip:port  Set the proxy server.
                 If SOCKS5 is selected (default), this proxy server will be used to
                 try to reach .onion addresses as well.
+                You need to use -noonion or -onion=0 to explicitly disable
+                outbound access to onion services.
 
--onion=ip:port  Set the proxy server to use for Tor hidden services. You do not
-                need to set this if it's the same as -proxy. You can use -noonion
-                to explicitly disable access to hidden services.
+-onion=ip:port  Set the proxy server to use for Tor onion services. You do not
+                need to set this if it's the same as -proxy. You can use -onion=0
+                to explicitly disable access to onion services.
+                Note: Only the -proxy option sets the proxy for DNS requests;
+                with -onion they will not route over Tor, so use -proxy if you
+                have privacy concerns.
 
 -listen         When using -proxy, listening is disabled by default. If you want
-                to run a hidden service (see next section), you'll need to enable
-                it explicitly.
+                to manually configure an onion service (see section 3), you'll
+                need to enable it explicitly.
 
 -connect=X      When behind a Tor proxy, you can specify .onion addresses instead
 -addnode=X      of IP addresses or hostnames in these parameters. It requires
@@ -33,7 +38,11 @@ outgoing connections, but more is possible.
 -onlynet=onion  Make outgoing connections only to .onion addresses. Incoming
                 connections are not affected by this option. This option can be
                 specified multiple times to allow multiple network types, e.g.
-                ipv4, ipv6, or onion.
+                ipv4, ipv6 or onion. If you use this option with values other
+                than onion you *cannot* disable onion connections; outgoing onion
+                connections will be enabled when you use -proxy or -onion. Use
+                -noonion or -onion=0 if you want to be sure there are no outbound
+                onion connections over the default proxy or your defined -proxy.
 
 An example of how to start the client if the Tor proxy is running on local host on
 port 9050 and only allows .onion nodes to connect:
 
@@ -44,8 +53,99 @@ In a typical situation, this suffices to run behind a Tor proxy:
 
     ./dashd -proxy=127.0.0.1:9050
 
+## 2. Automatically create a Dash Core onion service
 
-## 2. Run a Dash Core hidden server
+Dash Core makes use of Tor's control socket API to create and destroy
+ephemeral onion services programmatically. This means that if Tor is running and
+proper authentication has been configured, Dash Core automatically creates an
+onion service to listen on. The goal is to increase the number of available
+onion nodes.
+
+This feature is enabled by default if Dash Core is listening (`-listen`) and
+it requires a Tor connection to work. It can be explicitly disabled with
+`-listenonion=0`. If it is not disabled, it can be configured using the
+`-torcontrol` and `-torpassword` settings.
+
+To see verbose Tor information in the dashd debug log, pass `-debug=tor`.
+
+### Control Port
+
+You may need to set up the Tor Control Port. On Linux distributions there may be
+some or all of the following settings in `/etc/tor/torrc`, generally commented
+out by default (if not, add them):
+
+```
+ControlPort 9051
+CookieAuthentication 1
+CookieAuthFileGroupReadable 1
+```
+
+Add or uncomment those, save, and restart Tor (usually `systemctl restart tor`
+or `sudo systemctl restart tor` on most systemd-based systems, including recent
+Debian and Ubuntu, or just restart the computer).
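As a quick sanity check (an editorial suggestion, not part of the original document, assuming the `ss` utility from iproute2 is installed), you can confirm that Tor is now listening on the control port configured above:

```
# After restarting Tor, verify that something is listening on control port 9051.
ss -ltn | grep 9051
```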
+
+On some systems (such as Arch Linux), you may also need to add the following
+line:
+
+```
+DataDirectoryGroupReadable 1
+```
+
+### Authentication
+
+Connecting to Tor's control socket API requires one of two authentication
+methods to be configured: cookie authentication or dashd's `-torpassword`
+configuration option.
+
+#### Cookie authentication
+
+For cookie authentication, the user running dashd must have read access to
+the `CookieAuthFile` specified in the Tor configuration. In some cases this is
+preconfigured and the creation of an onion service is automatic. Don't forget to
+use the `-debug=tor` dashd configuration option to enable Tor debug logging.
+
+If a permissions problem is seen in the debug log, e.g. `tor: Authentication
+cookie /run/tor/control.authcookie could not be opened (check permissions)`, it
+can be resolved by adding both the user running Tor and the user running
+dashd to the same Tor group and setting permissions appropriately.
+
+On Debian-derived systems, the Tor group will likely be `debian-tor` and one way
+to verify could be to list the groups and grep for a "tor" group name:
+
+```
+getent group | cut -d: -f1 | grep -i tor
+```
+
+You can also check the group of the cookie file. On most Linux systems, the Tor
+auth cookie will usually be `/run/tor/control.authcookie`:
+
+```
+TORGROUP=$(stat -c '%G' /run/tor/control.authcookie)
+```
+
+Once you have determined the `${TORGROUP}` and selected the `${USER}` that will
+run dashd, run this as root:
+
+```
+usermod -a -G ${TORGROUP} ${USER}
+```
+
+Then restart the computer (or log out) and log in as the `${USER}` that will run
+dashd.
+
+#### `torpassword` authentication
+
+For the `-torpassword=password` option, the password is the clear text form that
+was used when generating the hashed password for the `HashedControlPassword`
+option in the Tor configuration file.
+
+The hashed password can be obtained with the command `tor --hash-password
+password` (refer to the [Tor Dev
+Manual](https://2019.www.torproject.org/docs/tor-manual.html.en) for more
+details).
+
+
+## 3. Manually create a Dash Core onion service
 
 If you configure your Tor system accordingly, it is possible to make your node also
 reachable from the Tor network. Add these lines to your /etc/tor/torrc (or equivalent
@@ -53,11 +153,12 @@ config file):
 *Needed for Tor version 0.2.7.0 and older versions of Tor only. For newer
 versions of Tor see [Section 4](#4-automatically-listen-on-tor).*
 
     HiddenServiceDir /var/lib/tor/dashcore-service/
-    HiddenServicePort 9999 127.0.0.1:9999
-    HiddenServicePort 19999 127.0.0.1:19999
+    HiddenServicePort 9999 127.0.0.1:9996
+    HiddenServicePort 19999 127.0.0.1:19996
 
-The directory can be different of course, but (both) port numbers should be equal to
-your dashd's P2P listen port (9999 by default).
+The directory can be different of course, but virtual port numbers should be equal to
+your dashd's P2P listen port (9999 by default), and target addresses and ports
+should be equal to the binding address and port for inbound Tor connections (127.0.0.1:9996 by default).
 
 -externalip=X   You can tell Dash Core about its publicly reachable address using
                 this option, and this can be a .onion address. Given the above
@@ -100,7 +201,7 @@ for normal IPv4/IPv6 communication, use:
 
    ./dashd -onion=127.0.0.1:9050 -externalip=ssapp53tmftyjmjb.onion -discover
 
-## 3. List of known Dash Core Tor relays
+## 3.1. List of known Dash Core Tor relays
 
 Note: All these nodes are hosted by masternodehosting.com
 
@@ -115,45 +216,10 @@ Note: All these nodes are hosted by masternodehosting.com
 * ys5upbdeotplam3y.onion
 * fijy6aikzxfea54i.onion
 
+## 4. Privacy recommendations
 
-## 4. Automatically listen on Tor
-
-Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket
-API, to create and destroy 'ephemeral' hidden services programmatically.
-Dash Core has been updated to make use of this.
-
-This means that if Tor is running (and proper authentication has been configured),
-Dash Core automatically creates a hidden service to listen on. This will positively
-affect the number of available .onion nodes.
-
-This new feature is enabled by default if Dash Core is listening (`-listen`), and
-requires a Tor connection to work. It can be explicitly disabled with `-listenonion=0`
-and, if not disabled, configured using the `-torcontrol` and `-torpassword` settings.
-To show verbose debugging information, pass `-debug=tor`.
-
-Connecting to Tor's control socket API requires one of two authentication methods to be
-configured. It also requires the control socket to be enabled, e.g. put `ControlPort 9051`
-in `torrc` config file. For cookie authentication the user running dashd must have read
-access to the `CookieAuthFile` specified in Tor configuration. In some cases this is
-preconfigured and the creation of a hidden service is automatic. If permission problems
-are seen with `-debug=tor` they can be resolved by adding both the user running Tor and
-the user running dashd to the same group and setting permissions appropriately. On
-Debian-based systems the user running dashd can be added to the debian-tor group,
-which has the appropriate permissions. Before starting dashd you will need to re-login
-to allow debian-tor group to be applied. Otherwise you will see the following notice: "tor:
-Authentication cookie /run/tor/control.authcookie could not be opened (check permissions)"
-on debug.log.
-
-An alternative authentication method is the use
-of the `-torpassword=password` option. The `password` is the clear text form that
-was used when generating the hashed password for the `HashedControlPassword` option
-in the tor configuration file. The hashed password can be obtained with the command
-`tor --hash-password password` (read the tor manual for more details).
-
-## 5. Privacy recommendations
-
-- Do not add anything but Dash Core ports to the hidden service created in section 2.
-  If you run a web service too, create a new hidden service for that.
-  Otherwise it is trivial to link them, which may reduce privacy. Hidden
-  services created automatically (as in section 3) always have only one port
+- Do not add anything but Dash Core ports to the onion service created in section 3.
+  If you run a web service too, create a new onion service for that.
+  Otherwise it is trivial to link them, which may reduce privacy. Onion
+  services created automatically (as in section 2) always have only one port
   open.
diff --git a/doc/travis-ci.md b/doc/travis-ci.md
deleted file mode 100644
index e54a208b858f1..0000000000000
--- a/doc/travis-ci.md
+++ /dev/null
@@ -1,42 +0,0 @@
-Travis CI
-=========
-
-Support for using travis-ci has been added in order to automate pull-testing.
-See [travis-ci.org](https://travis-ci.org/) for more info
-
-This procedure is different than the pull-tester that came before it in a few
-ways.
-
-There is nothing to administer. This is a major feature as it means
-that builds have no local state. Because there is no ability to login to the
-builders to install packages (tools, dependencies, etc), the entire build
-procedure must instead be controlled by a declarative script `.travis.yml`.
-This script declares each build configuration, creates virtual machines as
-necessary, builds, then discards the virtual machines.
-
-A build matrix is constructed to test a wide range of configurations, rather
-than a single pass/fail. This helps to catch build failures and logic errors
-that present on platforms other than the ones the author has tested. This
-matrix is defined in the build script and can be changed at any time.
-
-All builders use the dependency-generator in the [depends dir](/depends), rather than
-using apt-get to install build dependencies. This guarantees that the tester
-is using the same versions as Gitian, so the build results are nearly identical
-to what would be found in a final release. However, this also means that builds
-will fail if new dependencies are introduced without being added to the
-dependency generator.
-
-In order to avoid rebuilding all dependencies for each build, the binaries are
-cached and re-used when possible. Changes in the dependency-generator will
-trigger cache-invalidation and rebuilds as necessary.
-
-These caches can be manually removed if necessary. This is one of the very few
-manual operations that is possible with Travis, and it can be done by the
-Dash Core committer via the Travis web interface.
-
-In some cases, secure strings may be needed for hiding sensitive info such as
-private keys or URLs. The travis client may be used to create these strings:
-http://docs.travis-ci.com/user/encryption-keys/
-
-For the details of the build descriptor, see the official docs:
-http://docs.travis-ci.com/user/build-configuration/
diff --git a/share/setup.nsi.in b/share/setup.nsi.in
index 8c8677b037a54..fce609cdde00e 100644
--- a/share/setup.nsi.in
+++ b/share/setup.nsi.in
@@ -3,6 +3,7 @@ Name "@PACKAGE_NAME@ (64-bit)"
 RequestExecutionLevel highest
 SetCompressor /SOLID lzma
 SetDateSave off
+Unicode true
 
 # Uncomment these lines when investigating reproducibility errors
 #SetCompress off
diff --git a/src/Makefile.am b/src/Makefile.am
index af0ce5848bdab..a72d1d758d265 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -3,6 +3,10 @@
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
+# Pattern rule to print variables, e.g.
make print-top_srcdir +print-%: FORCE + @echo '$*'='$($*)' + DIST_SUBDIRS = secp256k1 AM_LDFLAGS = $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) $(SANITIZER_LDFLAGS) @@ -165,8 +169,10 @@ BITCOIN_CORE_H = \ cuckoocache.h \ ctpl_stl.h \ cxxtimer.hpp \ + evo/assetlocktx.h \ evo/dmn_types.h \ evo/cbtx.h \ + evo/creditpool.h \ evo/deterministicmns.h \ evo/dmnstate.h \ evo/evodb.h \ @@ -190,6 +196,7 @@ BITCOIN_CORE_H = \ fs.h \ httprpc.h \ httpserver.h \ + i2p.h \ index/base.h \ index/blockfilterindex.h \ index/disktxpos.h \ @@ -246,6 +253,7 @@ BITCOIN_CORE_H = \ node/transaction.h \ node/utxo_snapshot.h \ noui.h \ + outputtype.h \ policy/feerate.h \ policy/fees.h \ policy/policy.h \ @@ -258,6 +266,7 @@ BITCOIN_CORE_H = \ reverse_iterator.h \ rpc/blockchain.h \ rpc/client.h \ + rpc/mining.h \ rpc/protocol.h \ rpc/rawtransaction_util.h \ rpc/register.h \ @@ -301,6 +310,7 @@ BITCOIN_CORE_H = \ util/error.h \ util/fees.h \ util/golombrice.h \ + util/hash_type.h \ util/irange.h \ util/spanparsing.h \ util/system.h \ @@ -310,9 +320,11 @@ BITCOIN_CORE_H = \ util/message.h \ util/moneystr.h \ util/ranges.h \ + util/readwritefile.h \ util/underlying.h \ util/serfloat.h \ util/settings.h \ + util/skip_set.h \ util/sock.h \ util/string.h \ util/time.h \ @@ -320,7 +332,7 @@ BITCOIN_CORE_H = \ util/translation.h \ util/vector.h \ util/url.h \ - util/validation.h \ + util/vector.h \ validation.h \ validationinterface.h \ versionbits.h \ @@ -334,7 +346,6 @@ BITCOIN_CORE_H = \ wallet/fees.h \ wallet/ismine.h \ wallet/load.h \ - wallet/psbtwallet.h \ wallet/rpcwallet.h \ wallet/salvage.h \ wallet/scriptpubkeyman.h \ @@ -378,7 +389,9 @@ libbitcoin_server_a_SOURCES = \ consensus/tx_verify.cpp \ dbwrapper.cpp \ dsnotificationinterface.cpp \ + evo/assetlocktx.cpp \ evo/cbtx.cpp \ + evo/creditpool.cpp \ evo/deterministicmns.cpp \ evo/dmnstate.cpp \ evo/evodb.cpp \ @@ -391,11 +404,10 @@ libbitcoin_server_a_SOURCES = \ flatfile.cpp \ httprpc.cpp \ httpserver.cpp \ + i2p.cpp \ index/base.cpp \ index/blockfilterindex.cpp \ index/txindex.cpp \ - interfaces/chain.cpp \ - interfaces/node.cpp \ init.cpp \ governance/governance.cpp \ governance/classes.cpp \ @@ -432,6 +444,7 @@ libbitcoin_server_a_SOURCES = \ node/coin.cpp \ node/coinstats.cpp \ node/context.cpp \ + node/interfaces.cpp \ node/transaction.cpp \ noui.cpp \ policy/fees.cpp \ @@ -491,14 +504,13 @@ libbitcoin_wallet_a_SOURCES = \ coinjoin/client.cpp \ coinjoin/options.cpp \ coinjoin/util.cpp \ - interfaces/wallet.cpp \ wallet/coincontrol.cpp \ wallet/context.cpp \ wallet/crypter.cpp \ wallet/db.cpp \ wallet/fees.cpp \ + wallet/interfaces.cpp \ wallet/load.cpp \ - wallet/psbtwallet.cpp \ wallet/rpcdump.cpp \ wallet/rpcwallet.cpp \ wallet/scriptpubkeyman.cpp \ @@ -673,6 +685,7 @@ libbitcoin_common_a_SOURCES = \ netaddress.cpp \ netbase.cpp \ net_permissions.cpp \ + outputtype.cpp \ policy/feerate.cpp \ policy/policy.cpp \ protocol.cpp \ @@ -725,7 +738,9 @@ libbitcoin_util_a_SOURCES = \ util/system.cpp \ util/message.cpp \ util/moneystr.cpp \ + util/readwritefile.cpp \ util/settings.cpp \ + util/skip_set.cpp \ util/spanparsing.cpp \ util/strencodings.cpp \ util/time.cpp \ @@ -733,7 +748,6 @@ libbitcoin_util_a_SOURCES = \ util/string.cpp \ util/threadnames.cpp \ util/url.cpp \ - util/validation.cpp \ $(BITCOIN_CORE_H) if GLIBC_BACK_COMPAT @@ -929,20 +943,13 @@ clean-local: $(AM_V_GEN) $(WINDRES) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(CPPFLAGS) -DWINDRES_PREPROC -i $< -o $@ check-symbols: $(bin_PROGRAMS) -if TARGET_DARWIN - @echo "Checking macOS dynamic 
libraries..." - $(AM_V_at) OTOOL=$(OTOOL) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS) -endif - -if GLIBC_BACK_COMPAT - @echo "Checking glibc back compat..." - $(AM_V_at) CPPFILT=$(CPPFILT) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS) -endif + @echo "Running symbol and dynamic library checks..." + $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS) check-security: $(bin_PROGRAMS) if HARDEN @echo "Checking binary security..." - $(AM_V_at) OBJDUMP=$(OBJDUMP) OTOOL=$(OTOOL) $(PYTHON) $(top_srcdir)/contrib/devtools/security-check.py $(bin_PROGRAMS) + $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/security-check.py $(bin_PROGRAMS) endif diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 061ee304b599c..684cdced6f75c 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -13,6 +13,7 @@ GENERATED_BENCH_FILES = $(RAW_BENCH_FILES:.raw=.raw.h) bench_bench_dash_SOURCES = \ $(RAW_BENCH_FILES) \ + bench/addrman.cpp \ bench/bench_bitcoin.cpp \ bench/bench.cpp \ bench/bench.h \ @@ -38,6 +39,7 @@ bench_bench_dash_SOURCES = \ bench/mempool_stress.cpp \ bench/nanobench.h \ bench/nanobench.cpp \ + bench/rpc_blockchain.cpp \ bench/rpc_mempool.cpp \ bench/util_time.cpp \ bench/base58.cpp \ @@ -46,12 +48,11 @@ bench_bench_dash_SOURCES = \ bench/poly1305.cpp \ bench/prevector.cpp \ bench/string_cast.cpp \ - test/util.cpp \ - test/util.h + bench/verify_script.cpp nodist_bench_bench_dash_SOURCES = $(GENERATED_BENCH_FILES) -bench_bench_dash_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CLFAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/ +bench_bench_dash_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/ bench_bench_dash_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) bench_bench_dash_LDADD = \ $(LIBBITCOIN_SERVER) \ diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 0f6d54e836047..280d56991c879 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -148,6 +148,7 @@ BITCOIN_QT_H = \ qt/rpcconsole.h \ qt/sendcoinsdialog.h \ qt/sendcoinsentry.h \ + qt/sendcoinsrecipient.h \ qt/signverifymessagedialog.h \ qt/splashscreen.h \ qt/trafficgraphdata.h \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 6e4e3b85eb829..c259e53fc6894 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -95,8 +95,10 @@ BITCOIN_TESTS =\ test/dip0020opcodes_tests.cpp \ test/descriptor_tests.cpp \ test/dynamic_activation_thresholds_tests.cpp \ + test/evo_assetlocks_tests.cpp \ test/evo_deterministicmns_tests.cpp \ test/evo_instantsend_tests.cpp \ + test/evo_mnhf_tests.cpp \ test/evo_simplifiedmns_tests.cpp \ test/evo_trivialvalidation.cpp \ test/evo_utils_tests.cpp \ @@ -105,6 +107,8 @@ BITCOIN_TESTS =\ test/getarg_tests.cpp \ test/governance_validators_tests.cpp \ test/hash_tests.cpp \ + test/i2p_tests.cpp \ + test/interfaces_tests.cpp \ test/key_io_tests.cpp \ test/key_tests.cpp \ test/lcg.h \ @@ -143,7 +147,6 @@ BITCOIN_TESTS =\ test/sigopcount_tests.cpp \ test/skiplist_tests.cpp \ test/sock_tests.cpp \ - test/specialtx_tests.cpp \ test/streams_tests.cpp \ test/subsidy_tests.cpp \ test/sync_tests.cpp \ @@ -170,7 +173,8 @@ BITCOIN_TESTS += \ wallet/test/wallet_crypto_tests.cpp \ wallet/test/coinselector_tests.cpp \ wallet/test/init_tests.cpp \ - wallet/test/ismine_tests.cpp + wallet/test/ismine_tests.cpp \ + wallet/test/scriptpubkeyman_tests.cpp if USE_BDB BITCOIN_TESTS += 
wallet/test/db_tests.cpp @@ -209,7 +213,6 @@ test_fuzz_fuzz_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_fuzz_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) $(RUNTIME_LDFLAGS) test_fuzz_fuzz_SOURCES = \ test/fuzz/addition_overflow.cpp \ - test/fuzz/addrdb.cpp \ test/fuzz/addrman.cpp \ test/fuzz/asmap.cpp \ test/fuzz/asmap_direct.cpp \ @@ -246,6 +249,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/golomb_rice.cpp \ test/fuzz/hex.cpp \ test/fuzz/http_request.cpp \ + test/fuzz/i2p.cpp \ test/fuzz/integer.cpp \ test/fuzz/key.cpp \ test/fuzz/key_io.cpp \ diff --git a/src/Makefile.test_fuzz.include b/src/Makefile.test_fuzz.include index 77554f4c00ac3..06734546e9d4c 100644 --- a/src/Makefile.test_fuzz.include +++ b/src/Makefile.test_fuzz.include @@ -11,13 +11,14 @@ TEST_FUZZ_H = \ test/fuzz/fuzz.h \ test/fuzz/FuzzedDataProvider.h \ test/fuzz/util.h \ - test/util.h + test/util/mining.h libtest_fuzz_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) libtest_fuzz_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) libtest_fuzz_a_SOURCES = \ test/fuzz/fuzz.cpp \ - test/util.cpp \ + test/util/mining.cpp \ + test/fuzz/util.cpp \ $(TEST_FUZZ_H) LIBTEST_FUZZ += $(LIBBITCOIN_SERVER) diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index 57df7ab8641d6..d7bc73defb414 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -10,24 +10,27 @@ EXTRA_LIBRARIES += \ TEST_UTIL_H = \ test/util/blockfilter.h \ test/util/logging.h \ + test/util/mining.h \ test/util/net.h \ test/util/setup_common.h \ test/util/str.h \ - test/util/transaction_utils.h + test/util/transaction_utils.h \ + test/util/wallet.h libtest_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) libtest_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) libtest_util_a_SOURCES = \ test/util/blockfilter.cpp \ test/util/logging.cpp \ + test/util/mining.cpp \ test/util/net.cpp \ test/util/setup_common.cpp \ test/util/str.cpp \ test/util/transaction_utils.cpp \ + test/util/wallet.cpp \ $(TEST_UTIL_H) LIBTEST_UTIL += $(LIBBITCOIN_SERVER) LIBTEST_UTIL += $(LIBBITCOIN_COMMON) LIBTEST_UTIL += $(LIBBITCOIN_UTIL) LIBTEST_UTIL += $(LIBBITCOIN_CRYPTO_BASE) - diff --git a/src/addrman.cpp b/src/addrman.cpp index 1df0847dbb8f7..add3f0566b411 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -6,10 +6,13 @@ #include #include +#include #include +#include #include #include +#include int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector &asmap) const { @@ -73,6 +76,38 @@ double CAddrInfo::GetChance(int64_t nNow) const return fChance; } +void CAddrMan::RemoveInvalid() +{ + for (size_t bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) { + for (size_t i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) { + const auto id = vvNew[bucket][i]; + if (id != -1 && !mapInfo[id].IsValid()) { + ClearNew(bucket, i); + } + } + } + + for (size_t bucket = 0; bucket < ADDRMAN_TRIED_BUCKET_COUNT; ++bucket) { + for (size_t i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) { + const auto id = vvTried[bucket][i]; + if (id == -1) { + continue; + } + const auto& addr_info = mapInfo[id]; + if (addr_info.IsValid()) { + continue; + } + vvTried[bucket][i] = -1; + --nTried; + SwapRandom(addr_info.nRandomPos, vRandom.size() - 1); + vRandom.pop_back(); + mapAddr.erase(addr_info); + mapInfo.erase(id); + m_tried_collisions.erase(id); + } + } +} + CAddrInfo* CAddrMan::Find(const CService& addr, int* pnId) { CService addr2 = addr; @@ -500,13 +535,18 @@ int 
CAddrMan::Check_() } #endif -void CAddrMan::GetAddr_(std::vector& vAddr) +void CAddrMan::GetAddr_(std::vector& vAddr, size_t max_addresses, size_t max_pct, std::optional network) { - unsigned int nNodes = ADDRMAN_GETADDR_MAX_PCT * vRandom.size() / 100; - if (nNodes > ADDRMAN_GETADDR_MAX) - nNodes = ADDRMAN_GETADDR_MAX; + size_t nNodes = vRandom.size(); + if (max_pct != 0) { + nNodes = max_pct * nNodes / 100; + } + if (max_addresses != 0) { + nNodes = std::min(nNodes, max_addresses); + } // gather a list of random nodes, skipping those of low quality + const int64_t now{GetAdjustedTime()}; for (unsigned int n = 0; n < vRandom.size(); n++) { if (vAddr.size() >= nNodes) break; @@ -516,8 +556,14 @@ void CAddrMan::GetAddr_(std::vector& vAddr) assert(mapInfo.count(vRandom[n]) == 1); const CAddrInfo& ai = mapInfo[vRandom[n]]; - if (!ai.IsTerrible()) - vAddr.push_back(ai); + + // Filter by network (optional) + if (network != std::nullopt && ai.GetNetClass() != network) continue; + + // Filter for quality + if (ai.IsTerrible(now)) continue; + + vAddr.push_back(ai); } } @@ -688,3 +734,100 @@ std::vector CAddrMan::DecodeAsmap(fs::path path) } return bits; } + +void CAddrMan::ResetI2PPorts() +{ + for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) { + for (int i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) { + const auto id = vvNew[bucket][i]; + if (id == -1) { + continue; + } + auto it = mapInfo.find(id); + if (it == mapInfo.end()) { + return; + } + auto& addr_info = it->second; + if (!addr_info.IsI2P() || addr_info.GetPort() == I2P_SAM31_PORT) { + continue; + } + + auto addr_info_newport = addr_info; + // The below changes addr_info_newport.GetKey(), which is used in finding a + // bucket and a position within that bucket. So a re-bucketing may be necessary. + addr_info_newport.port = I2P_SAM31_PORT; + + // Reposition entries of vvNew within the same bucket because we don't know the source + // address which led to the decision to store the entry in vvNew[bucket] so we can't + // re-evaluate that decision, but even if we could, CAddrInfo::GetNewBucket() does not + // use CAddrInfo::GetKey() so it would end up in the same bucket as before the port + // change. + const auto i_target = addr_info_newport.GetBucketPosition(nKey, true, bucket); + + if (i_target == i) { // No need to re-position. + addr_info = addr_info_newport; + continue; + } + + // Reposition from i to i_target, removing the entry from i_target (if any). + ClearNew(bucket, i_target); + vvNew[bucket][i_target] = id; + vvNew[bucket][i] = -1; + addr_info = addr_info_newport; + } + } + + for (int bucket = 0; bucket < ADDRMAN_TRIED_BUCKET_COUNT; ++bucket) { + for (int i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) { + const auto id = vvTried[bucket][i]; + if (id == -1) { + continue; + } + auto it = mapInfo.find(id); + if (it == mapInfo.end()) { + return; + } + auto& addr_info = it->second; + if (!addr_info.IsI2P() || addr_info.GetPort() == I2P_SAM31_PORT) { + continue; + } + + auto addr_info_newport = addr_info; + // The below changes addr_info_newport.GetKey(), which is used in finding a + // bucket and a position within that bucket. So a re-bucketing may be necessary. + addr_info_newport.port = I2P_SAM31_PORT; + + const auto bucket_target = addr_info_newport.GetTriedBucket(nKey, m_asmap); + const auto i_target = addr_info_newport.GetBucketPosition(nKey, false, bucket_target); + + if (bucket_target == bucket && i_target == i) { // No need to re-position. 
+ addr_info = addr_info_newport; + continue; + } + + // Reposition from (bucket, i) to (bucket_target, i_target). If the latter is + // occupied, then move the entry from there to vvNew. + + const auto old_target_id = vvTried[bucket_target][i_target]; + if (old_target_id != -1) { + CAddrInfo& old_target_info = mapInfo[old_target_id]; + + old_target_info.fInTried = false; + vvTried[bucket_target][i_target] = -1; + --nTried; + + const auto new_bucket = old_target_info.GetNewBucket(nKey, m_asmap); + const auto new_bucket_i = old_target_info.GetBucketPosition(nKey, true, new_bucket); + ClearNew(new_bucket, new_bucket_i); + + old_target_info.nRefCount = 1; + vvNew[new_bucket][new_bucket_i] = old_target_id; + ++nNew; + } + + vvTried[bucket_target][i_target] = id; + vvTried[bucket][i] = -1; + addr_info = addr_info_newport; + } + } +} diff --git a/src/addrman.h b/src/addrman.h index 6a031a0d513ed..33a05c445df48 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -1,5 +1,5 @@ // Copyright (c) 2012 Pieter Wuille -// Copyright (c) 2012-2015 The Bitcoin Core developers +// Copyright (c) 2012-2020 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -7,6 +7,7 @@ #define BITCOIN_ADDRMAN_H #include +#include #include #include #include @@ -19,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -102,19 +104,23 @@ class CAddrInfo : public CAddress * * Make sure no (localized) attacker can fill the entire table with his nodes/addresses. * * To that end: - * * Addresses are organized into buckets. - * * Addresses that have not yet been tried go into 1024 "new" buckets. - * * Based on the address range (/16 for IPv4) of the source of information, 64 buckets are selected at random. + * * Addresses are organized into buckets that can each store up to 64 entries. + * * Addresses to which our node has not successfully connected go into 1024 "new" buckets. + * * Based on the address range (/16 for IPv4) of the source of information, or if an asmap is provided, + * the AS it belongs to (for IPv4/IPv6), 64 buckets are selected at random. * * The actual bucket is chosen from one of these, based on the range in which the address itself is located. + * * The position in the bucket is chosen based on the full address. * * One single address can occur in up to 8 different buckets to increase selection chances for addresses that * are seen frequently. The chance for increasing this multiplicity decreases exponentially. - * * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen - * ones) is removed from it first. + * * When adding a new address to an occupied position of a bucket, it will not replace the existing entry + * unless that address is also stored in another bucket or it doesn't meet one of several quality criteria + * (see IsTerrible for exact criteria). * * Addresses of nodes that are known to be accessible go into 256 "tried" buckets. * * Each address range selects at random 8 of these buckets. * * The actual bucket is chosen from one of these, based on the full address. - * * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently - * tried ones) is evicted from it, back to the "new" buckets. + * * When adding a new good address to an occupied position of a bucket, a FEELER connection to the + * old address is attempted. 
The old entry is only replaced and moved back to the "new" buckets if this + * attempt was unsuccessful. * * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not * be observable by adversaries. * * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive) @@ -154,12 +160,6 @@ class CAddrInfo : public CAddress //! how recent a successful connection should be before we allow an address to be evicted from tried #define ADDRMAN_REPLACEMENT_HOURS 4 -//! the maximum percentage of nodes to return in a getaddr call -#define ADDRMAN_GETADDR_MAX_PCT 23 - -//! the maximum number of nodes to return in a getaddr call -#define ADDRMAN_GETADDR_MAX 2500 - //! Convenience #define ADDRMAN_TRIED_BUCKET_COUNT (1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2) #define ADDRMAN_NEW_BUCKET_COUNT (1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2) @@ -176,116 +176,7 @@ static const int64_t ADDRMAN_TEST_WINDOW = 40*60; // 40 minutes */ class CAddrMan { -protected: - friend class CAddrManTest; - - //! critical section to protect the inner data structures - mutable CCriticalSection cs; - -private: - //! last used nId - int nIdCount GUARDED_BY(cs); - - //! table with information about all nIds - std::map mapInfo GUARDED_BY(cs); - - //! find an nId based on its network address - std::map mapAddr GUARDED_BY(cs); - - //! randomly-ordered vector of all nIds - std::vector vRandom GUARDED_BY(cs); - - // number of "tried" entries - int nTried GUARDED_BY(cs); - - //! list of "tried" buckets - int vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs); - - //! number of (unique) "new" entries - int nNew GUARDED_BY(cs); - - //! list of "new" buckets - int vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs); - - //! last time Good was called (memory only) - int64_t nLastGood GUARDED_BY(cs); - - // discriminate entries based on port. Should be false on mainnet/testnet and can be true on devnet/regtest - bool discriminatePorts GUARDED_BY(cs); - - //! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions. - std::set m_tried_collisions; - -protected: - //! secret key to randomize bucket select with - uint256 nKey; - - //! Source of random numbers for randomization in inner loops - FastRandomContext insecure_rand; - - //! Find an entry. - CAddrInfo* Find(const CService& addr, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! find an entry, creating it if necessary. - //! nTime and nServices of the found node are updated, if necessary. - CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Swap two elements in vRandom. - void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Move an entry from the "new" table(s) to the "tried" table - void MakeTried(CAddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Delete an entry. It must not be in tried, and have refcount 0. - void Delete(int nId) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Clear a position in a "new" table. This is the only place where entries are actually deleted. - void ClearNew(int nUBucket, int nUBucketPos) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Mark an entry "good", possibly moving it from "new" to "tried". - void Good_(const CService &addr, bool test_before_evict, int64_t time) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! 
Add an entry to the "new" table. - bool Add_(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Mark an entry as attempted to connect. - void Attempt_(const CService &addr, bool fCountFailure, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Select an address to connect to, if newOnly is set to true, only the new table is selected from. - CAddrInfo Select_(bool newOnly) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions. - void ResolveCollisions_() EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Return a random to-be-evicted tried table address. - CAddrInfo SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs); - -#ifdef DEBUG_ADDRMAN - //! Perform consistency check. Returns an error code or zero. - int Check_() EXCLUSIVE_LOCKS_REQUIRED(cs); -#endif - - //! Select several addresses at once. - void GetAddr_(std::vector &vAddr) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Mark an entry as currently-connected-to. - void Connected_(const CService &addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Update an entry's service bits. - void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs); - - //! Get address info for address - CAddrInfo GetAddressInfo_(const CService& addr) EXCLUSIVE_LOCKS_REQUIRED(cs); - public: - //! Serialization versions. - enum class Format : uint8_t { - V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88 - V1_DETERMINISTIC = 1, //!< for pre-asmap files - V2_ASMAP = 2, //!< for files including asmap version - V3_BIP155 = 3, //!< same as V2_ASMAP plus addresses are in BIP155 format - }; - // Compressed IP->ASN mapping, loaded from a file when a node starts. // Should be always empty if no file was provided. // This mapping is then used for bucketing nodes in Addrman. @@ -308,8 +199,18 @@ class CAddrMan /** * Serialized format. - * * version byte (@see `Format`) - * * 0x20 + nKey (serialized as if it were a vector, for backward compatibility) + * * format version byte (@see `Format`) + * * lowest compatible format version byte. This is used to help old software decide + * whether to parse the file. For example: + * * Bitcoin Core version N knows how to parse up to format=3. If a new format=4 is + * introduced in version N+1 that is compatible with format=3 and it is known that + * version N will be able to parse it, then version N+1 will write + * (format=4, lowest_compatible=3) in the first two bytes of the file, and so + * version N will still try to parse it. + * * Bitcoin Core version N+2 introduces a new incompatible format=5. It will write + * (format=5, lowest_compatible=5) and so any versions that do not know how to parse + * format=5 will not try to read the file. + * * nKey * * nNew * * nTried * * number of "new" buckets XOR 2**30 @@ -338,12 +239,17 @@ class CAddrMan { LOCK(cs); - // Always serialize in the latest version (currently Format::V3_BIP155). + // Always serialize in the latest version (FILE_FORMAT). OverrideStream s(&s_, s_.GetType(), s_.GetVersion() | ADDRV2_FORMAT); - s << static_cast(Format::V3_BIP155); - s << ((unsigned char)32); + s << static_cast(FILE_FORMAT); + + // Increment `lowest_compatible` iff a newly introduced format is incompatible with + // the previous one. 
+ static constexpr uint8_t lowest_compatible = Format::V3_BIP155; + s << static_cast(INCOMPATIBILITY_BASE + lowest_compatible); + s << nKey; s << nNew; s << nTried; @@ -403,15 +309,6 @@ class CAddrMan Format format; s_ >> Using>(format); - static constexpr Format maximum_supported_format = Format::V3_BIP155; - if (format > maximum_supported_format) { - throw std::ios_base::failure(strprintf( - "Unsupported format of addrman database: %u. Maximum supported is %u. " - "Continuing operation without using the saved list of peers.", - static_cast(format), - static_cast(maximum_supported_format))); - } - int stream_version = s_.GetVersion(); if (format >= Format::V3_BIP155) { // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress @@ -421,9 +318,16 @@ class CAddrMan OverrideStream s(&s_, s_.GetType(), stream_version); - unsigned char nKeySize; - s >> nKeySize; - if (nKeySize != 32) throw std::ios_base::failure("Incorrect keysize in addrman deserialization"); + uint8_t compat; + s >> compat; + const uint8_t lowest_compatible = compat - INCOMPATIBILITY_BASE; + if (lowest_compatible > FILE_FORMAT) { + throw std::ios_base::failure(strprintf( + "Unsupported format of addrman database: %u. It is compatible with formats >=%u, " + "but the maximum supported by this version of %s is %u.", + format, lowest_compatible, PACKAGE_NAME, static_cast(FILE_FORMAT))); + } + s >> nKey; s >> nNew; s >> nTried; @@ -555,6 +459,10 @@ class CAddrMan LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost); } + RemoveInvalid(); + + ResetI2PPorts(); + Check(); } @@ -697,20 +605,26 @@ class CAddrMan return addrRet; } - //! Return a bunch of addresses, selected at random. - std::vector GetAddr() + /** + * Return all or many randomly selected addresses, optionally by network. + * + * @param[in] max_addresses Maximum number of addresses to return (0 = all). + * @param[in] max_pct Maximum percentage of addresses to return (0 = all). + * @param[in] network Select only addresses of this network (nullopt = all). + */ + std::vector GetAddr(size_t max_addresses, size_t max_pct, std::optional network) { Check(); std::vector vAddr; { LOCK(cs); - GetAddr_(vAddr); + GetAddr_(vAddr, max_addresses, max_pct, network); } Check(); return vAddr; } - //! Mark an entry as currently-connected-to. + //! Outer function for Connected_() void Connected(const CService &addr, int64_t nTime = GetAdjustedTime()) { LOCK(cs); @@ -738,7 +652,154 @@ class CAddrMan } return addrRet; } +protected: + //! secret key to randomize bucket select with + uint256 nKey; + + //! Source of random numbers for randomization in inner loops + FastRandomContext insecure_rand; + +private: + //! critical section to protect the inner data structures + mutable RecursiveMutex cs; + + //! Serialization versions. + enum Format : uint8_t { + V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88 + V1_DETERMINISTIC = 1, //!< for pre-asmap files + V2_ASMAP = 2, //!< for files including asmap version + V3_BIP155 = 3, //!< same as V2_ASMAP plus addresses are in BIP155 format + }; + + //! The maximum format this software knows it can unserialize. Also, we always serialize + //! in this format. + //! The format (first byte in the serialized stream) can be higher than this and + //! still this software may be able to unserialize the file - if the second byte + //! (see `lowest_compatible` in `Unserialize()`) is less or equal to this. + static constexpr Format FILE_FORMAT = Format::V3_BIP155; + + //! 
The initial value of a field that is incremented every time an incompatible format + //! change is made (such that old software versions would not be able to parse and + //! understand the new file format). This is 32 because we overtook the "key size" + //! field which was 32 historically. + //! @note Don't increment this. Increment `lowest_compatible` in `Serialize()` instead. + static constexpr uint8_t INCOMPATIBILITY_BASE = 32; + + //! last used nId + int nIdCount GUARDED_BY(cs); + + //! table with information about all nIds + std::map mapInfo GUARDED_BY(cs); + + //! find an nId based on its network address + std::map mapAddr GUARDED_BY(cs); + + //! randomly-ordered vector of all nIds + std::vector vRandom GUARDED_BY(cs); + + // number of "tried" entries + int nTried GUARDED_BY(cs); + + //! list of "tried" buckets + int vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs); + + //! number of (unique) "new" entries + int nNew GUARDED_BY(cs); + + //! list of "new" buckets + int vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs); + + //! last time Good was called (memory only) + int64_t nLastGood GUARDED_BY(cs); + + // discriminate entries based on port. Should be false on mainnet/testnet and can be true on devnet/regtest + bool discriminatePorts GUARDED_BY(cs); + + //! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions. + std::set m_tried_collisions; + + //! Find an entry. + CAddrInfo* Find(const CService& addr, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); + //! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom. + CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Swap two elements in vRandom. + void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Move an entry from the "new" table(s) to the "tried" table + void MakeTried(CAddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Delete an entry. It must not be in tried, and have refcount 0. + void Delete(int nId) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Clear a position in a "new" table. This is the only place where entries are actually deleted. + void ClearNew(int nUBucket, int nUBucketPos) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Mark an entry "good", possibly moving it from "new" to "tried". + void Good_(const CService &addr, bool test_before_evict, int64_t time) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Add an entry to the "new" table. + bool Add_(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Mark an entry as attempted to connect. + void Attempt_(const CService &addr, bool fCountFailure, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Select an address to connect to, if newOnly is set to true, only the new table is selected from. + CAddrInfo Select_(bool newOnly) EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions. + void ResolveCollisions_() EXCLUSIVE_LOCKS_REQUIRED(cs); + + //! Return a random to-be-evicted tried table address. + CAddrInfo SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs); + +#ifdef DEBUG_ADDRMAN + //! Perform consistency check. Returns an error code or zero. 
+    /**
+     * Return all or many randomly selected addresses, optionally by network.
+     *
+     * @param[out] vAddr Vector of randomly selected addresses from vRandom.
+     * @param[in] max_addresses Maximum number of addresses to return (0 = all).
+     * @param[in] max_pct Maximum percentage of addresses to return (0 = all).
+     * @param[in] network Select only addresses of this network (nullopt = all).
+     */
+    void GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    /** We have successfully connected to this peer. Calling this function
+     * updates the CAddress's nTime, which is used in our IsTerrible()
+     * decisions and gossiped to peers. Callers should be careful that updating
+     * this information doesn't leak topology information to network spies.
+     *
+     * net_processing calls this function when it *disconnects* from a peer to
+     * not leak information about currently connected peers.
+     *
+     * @param[in] addr The address of the peer we were connected to
+     * @param[in] nTime The time that we were last connected to this peer
+     */
+    void Connected_(const CService& addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    //! Update an entry's service bits.
+    void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    //! Get address info for address
+    CAddrInfo GetAddressInfo_(const CService& addr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    //! Remove invalid addresses.
+    void RemoveInvalid() EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    /**
+     * Reset the ports of I2P peers to 0.
+     * This is needed as a temporary measure because now we enforce port 0 and
+     * only connect to I2P hosts if the port is 0, but in the early days some
+     * I2P addresses with port 8333 were rumoured and persisted into addrmans.
+     */
+    void ResetI2PPorts() EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    friend class CAddrManTest;
 };
 
 #endif // BITCOIN_ADDRMAN_H
diff --git a/src/banman.h b/src/banman.h
index ca46677e3ee41..e11d4e0c8218a 100644
--- a/src/banman.h
+++ b/src/banman.h
@@ -1,5 +1,5 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2017 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 #ifndef BITCOIN_BANMAN_H
@@ -87,7 +87,7 @@ class BanMan
     //!clean unused entries (if bantime has expired)
     void SweepBanned();
 
-    CCriticalSection m_cs_banned;
+    RecursiveMutex m_cs_banned;
     banmap_t m_banned GUARDED_BY(m_cs_banned);
     bool m_is_dirty GUARDED_BY(m_cs_banned);
     CClientUIInterface* m_client_interface = nullptr;
diff --git a/src/base58.cpp b/src/base58.cpp
index 822c9306f2b94..36d00120664c8 100644
--- a/src/base58.cpp
+++ b/src/base58.cpp
@@ -84,21 +84,21 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch, int max_ret_
     return true;
 }
 
-std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend)
+std::string EncodeBase58(Span<const unsigned char> input)
 {
     // Skip & count leading zeroes.
     int zeroes = 0;
     int length = 0;
-    while (pbegin != pend && *pbegin == 0) {
-        pbegin++;
+    while (input.size() > 0 && input[0] == 0) {
+        input = input.subspan(1);
         zeroes++;
     }
     // Allocate enough space in big-endian base58 representation.
-    int size = (pend - pbegin) * 138 / 100 + 1; // log(256) / log(58), rounded up.
+    int size = input.size() * 138 / 100 + 1; // log(256) / log(58), rounded up.
     std::vector<unsigned char> b58(size);
     // Process the bytes.
-    while (pbegin != pend) {
-        int carry = *pbegin;
+    while (input.size() > 0) {
+        int carry = input[0];
         int i = 0;
         // Apply "b58 = b58 * 256 + ch".
         for (std::vector<unsigned char>::reverse_iterator it = b58.rbegin(); (carry != 0 || i < length) && (it != b58.rend()); it++, i++) {
@@ -109,7 +109,7 @@ std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend)
 
         assert(carry == 0);
         length = i;
-        pbegin++;
+        input = input.subspan(1);
     }
     // Skip leading zeroes in base58 result.
     std::vector<unsigned char>::iterator it = b58.begin() + (size - length);
@@ -124,11 +124,6 @@ std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend)
     return str;
 }
 
-std::string EncodeBase58(const std::vector<unsigned char>& vch)
-{
-    return EncodeBase58(vch.data(), vch.data() + vch.size());
-}
-
 bool DecodeBase58(const std::string& str, std::vector<unsigned char>& vchRet, int max_ret_len)
 {
     if (!ValidAsCString(str)) {
@@ -137,10 +132,10 @@ bool DecodeBase58(const std::string& str, std::vector<unsigned char>& vchRet, in
     return DecodeBase58(str.c_str(), vchRet, max_ret_len);
 }
 
-std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn)
+std::string EncodeBase58Check(Span<const unsigned char> input)
 {
     // add 4-byte hash check to the end
-    std::vector<unsigned char> vch(vchIn);
+    std::vector<unsigned char> vch(input.begin(), input.end());
     uint256 hash = Hash(vch.begin(), vch.end());
     vch.insert(vch.end(), (unsigned char*)&hash, (unsigned char*)&hash + 4);
     return EncodeBase58(vch);
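The `* 138 / 100 + 1` buffer estimate kept in the hunk above deserves a sanity check: base58 needs at most log(256)/log(58), roughly 1.3657, output digits per input byte, and 1.38 comfortably covers that. A quick self-contained check (a standalone file, not part of the diff):

// Verifies the ratio behind "input.size() * 138 / 100 + 1" in EncodeBase58().
#include <cassert>
#include <cmath>

int main()
{
    const double digits_per_byte = std::log(256.0) / std::log(58.0); // ~1.36566
    assert(138.0 / 100.0 > digits_per_byte); // so size*138/100 + 1 digits always suffice
    return 0;
}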
diff --git a/src/base58.h b/src/base58.h
index 2049f105737a8..8ee47ba24766a 100644
--- a/src/base58.h
+++ b/src/base58.h
@@ -15,20 +15,15 @@
 #define BITCOIN_BASE58_H
 
 #include <attributes.h>
+#include <span.h>
 
 #include <string>
 #include <vector>
 
 /**
- * Encode a byte sequence as a base58-encoded string.
- * pbegin and pend cannot be nullptr, unless both are.
+ * Encode a byte span as a base58-encoded string
  */
-std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend);
-
-/**
- * Encode a byte vector as a base58-encoded string
- */
-std::string EncodeBase58(const std::vector<unsigned char>& vch);
+std::string EncodeBase58(Span<const unsigned char> input);
 
 /**
  * Decode a base58-encoded string (psz) into a byte vector (vchRet).
@@ -44,9 +39,9 @@ std::string EncodeBase58(const std::vector<unsigned char>& vch);
 [[nodiscard]] bool DecodeBase58(const std::string& str, std::vector<unsigned char>& vchRet, int max_ret_len);
 
 /**
- * Encode a byte vector into a base58-encoded string, including checksum
+ * Encode a byte span into a base58-encoded string, including checksum
  */
-std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn);
+std::string EncodeBase58Check(Span<const unsigned char> input);
 
 /**
  * Decode a base58-encoded string (psz) that includes a checksum into a byte
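With the Span overloads, call sites need no `.data()`/`.size()` plumbing: any contiguous byte container converts, which is exactly what `return EncodeBase58(vch);` inside `EncodeBase58Check()` above relies on. A usage sketch (`ExampleEncode` and its payload are invented for illustration):

#include <base58.h>

#include <string>
#include <vector>

std::string ExampleEncode()
{
    const std::vector<unsigned char> payload{0x00, 0x01, 0x02, 0x03};
    // The vector converts implicitly to Span<const unsigned char>.
    return EncodeBase58Check(payload);
}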
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
new file mode 100644
index 0000000000000..b7bd8a32612b6
--- /dev/null
+++ b/src/bench/addrman.cpp
@@ -0,0 +1,141 @@
+// Copyright (c) 2020-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <addrman.h>
+#include <bench/bench.h>
+#include <random.h>
+#include <util/time.h>
+
+#include <optional>
+#include <vector>
+
+/* A "source" is a source address from which we have received a bunch of other addresses. */
+
+static constexpr size_t NUM_SOURCES = 64;
+static constexpr size_t NUM_ADDRESSES_PER_SOURCE = 256;
+
+static std::vector<CAddress> g_sources;
+static std::vector<std::vector<CAddress>> g_addresses;
+
+static void CreateAddresses()
+{
+    if (g_sources.size() > 0) { // already created
+        return;
+    }
+
+    FastRandomContext rng(uint256(std::vector<unsigned char>(32, 123)));
+
+    auto randAddr = [&rng]() {
+        in6_addr addr;
+        memcpy(&addr, rng.randbytes(sizeof(addr)).data(), sizeof(addr));
+
+        uint16_t port;
+        memcpy(&port, rng.randbytes(sizeof(port)).data(), sizeof(port));
+        if (port == 0) {
+            port = 1;
+        }
+
+        CAddress ret(CService(addr, port), NODE_NETWORK);
+
+        ret.nTime = GetAdjustedTime();
+
+        return ret;
+    };
+
+    for (size_t source_i = 0; source_i < NUM_SOURCES; ++source_i) {
+        g_sources.emplace_back(randAddr());
+        g_addresses.emplace_back();
+        for (size_t addr_i = 0; addr_i < NUM_ADDRESSES_PER_SOURCE; ++addr_i) {
+            g_addresses[source_i].emplace_back(randAddr());
+        }
+    }
+}
+
+static void AddAddressesToAddrMan(CAddrMan& addrman)
+{
+    for (size_t source_i = 0; source_i < NUM_SOURCES; ++source_i) {
+        addrman.Add(g_addresses[source_i], g_sources[source_i]);
+    }
+}
+
+static void FillAddrMan(CAddrMan& addrman)
+{
+    CreateAddresses();
+
+    AddAddressesToAddrMan(addrman);
+}
+
+/* Benchmarks */
+
+static void AddrManAdd(benchmark::Bench& bench)
+{
+    CreateAddresses();
+
+    CAddrMan addrman;
+
+    bench.run([&] {
+        AddAddressesToAddrMan(addrman);
+        addrman.Clear();
+    });
+}
+
+static void AddrManSelect(benchmark::Bench& bench)
+{
+    CAddrMan addrman;
+
+    FillAddrMan(addrman);
+
+    bench.run([&] {
+        const auto& address = addrman.Select();
+        assert(address.GetPort() > 0);
+    });
+}
+
+static void AddrManGetAddr(benchmark::Bench& bench)
+{
+    CAddrMan addrman;
+
+    FillAddrMan(addrman);
+
+    bench.run([&] {
+        const auto& addresses = addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23, /* network */ std::nullopt);
+        assert(addresses.size() > 0);
+    });
+}
+
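The `addrmans` vector in `AddrManGood` below is sized `bench.epochs() * bench.epochIterations()` because, once `epochIterations` is pinned, nanobench runs the measured lambda exactly that many times instead of auto-tuning the iteration count. A minimal sketch of that contract (assuming nanobench's default of zero warmup runs; `ExampleEpochs` is illustrative only):

#include <bench/nanobench.h>

#include <cassert>

void ExampleEpochs()
{
    ankerl::nanobench::Bench bench;
    bench.epochs(5).epochIterations(1);

    int runs = 0;
    bench.run("example", [&] { ++runs; });
    assert(runs == 5); // 5 epochs x 1 iteration: the sizing AddrManGood relies on
}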
+static void AddrManGood(benchmark::Bench& bench)
+{
+    /* Create many CAddrMan objects - one to be modified at each loop iteration.
+     * This is necessary because the CAddrMan::Good() method modifies the
+     * object, affecting the timing of subsequent calls to the same method and
+     * we want to do the same amount of work in every loop iteration.
+     */
+
+    bench.epochs(5).epochIterations(1);
+
+    std::vector<CAddrMan> addrmans(bench.epochs() * bench.epochIterations());
+    for (auto& addrman : addrmans) {
+        FillAddrMan(addrman);
+    }
+
+    auto markSomeAsGood = [](CAddrMan& addrman) {
+        for (size_t source_i = 0; source_i < NUM_SOURCES; ++source_i) {
+            for (size_t addr_i = 0; addr_i < NUM_ADDRESSES_PER_SOURCE; ++addr_i) {
+                if (addr_i % 32 == 0) {
+                    addrman.Good(g_addresses[source_i][addr_i]);
+                }
+            }
+        }
+    };
+
+    uint64_t i = 0;
+    bench.run([&] {
+        markSomeAsGood(addrmans.at(i));
+        ++i;
+    });
+}
+
+BENCHMARK(AddrManAdd);
+BENCHMARK(AddrManSelect);
+BENCHMARK(AddrManGetAddr);
+BENCHMARK(AddrManGood);
diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp
index 5e7428febb5fe..3abedb65d07d8 100644
--- a/src/bench/base58.cpp
+++ b/src/bench/base58.cpp
@@ -20,7 +20,7 @@ static void Base58Encode(benchmark::Bench& bench)
         }
     };
     bench.batch(buff.size()).unit("byte").run([&] {
-        EncodeBase58(buff.data(), buff.data() + buff.size());
+        EncodeBase58(buff);
     });
 }
 
@@ -34,10 +34,8 @@ static void Base58CheckEncode(benchmark::Bench& bench)
             200, 24
         }
     };
-    std::vector<unsigned char> vch;
-    vch.assign(buff.begin(), buff.end());
     bench.batch(buff.size()).unit("byte").run([&] {
-        EncodeBase58Check(vch);
+        EncodeBase58Check(buff);
     });
 }
 
diff --git a/src/bench/bench.h b/src/bench/bench.h
index 955e85a2016e5..5889d16c87cd5 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 The Bitcoin Core developers
+// Copyright (c) 2015-2020 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp
index 222f57c8c214d..fe60b87cdc9da 100644
--- a/src/bench/block_assemble.cpp
+++ b/src/bench/block_assemble.cpp
@@ -6,7 +6,8 @@
 #include
 #include
 #include
+#include
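Returning to bench/addrman.cpp above: `CreateAddresses()` seeds `FastRandomContext` with a fixed `uint256`, so every benchmark run fills addrman with the same pseudo-random addresses and results stay comparable across runs. A sketch of the property being relied on (`CheckSeedDeterminism` is illustrative, not part of the diff):

#include <random.h>
#include <uint256.h>

#include <cassert>
#include <vector>

void CheckSeedDeterminism()
{
    const uint256 seed(std::vector<unsigned char>(32, 123));
    FastRandomContext a(seed);
    FastRandomContext b(seed);
    assert(a.rand64() == b.rand64()); // same seed, same stream
}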