chore: upgrade generic-array & re-export it #330
Workflow file for this run
name: Rust

on:
  merge_group:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    branches: [main, dev]

env:
  CARGO_TERM_COLOR: always
  # Disable incremental compilation.
  #
  # Incremental compilation is useful as part of an edit-build-test-edit cycle,
  # as it lets the compiler avoid recompiling code that hasn't changed. However,
  # on CI, we're not making small edits; we're almost always building the entire
  # project from scratch. Thus, incremental compilation on CI actually
  # introduces *additional* overhead to support making future builds
  # faster...but no future builds will ever occur in any given CI environment.
  #
  # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
  # for details.
  CARGO_INCREMENTAL: 0
  # Allow more retries for network requests in cargo (downloading crates) and
  # rustup (installing toolchains). This should help to reduce flaky CI failures
  # from transient network timeouts or other issues.
  CARGO_NET_RETRY: 10
  RUSTUP_MAX_RETRIES: 10
  # Don't emit giant backtraces in the CI logs.
  RUST_BACKTRACE: short
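
# Cancel superseded runs: `github.head_ref` is only set for pull_request
# events, so a new push to a PR branch cancels the in-flight run for that
# branch, while merge_group and other events fall back to the unique
# `github.run_id` and are never cancelled by one another.
# Illustrative (hypothetical) group keys: Rust-my-feature-branch for a PR,
# Rust-6024745549 for a merge queue run.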
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
      fail-fast: false
    env:
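      # Turn every compiler warning into a hard error for CI builds.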
      RUSTFLAGS: -D warnings
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
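      # Cache the cargo registry and build artifacts (keyed on the lockfile)
      # so repeat CI runs don't rebuild unchanged dependencies.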
      - uses: Swatinem/rust-cache@v2
      # make sure benches don't bit-rot
      - name: build benches
        # TODO: --all-features
        run: cargo build --benches --release
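      # nextest runs each test in its own process, but it does not run
      # doctests, which is why a separate `cargo test --doc` step follows.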
      - name: cargo test
        # TODO: --all-features
        run: |
          cargo nextest run --release
      - name: Doctests
        run: |
          cargo test --doc
      - name: Abomonation feature
        run: cargo run --features "abomonation" --example poseidon_constants_serde

  clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
      # See '.cargo/config' for list of enabled/disabled clippy lints
      - name: rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all --check
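      # `xclippy` is assumed to be a cargo alias defined in .cargo/config
      # that invokes clippy with the project's lint configuration.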
      - name: cargo clippy
        run: cargo xclippy -D warnings

  msrv:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install rustup
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Install cargo-msrv
        run: cargo install cargo-msrv
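      # `cargo msrv verify` checks that the crate compiles with the
      # `rust-version` declared in Cargo.toml.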
      - name: Check Rust MSRV
        run: cargo msrv verify

  # Runs the test suite on a self-hosted GPU machine with CUDA enabled
  test-cuda:
    name: Rust tests on CUDA
    runs-on: self-hosted
    env:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
      EC_GPU_FRAMEWORK: cuda
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      # Check we have access to the machine's Nvidia drivers
      - run: nvidia-smi
      # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
      # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
      # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
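      # e.g. `nvidia-smi --query-gpu=compute_cap` prints "8.6" on an Ampere
      # card; the `sed` strips the dot, yielding CUDA_ARCH=86.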
      - name: Set env for CUDA compute
        run: echo "CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g')" >> $GITHUB_ENV
      - name: Set env for EC_GPU
        run: echo 'EC_GPU_CUDA_NVCC_ARGS=--fatbin --gpu-architecture=sm_${{ env.CUDA_ARCH }} --generate-code=arch=compute_${{ env.CUDA_ARCH }},code=sm_${{ env.CUDA_ARCH }}' >> $GITHUB_ENV
      - run: echo "${{ env.EC_GPU_CUDA_NVCC_ARGS }}"
      # Check that CUDA is installed with a driver-compatible version
      # This must also be compatible with the GPU architecture, see above link
      - run: nvcc --version
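      # Enable the `cuda` backend together with (what appear to be) all
      # supported field (pasta, bls) and arity feature combinations.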
      - name: CUDA tests
        run: |
          cargo nextest run --release --no-default-features --features cuda,pasta,bls,arity2,arity4,arity8,arity11,arity16,arity24,arity36

  # Runs the test suite on a self-hosted GPU machine with CUDA and OpenCL
  # enabled (i.e. using the OpenCL backend on Nvidia GPUs)
  test-opencl:
    name: Rust tests on OpenCL
    runs-on: self-hosted
    env:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
      EC_GPU_FRAMEWORK: opencl
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
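      # ocl-icd-opencl-dev provides the OpenCL ICD loader (libOpenCL.so) and
      # development headers that the OpenCL backend links against.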
      - name: Install GPU deps
        run: |
          apt-get update
          apt-get -y install ocl-icd-opencl-dev
      # Check we have access to the machine's Nvidia drivers
      - run: nvidia-smi
      # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
      # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
      # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
      - name: Set env for CUDA compute
        run: echo "CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g')" >> $GITHUB_ENV
      - name: Set env for EC_GPU
        run: echo 'EC_GPU_CUDA_NVCC_ARGS=--fatbin --gpu-architecture=sm_${{ env.CUDA_ARCH }} --generate-code=arch=compute_${{ env.CUDA_ARCH }},code=sm_${{ env.CUDA_ARCH }}' >> $GITHUB_ENV
      - run: echo "${{ env.EC_GPU_CUDA_NVCC_ARGS }}"
      # Check that CUDA is installed with a driver-compatible version
      # This must also be compatible with the GPU architecture, see above link
      - run: nvcc --version
      # Check that the OpenCL ICD loader can enumerate the Nvidia platform and devices
      - run: clinfo
      - name: OpenCL tests
        run: |
          cargo nextest run --release --all-features