_linux-benchmark-cuda.yml
name: linux-benchmark-cuda
on:
  workflow_call:
    inputs:
      userbenchmark:
        required: true
        type: string
        description: Name of the userbenchmark to run
      userbenchmark-run-args:
        required: true
        type: string
        description: Command-line arguments passed to the userbenchmark run
      only:
        required: false
        type: string
        default: ''
        description: Only run the selected model; used for testing
    secrets:
      HUGGING_FACE_HUB_TOKEN:
        required: false
        description: |
          HF auth token to avoid rate limits when downloading models or datasets from hub
jobs:
  benchmark:
    # Don't run on forked repos
    if: github.repository_owner == 'pytorch'
    runs-on: linux.aws.a100
    timeout-minutes: 1440
    environment: docker-s3-upload
    env:
      OUTPUT_DIR: .userbenchmark
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      ONLY: ${{ inputs.only || '' }}
    steps:
      - name: Checkout TorchBench
        uses: actions/checkout@v3
        with:
          path: benchmark
      - name: Remove result if it already exists
        shell: bash
        working-directory: benchmark
        run: |
          set -eux
          if [[ -d "${OUTPUT_DIR}" ]]; then
            rm -rf "${OUTPUT_DIR}"
          fi
      - name: Setup miniconda
        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
        with:
          python-version: "3.9"
      - name: Install torch dependencies
        shell: bash
        working-directory: benchmark
        run: |
          set -eux
          ${CONDA_RUN} pip3 install --pre torch torchvision torchaudio torchao \
            --index-url https://download.pytorch.org/whl/nightly/cu124
      - name: Install benchmark
        shell: bash
        working-directory: benchmark
        run: |
          set -eux
          if [[ -z "${ONLY}" ]]; then
            ${CONDA_RUN} python install.py --numpy
          else
            ${CONDA_RUN} python install.py --numpy --models "${ONLY}"
          fi
      - name: Run benchmark
        shell: bash
        working-directory: benchmark
        run: |
          set -eux
          if [[ -z "${ONLY}" ]]; then
            ${CONDA_RUN} python run_benchmark.py ${{ inputs.userbenchmark }} ${{ inputs.userbenchmark-run-args }}
          else
            ${CONDA_RUN} python run_benchmark.py ${{ inputs.userbenchmark }} ${{ inputs.userbenchmark-run-args }} \
              --only "${ONLY}"
          fi
      - name: Upload the benchmark results to OSS benchmark database for the dashboard
        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
        with:
          benchmark-results-dir: benchmark/${{ env.OUTPUT_DIR }}/${{ inputs.userbenchmark }}
          dry-run: false
          schema-version: v3
          github-token: ${{ secrets.GITHUB_TOKEN }}
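
# ---------------------------------------------------------------------------
# Example caller (illustrative sketch only, not part of this workflow).
# A workflow in the same repository can invoke this reusable workflow through
# `jobs.<id>.uses`, supplying the inputs and secret declared above. The caller
# name, cron schedule, and input values below are placeholders; the file path
# assumes the standard .github/workflows/ location for reusable workflows.
#
# name: example-nightly-cuda-benchmark
# on:
#   schedule:
#     - cron: "0 7 * * *"
# jobs:
#   benchmark:
#     uses: ./.github/workflows/_linux-benchmark-cuda.yml
#     with:
#       userbenchmark: torch-nightly
#       userbenchmark-run-args: "--dryrun"
#     secrets:
#       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
# ---------------------------------------------------------------------------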