forked from mediacloud/backend
-
Notifications
You must be signed in to change notification settings - Fork 0
180 lines (140 loc) · 5.94 KB
/
build.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
---
# CI workflow: pull previously-built Docker images, rebuild and push them to
# Docker Hub, then run the test suite split into parallel chunks.
name: Pull, build, push, test

on: [push]

jobs:

  pull_build_push:
    name: Pull, build, push
    runs-on: ubuntu-latest
    # Allow skipping CI by putting "ci skip" in the commit message.
    if: "!contains(github.event.head_commit.message, 'ci skip')"
    steps:
      - name: Check out repository
        # NOTE(review): checkout@v1 / setup-python@v1 are old major versions;
        # consider upgrading, but left as-is to preserve behavior.
        uses: actions/checkout@v1
        with:
          submodules: recursive
      - name: Clean up disk space
        run: ./.github/free-up-disk-space.sh
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          # Quoted so YAML reads the version as a string, not a float
          python-version: "3.5"
      - name: Upgrade Pip / setuptools
        run: pip install -U pip setuptools wheel
      - name: Update APT listing
        run: sudo apt-get -y update
      - name: Install parallel
        run: sudo apt-get -y install parallel
      # We need it as a Python module
      - name: Install docker-compose
        run: pip install docker-compose
      # Install PyYAML for docker-compose.yml validation
      - name: Install PyYAML
        run: pip install PyYAML
      # FIXME upgrade Docker?
      - name: Print kernel and Docker information
        run: |
          uname -a
          docker version
          docker-compose version
      - name: Pull images
        # Don't stop on a single failure because the image might not exist or a
        # network error might have happened
        run: ./dev/pull.py || { echo "One or more images couldn't be pulled"; }
      - name: Build images
        # Prune images after every rebuild because otherwise CI instance might run
        # out of disk space on bigger rebuilds
        # FIXME skip nytlabels-annotator due to its size
        run: ./dev/build.py --prune_images -p | grep -v nytlabels-annotator | bash
      - name: Push images
        run: |
          echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
          ./dev/push.py

  run_tests:
    name: Run tests
    runs-on: ubuntu-latest
    # Tests run only against images the previous job pushed.
    needs: pull_build_push
    if: "!contains(github.event.head_commit.message, 'ci skip')"
    env:
      # Keep in sync with "strategy/matrix/chunk_number"
      TEST_CHUNK_COUNT: 5
    strategy:
      # Try running all tests even if some of them fail
      fail-fast: false
      matrix:
        # Keep in sync with "env/TEST_CHUNK_COUNT"
        chunk_number: [1, 2, 3, 4, 5]
    steps:
      - name: Check out repository
        uses: actions/checkout@v1
        with:
          submodules: recursive
      - name: Clean up disk space
        run: ./.github/free-up-disk-space.sh
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          # Quoted so YAML reads the version as a string, not a float
          python-version: "3.5"
      - name: Upgrade Pip / setuptools
        run: pip install -U pip setuptools wheel
      - name: Install parallel
        run: sudo apt-get -y install parallel
      # We need it as a Python module
      - name: Install docker-compose
        run: pip install docker-compose
      # Install PyYAML for docker-compose.yml validation
      - name: Install PyYAML
        run: pip install PyYAML
      # FIXME upgrade Docker?
      - name: Print kernel and Docker information
        run: |
          uname -a
          docker version
          docker-compose version
      - name: Pull images
        # Don't stop on a single failure because the image might not exist or a
        # network error might have happened
        run: ./dev/pull.py || { echo "One or more images couldn't be pulled"; }
      # List all tests (commands) to be run into a file
      - name: Enumerate all tests
        run: ./dev/run_all_tests.py --print_commands > tests_all_ordered
      # Randomize test order so that both chunks run for about the same amount of
      # time; use a fixed seed to make it predictable which particular chunk of tests
      # every test will run on
      - name: Randomize test order with fixed seed
        run: >-
          shuf
          --random-source=<(openssl enc -aes-256-ctr -pass pass:"42" -nosalt </dev/zero 2>/dev/null)
          tests_all_ordered > tests_all &&
          rm tests_all_ordered
      - name: Split tests into equal parts
        # Assuming that there won't be more than 9 chunks here
        run: >-
          split
          --number=l/$TEST_CHUNK_COUNT
          --numeric-suffixes=1
          --suffix-length=1
          tests_all
          tests_chunk_
      - name: Print tests that are going to be run in this chunk
        run: cat tests_chunk_${{ matrix.chunk_number }}
      # Run a selected chunk of tests in parallel, keep a log; on error, print out said log
      - name: Run tests
        run: >-
          cat tests_chunk_${{ matrix.chunk_number }}
          | sort
          | parallel --timeout 600 --group --joblog joblog.txt
          || { cat joblog.txt && exit 1; }
        env:
          # Map secrets to environment variables
          MC_CRIMSON_HEXAGON_API_KEY: ${{ secrets.MC_CRIMSON_HEXAGON_API_KEY }}
          MC_DOWNLOADS_AMAZON_S3_ACCESS_KEY_ID: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_ACCESS_KEY_ID }}
          MC_DOWNLOADS_AMAZON_S3_BUCKET_NAME: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_BUCKET_NAME }}
          MC_DOWNLOADS_AMAZON_S3_DIRECTORY_NAME: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_DIRECTORY_NAME }}
          MC_DOWNLOADS_AMAZON_S3_SECRET_ACCESS_KEY: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_SECRET_ACCESS_KEY }}
          MC_FACEBOOK_APP_ID: ${{ secrets.MC_FACEBOOK_APP_ID }}
          MC_FACEBOOK_APP_SECRET: ${{ secrets.MC_FACEBOOK_APP_SECRET }}
          MC_PODCAST_FETCH_EPISODE_BUCKET_NAME: ${{ secrets.MC_PODCAST_FETCH_EPISODE_BUCKET_NAME }}
          MC_PODCAST_FETCH_TRANSCRIPT_RUN_COSTLY_TEST: ${{ secrets.MC_PODCAST_FETCH_TRANSCRIPT_RUN_COSTLY_TEST }}
          MC_PODCAST_GC_AUTH_JSON_BASE64: ${{ secrets.MC_PODCAST_GC_AUTH_JSON_BASE64 }}
          MC_TWITTER_ACCESS_TOKEN: ${{ secrets.MC_TWITTER_ACCESS_TOKEN }}
          MC_TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.MC_TWITTER_ACCESS_TOKEN_SECRET }}
          MC_TWITTER_CONSUMER_KEY: ${{ secrets.MC_TWITTER_CONSUMER_KEY }}
          MC_TWITTER_CONSUMER_SECRET: ${{ secrets.MC_TWITTER_CONSUMER_SECRET }}