diff --git a/.github/workflows/data.yml b/.github/workflows/data.yml
index 48555b364..ac91395d5 100644
--- a/.github/workflows/data.yml
+++ b/.github/workflows/data.yml
@@ -4,7 +4,7 @@ on:
   workflow_call:
     outputs:
       path:
-        value: ${{ jobs.path.outputs.path }}
+        value: ${{ env.DATA_PATH }}
       crds_path:
         value: ${{ jobs.crds.outputs.path }}
       crds_server:
@@ -19,21 +19,19 @@ on:
   schedule:
     - cron: "42 4 * * 3"
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+env:
+  DATA_PATH: /tmp/data
+
 jobs:
-  path:
-    name: set data path
-    runs-on: ubuntu-latest
-    outputs:
-      path: ${{ steps.path.outputs.path }}
-    steps:
-      - id: path
-        run: echo "path=/tmp/data" >> $GITHUB_OUTPUT
   crds:
-    needs: [ path ]
     name: retrieve current CRDS context
     runs-on: ubuntu-latest
     env:
-      CRDS_PATH: ${{ needs.path.outputs.path }}/crds
+      CRDS_PATH: ${{ env.DATA_PATH }}/crds
       CRDS_SERVER_URL: https://roman-crds.stsci.edu
       OBSERVATORY: roman
     outputs:
@@ -50,7 +48,6 @@ jobs:
         # Get default CRDS_CONTEXT without installing crds client
         # See https://hst-crds.stsci.edu/static/users_guide/web_services.html#generic-request
   download_webbpsf_data:
-    needs: [ path ]
     if: (github.repository == 'spacetelescope/romancal' && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'update webbpsf data')))
     name: download and cache WebbPSF data
     runs-on: ubuntu-latest
@@ -66,7 +63,7 @@ jobs:
           wget ${{ env.WEBBPSF_DATA_URL }} -O tmp/webbpsf-data.tar.gz
           echo "hash=$( shasum tmp/webbpsf-data.tar.gz | cut -d ' ' -f 1 )" >> $GITHUB_OUTPUT
       - id: cache_path
-        run: echo "path=${{ needs.path.outputs.path }}/webbpsf-data" >> $GITHUB_OUTPUT
+        run: echo "path=${{ env.DATA_PATH }}/webbpsf-data" >> $GITHUB_OUTPUT
       - id: cache_download
         name: check downloaded data against the existing cache
         uses: actions/cache@v3
@@ -76,10 +73,10 @@ jobs:
       - if: ${{ steps.cache_download.outputs.cache-hit != 'true' }}
         name: extract data to cache directory
         run: |
-          mkdir -p ${{ needs.path.outputs.path }}
-          tar -xzvf tmp/webbpsf-data.tar.gz -C ${{ needs.path.outputs.path }}
+          mkdir -p ${{ env.DATA_PATH }}
+          tar -xzvf tmp/webbpsf-data.tar.gz -C ${{ env.DATA_PATH }}
   retrieve_webbpsf_data_hash:
-    needs: [ path, download_webbpsf_data ]
+    needs: [ download_webbpsf_data ]
     # run regardless if `download_webbpsf_data' succeeds or is skipped
     if: always() && (needs.download_webbpsf_data.result == 'success' || needs.download_webbpsf_data.result == 'skipped')
     name: retrieve hash of cached WebbPSF data
@@ -91,7 +88,7 @@ jobs:
       hash: ${{ steps.retrieve_hash.outputs.hash }}
     steps:
       - id: cache_path
-        run: echo "path=${{ needs.path.outputs.path }}/webbpsf-data" >> $GITHUB_OUTPUT
+        run: echo "path=${{ env.DATA_PATH }}/webbpsf-data" >> $GITHUB_OUTPUT
       - id: retrieve_hash
         name: retrieve data hash of latest cache key
         run: |