diff --git a/.github/workflows/1_create_release_pr.yml b/.github/workflows/1_create_release_pr.yml index 9f537369f4d..10a700b7ab4 100644 --- a/.github/workflows/1_create_release_pr.yml +++ b/.github/workflows/1_create_release_pr.yml @@ -21,7 +21,7 @@ jobs: uses: cylc/release-actions/stage-1/sanitize-inputs@v1 - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ env.BASE_REF }} fetch-depth: 0 # need to fetch all commits to check contributors @@ -30,7 +30,7 @@ jobs: uses: cylc/release-actions/check-shortlog@v1 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.x' @@ -43,15 +43,14 @@ jobs: init-file: 'cylc/flow/__init__.py' pypi-package-name: 'cylc-flow' - - name: Update "released on" date in changelog - continue-on-error: true - uses: cylc/release-actions/stage-1/update-changelog-release-date@v1 - with: - changelog-file: 'CHANGES.md' - - name: Test build uses: cylc/release-actions/build-python-package@v1 + - name: Generate changelog + run: | + python3 -m pip install -q towncrier + towncrier build --yes + - name: Create pull request uses: cylc/release-actions/stage-1/create-release-pr@v1 with: diff --git a/.github/workflows/2_auto_publish_release.yml b/.github/workflows/2_auto_publish_release.yml index d3886eedddf..f76f8afde2d 100644 --- a/.github/workflows/2_auto_publish_release.yml +++ b/.github/workflows/2_auto_publish_release.yml @@ -22,12 +22,12 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ env.MERGE_SHA }} - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.x' @@ -38,7 +38,7 @@ jobs: uses: cylc/release-actions/build-python-package@v1 - name: Publish distribution to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.7 + uses: pypa/gh-action-pypi-publish@v1.8.11 with: user: __token__ # uses the API token feature of PyPI - least permissions possible password: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/bash.yml b/.github/workflows/bash.yml index cf0e49c362b..eb34e3ad76d 100644 --- a/.github/workflows/bash.yml +++ b/.github/workflows/bash.yml @@ -44,7 +44,7 @@ jobs: - '5.0' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run Docker container run: | @@ -85,7 +85,7 @@ jobs: - name: Upload artifact if: failure() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: Upload cylc-run artifact + name: 'cylc-run (bash-${{ matrix.bash-version }})' path: cylc-run diff --git a/.github/workflows/branch_sync.yml b/.github/workflows/branch_sync.yml index c4334e465a3..92475094217 100644 --- a/.github/workflows/branch_sync.yml +++ b/.github/workflows/branch_sync.yml @@ -4,117 +4,17 @@ on: push: branches: - '8.*.x' + schedule: + - cron: '33 04 * * 1-5' # 04:33 UTC Mon-Fri workflow_dispatch: inputs: - branch: + head_branch: description: Branch to merge into master required: true jobs: sync: - runs-on: ubuntu-latest - timeout-minutes: 5 - env: - BRANCH: ${{ inputs.branch || github.ref_name }} - steps: - - name: Check branch name - shell: python - run: | - import os - import sys - - branch = os.environ['BRANCH'].strip() - if not branch: - sys.exit("::error::Branch name cannot be empty") - if branch.endswith('deconflict'): - sys.exit("::error::Do not run this workflow for already-created deconflict branches") - - with open(os.environ['GITHUB_ENV'], 'a') as F: - print(f'BRANCH={branch}', file=F) - 
print(f'DECONFLICT_BRANCH={branch}-deconflict', file=F) - - - name: Check for existing PR - id: check-pr - shell: python - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - import os - import json - import subprocess - import sys - - for env_var in ('BRANCH', 'DECONFLICT_BRANCH'): - branch = os.environ[env_var] - cmd = f'gh pr list -B master -H {branch} -s open --json url -R ${{ github.repository }}' - ret = subprocess.run( - cmd, shell=True, capture_output=True, text=True - ) - print(ret.stdout) - if ret.stderr: - print(f"::error::{ret.stderr}") - if ret.returncode: - sys.exit(ret.returncode) - if json.loads(ret.stdout): - print(f"::notice::Found existing PR for {branch}") - sys.exit(0) - - print("No open PRs found") - with open(os.environ['GITHUB_OUTPUT'], 'a') as f: - print('continue=true', file=f) - - - name: Checkout - if: steps.check-pr.outputs.continue - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: master - - - name: Configure git - if: steps.check-pr.outputs.continue - uses: cylc/release-actions/configure-git@v1 - - - name: Attempt merge - id: merge - if: steps.check-pr.outputs.continue - continue-on-error: true - run: git merge "origin/${BRANCH}" - - - name: Diff - id: diff - if: steps.merge.outcome == 'success' - run: | - if [[ "$(git rev-parse HEAD)" == "$(git rev-parse origin/master)" ]]; then - echo "::notice::master is up to date with $BRANCH" - exit 0 - fi - if git diff HEAD^ --exit-code --stat; then - echo "::notice::No diff between master and $BRANCH" - exit 0 - fi - echo "continue=true" >> $GITHUB_OUTPUT - - - name: Create deconflict branch - if: steps.merge.outcome == 'failure' - run: | - git merge --abort - git checkout -b "$DECONFLICT_BRANCH" "origin/${BRANCH}" - git push origin "$DECONFLICT_BRANCH" - echo "BRANCH=${DECONFLICT_BRANCH}" >> $GITHUB_ENV - - - name: Open PR - if: steps.merge.outcome == 'failure' || steps.diff.outputs.continue - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - BODY: | - Please do a **normal merge**, not squash merge - - --- - - Triggered by `${{ github.event_name }}` - run: | - gh pr create --head "$BRANCH" \ - --title "🤖 Merge ${BRANCH} into master" \ - --body "$BODY" - - gh pr edit "$BRANCH" --add-label "sync" || true + uses: cylc/release-actions/.github/workflows/branch-sync.yml@v1 + with: + head_branch: ${{ inputs.head_branch }} + secrets: inherit diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 041b6e50a70..d1d76b4e4d1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,10 +24,10 @@ jobs: python: '3.7' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} diff --git a/.github/workflows/shortlog.yml b/.github/workflows/shortlog.yml index 7998d92ad3f..25e2b0427e3 100644 --- a/.github/workflows/shortlog.yml +++ b/.github/workflows/shortlog.yml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 10 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # need to fetch all commits to check contributors diff --git a/.github/workflows/test_conda-build.yml b/.github/workflows/test_conda-build.yml index a077e798366..91d1ac2ad6b 100644 --- a/.github/workflows/test_conda-build.yml +++ b/.github/workflows/test_conda-build.yml @@ -19,7 +19,7 @@ jobs: timeout-minutes: 10 steps: - name: checkout cylc-flow - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: build conda env 
run: | diff --git a/.github/workflows/test_fast.yml b/.github/workflows/test_fast.yml index c4fc5c3308c..515a05222a4 100644 --- a/.github/workflows/test_fast.yml +++ b/.github/workflows/test_fast.yml @@ -17,21 +17,31 @@ jobs: runs-on: ${{ matrix.os }} timeout-minutes: 20 strategy: - fail-fast: false # Don't let a failed MacOS run stop the Ubuntu runs + fail-fast: false # don't stop on first failure matrix: os: ['ubuntu-latest'] - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python-version: ['3.7', '3.8', '3.10', '3.11', '3'] include: - - os: 'macos-latest' - python-version: '3.7' + # mac os test + - os: 'macos-13' + python-version: '3.7' # oldest supported version + + # non-utc timezone test + - os: 'ubuntu-latest' + python-version: '3.9' # not the oldest, not the most recent version + time-zone: 'XXX-09:35' + env: + # Use non-UTC time zone + TZ: ${{ matrix.time-zone }} PYTEST_ADDOPTS: --cov --cov-append -n 5 --color=yes + steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Configure Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -39,7 +49,7 @@ jobs: if: startsWith(matrix.os, 'ubuntu') run: | sudo apt-get update - sudo apt-get install -y shellcheck sqlite3 + sudo apt-get install -y sqlite3 - name: Install run: | @@ -48,31 +58,10 @@ jobs: - name: Configure git # Needed by the odd test uses: cylc/release-actions/configure-git@v1 - - name: Style - if: startsWith(matrix.os, 'ubuntu') - run: | - flake8 - etc/bin/shellchecker - - - name: Typing - if: startsWith(matrix.os, 'ubuntu') - run: mypy - - - name: Doctests - timeout-minutes: 4 - run: | - pytest cylc/flow - - name: Unit Tests - timeout-minutes: 4 + timeout-minutes: 5 run: | - pytest tests/unit - - - name: Bandit - if: ${{ matrix.python-version == '3.7' }} - # https://github.com/PyCQA/bandit/issues/658 - run: | - bandit -r --ini .bandit cylc/flow + pytest cylc/flow tests/unit - name: Integration Tests timeout-minutes: 6 @@ -81,7 +70,7 @@ jobs: - name: Upload failed tests artifact if: failure() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cylc-run (${{ matrix.os }} py-${{ matrix.python-version }}) path: ~/cylc-run/ @@ -92,15 +81,53 @@ jobs: coverage report - name: Upload coverage artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: coverage_${{ matrix.os }}_py-${{ matrix.python-version }} path: coverage.xml retention-days: 7 + lint: + runs-on: 'ubuntu-latest' + timeout-minutes: 10 + steps: + - name: Apt-Get Install + run: | + sudo apt-get update + sudo apt-get install -y shellcheck + + - name: Checkout + uses: actions/checkout@v4 + + # note: exclude python 3.10+ from mypy checks as these produce false + # positives in installed libraries for python 3.7 + - name: Configure Python + uses: actions/setup-python@v5 + with: + python-version: 3.9 + + - name: Install + run: | + pip install -e ."[tests]" + + - name: Flake8 + run: flake8 + + - name: Bandit + run: | + bandit -r --ini .bandit cylc/flow + + - name: Shellchecker + run: etc/bin/shellchecker + + - name: MyPy + run: mypy + + - name: Towncrier + run: towncrier build --draft + - name: Linkcheck - if: startsWith(matrix.python-version, '3.10') - run: pytest -m linkcheck --dist=load tests/unit + run: pytest -m linkcheck --dist=load --color=yes -n 10 tests/unit/test_links.py codecov: needs: test @@ -108,10 +135,10 @@ jobs: timeout-minutes: 2 steps: - name: Checkout - uses: actions/checkout@v3 + uses: 
actions/checkout@v4 - name: Download coverage artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Codecov upload uses: codecov/codecov-action@v3 diff --git a/.github/workflows/test_functional.yml b/.github/workflows/test_functional.yml index 2c3915b6caf..ed411c2eae8 100644 --- a/.github/workflows/test_functional.yml +++ b/.github/workflows/test_functional.yml @@ -46,9 +46,9 @@ jobs: # NOTE: includes must define ALL of the matrix values include: # latest python - - name: 'py-3.11' + - name: 'py-3-latest' os: 'ubuntu-latest' - python-version: '3.11' + python-version: '3' test-base: 'tests/f' chunk: '1/4' platform: '_local_background*' @@ -74,13 +74,13 @@ jobs: platform: '_remote_background_indep_tcp _remote_at_indep_tcp' # macos - name: 'macos 1/5' - os: 'macos-latest' + os: 'macos-11' python-version: '3.7' test-base: 'tests/f' chunk: '1/5' platform: '_local_background*' - name: 'macos 2/5' - os: 'macos-latest' + os: 'macos-11' python-version: '3.7' test-base: 'tests/f' chunk: '2/5' @@ -96,10 +96,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Configure Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -108,7 +108,7 @@ jobs: run: | # install system deps brew update - brew install bash coreutils gnu-sed + brew install bash coreutils gnu-sed grep # add GNU coreutils and sed to the user PATH # (see instructions in brew install output) @@ -118,6 +118,9 @@ jobs: echo \ "/usr/local/opt/gnu-sed/libexec/gnubin" \ >> "${GITHUB_PATH}" + echo \ + "/usr/local/opt/grep/libexec/gnubin" \ + >> "${GITHUB_PATH}" # add coreutils to the bashrc too (for jobs) cat >> "${HOME}/.bashrc" <<__HERE__ @@ -248,6 +251,7 @@ jobs: -exec echo '====== {} ======' \; -exec cat '{}' \; - name: Set artifact upload name + if: always() id: uploadname run: | # artifact name cannot contain '/' characters @@ -256,7 +260,7 @@ jobs: - name: Upload failed tests artifact if: failure() && steps.test.outcome == 'failure' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cylc-run (${{ steps.uploadname.outputs.uploadname }}) path: ~/cylc-run/ @@ -294,7 +298,7 @@ jobs: coverage report - name: Upload coverage artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: coverage_${{ steps.uploadname.outputs.uploadname }} path: coverage.xml @@ -306,10 +310,10 @@ jobs: timeout-minutes: 2 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download coverage artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Codecov upload uses: codecov/codecov-action@v3 diff --git a/.github/workflows/test_manylinux.yml b/.github/workflows/test_manylinux.yml index e3d21cee291..84e1e041283 100644 --- a/.github/workflows/test_manylinux.yml +++ b/.github/workflows/test_manylinux.yml @@ -38,13 +38,13 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Configure git # Needed by the odd test uses: cylc/release-actions/configure-git@v1 - name: Configure Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/test_tutorial_workflow.yml b/.github/workflows/test_tutorial_workflow.yml index 1e1fec1746e..7859b8588e2 100644 --- a/.github/workflows/test_tutorial_workflow.yml +++ b/.github/workflows/test_tutorial_workflow.yml @@ -26,12 
+26,12 @@ jobs: timeout-minutes: 10 steps: - name: configure python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install docs/tutorial dependencies uses: cylc/cylc-doc/.github/actions/install-dependencies@master diff --git a/.github/workflows/update_copyright.yml b/.github/workflows/update_copyright.yml index 603973f4d83..878412a1a27 100644 --- a/.github/workflows/update_copyright.yml +++ b/.github/workflows/update_copyright.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Configure git uses: cylc/release-actions/configure-git@v1 diff --git a/CHANGES.md b/CHANGES.md index eb0fda8e34f..4dfe61f42db 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,13 +4,60 @@ List of notable changes, for a complete list of changes see the [closed milestones](https://github.com/cylc/cylc-flow/milestones?state=closed) for each release. - + -## __cylc-8.2.0 (Upcoming)__ + + +## __cylc-8.2.3 (Released 2023-11-02)__ + +### 🔧 Fixes + +[#5660](https://github.com/cylc/cylc-flow/pull/5660) - Re-worked graph n-window algorithm for better efficiency. + +[#5753](https://github.com/cylc/cylc-flow/pull/5753) - Fixed bug where execution time limit polling intervals could end up incorrectly applied + +[#5776](https://github.com/cylc/cylc-flow/pull/5776) - Ensure that submit-failed tasks are marked as incomplete (so remain visible) when running in back-compat mode. + +[#5791](https://github.com/cylc/cylc-flow/pull/5791) - fix a bug where if multiple clock triggers are set for a task only one was being satisfied. + +## __cylc-8.2.2 (Released 2023-10-05)__ + +### 🚀 Enhancements + +[#5237](https://github.com/cylc/cylc-flow/pull/5237) - Back-compat: allow workflow-state xtriggers (and the `cylc workflow-state` + command) to read Cylc 7 databases. + +### 🔧 Fixes + +[#5693](https://github.com/cylc/cylc-flow/pull/5693) - Log command issuer, if not the workflow owner, for all commands. + +[#5694](https://github.com/cylc/cylc-flow/pull/5694) - Don't fail config file parsing if current working directory does not exist. + (Note however this may not be enough to prevent file parsing commands failing + elsewhere in the Python library). + +[#5704](https://github.com/cylc/cylc-flow/pull/5704) - Fix off-by-one error in automatic upgrade of Cylc 7 "max active cycle points" to Cylc 8 "runahead limit". + +[#5708](https://github.com/cylc/cylc-flow/pull/5708) - Fix runahead limit at start-up, with recurrences that start beyond the limit. + +[#5755](https://github.com/cylc/cylc-flow/pull/5755) - Fixes an issue where submit-failed tasks could be incorrectly considered as completed rather than causing the workflow to stall. + + +## __cylc-8.2.1 (Released 2023-08-14)__ + +### 🔧 Fixes + +[#5631](https://github.com/cylc/cylc-flow/pull/5631) - Fix bug in remote clean for workflows that generated `flow.cylc` files at runtime. + +[#5650](https://github.com/cylc/cylc-flow/pull/5650) - Fix a bug preventing clean-up of finished tasks in the GUI and TUI. + +[#5685](https://github.com/cylc/cylc-flow/pull/5685) - Fix "cylc pause" command help (it targets workflows, not tasks, but was + printing task-matching documentation as well). + + +## __cylc-8.2.0 (Released 2023-07-21)__ ### Breaking Changes @@ -21,13 +68,23 @@ issue which could cause jobs to fail if this variable became too long. 
### Enhancements --[#5605](https://github.com/cylc/cylc-flow/pull/5605) - A shorthand for defining --a list of strings - Before: `cylc command -s "X=['a', 'bc', 'd']"` - After: --`cylc command -z X=a,bc,d`. +[#5992](https://github.com/cylc/cylc-flow/pull/5992) - +Before trying to reload the workflow definition, the scheduler will +now wait for preparing tasks to submit, and pause the workflow. +After successful reload the scheduler will unpause the workflow. + +[#5605](https://github.com/cylc/cylc-flow/pull/5605) - Added `-z` shorthand +option for defining a list of strings: +- Before: `cylc command -s "X=['a', 'bc', 'd']"` +- After: `cylc command -z X=a,bc,d`. [#5537](https://github.com/cylc/cylc-flow/pull/5537) - Allow parameters in family names to be split, e.g. `FAM`. +[#5589](https://github.com/cylc/cylc-flow/pull/5589) - Move to workflow +directory during file parsing, to give the template processor access to +workflow files. + [#5405](https://github.com/cylc/cylc-flow/pull/5405) - Improve scan command help, and add scheduler PID to the output. @@ -42,18 +99,14 @@ Add the `-n` short option for `--workflow-name` to `cylc vip`; rename the `-n` short option for `--no-detach` to `-N`; add `-r` as a short option for `--run-name`. -[#5525](https://github.com/cylc/cylc-flow/pull/5525) - Jobs can use scripts -in `share/bin` and Python modules in `share/lib/python`. - -[#5328](https://github.com/cylc/cylc-flow/pull/5328) - -Efficiency improvements to reduce task management overheads on the Scheduler. +[#5231](https://github.com/cylc/cylc-flow/pull/5231) - stay up for a timeout +period on restarting a completed workflow, to allow for manual triggering. +[#5549](https://github.com/cylc/cylc-flow/pull/5549), [#5546](https://github.com/cylc/cylc-flow/pull/5546) - -`cylc lint` will provide a non-zero return code if any issues are identified. -This can be overridden using the new `--exit-zero` flag. - -[#5549](https://github.com/cylc/cylc-flow/pull/5549) - A large number of -enhancements to `cylc lint`: +Various enhancements to `cylc lint`: +* `cylc lint` will provide a non-zero return code if any issues are identified. + This can be overridden using the new `--exit-zero` flag. * Fix numbering of lint codes (n.b. lint codes should now be permenantly unchanging, but may have changed since Cylc 8.1.4, so `pyproject.toml` files may need updating). @@ -64,12 +117,40 @@ enhancements to `cylc lint`: * Only check for missing Jinja2 shebangs in `flow.cylc` and `suite.rc` files. + +[#5525](https://github.com/cylc/cylc-flow/pull/5525) - Jobs can use scripts +in `share/bin` and Python modules in `share/lib/python`. + ### Fixes +[#5328](https://github.com/cylc/cylc-flow/pull/5328) - +Efficiency improvements to reduce task management overheads on the Scheduler. + +[#5611](https://github.com/cylc/cylc-flow/pull/5611) - +Improve the documentation of the GraphQL schema. + +[#5616](https://github.com/cylc/cylc-flow/pull/5616) - +Improve PBS support for job IDs with trailing components. + +[#5619](https://github.com/cylc/cylc-flow/pull/5619) - +Fix an issue where the `task_pool` table in the database wasn't being updated +in a timely fashion when tasks completed. + +[#5606](https://github.com/cylc/cylc-flow/pull/5606) - +Task outputs and messages are now validated to avoid conflicts with built-in +outputs, messages, qualifiers and Cylc keywords. + +[#5614](https://github.com/cylc/cylc-flow/pull/5614) - +Fix a bug in Cylc 7 compatibility mode where tasks running in the `none` flow +(e.g. 
via `cylc trigger --flow=none`) would trigger downstream tasks. + [#5604](https://github.com/cylc/cylc-flow/pull/5604) - Fix a possible issue where workflows started using `cylc play --start-cycle-point` could hang during startup. +[#5573](https://github.com/cylc/cylc-flow/pull/5573) - Fix bug that ran a +queued waiting task even after removal by `cylc remove`. + [#5524](https://github.com/cylc/cylc-flow/pull/5524) - Logging includes timestamps for `cylc play` when called by `cylc vip` or `cylc vr`. @@ -84,6 +165,13 @@ mode before running pre-configure plugins. Permit commas in xtrigger arguments and fix minor issues with the parsing of xtrigger function signatures. +[#5618](https://github.com/cylc/cylc-flow/pull/5618) - +Fix a bug when rapidly issuing the same/opposite commands e.g. pausing & +resuming a workflow. + +[#5625](https://github.com/cylc/cylc-flow/pull/5625) - Exclude `setuptools` +version (v67) which results in dependency check failure with editable installs. + ## __cylc-8.1.4 (Released 2023-05-04)__ ### Fixes diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 72f63d67d03..a1bf42e6215 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,6 +36,11 @@ Feel free to ask questions on the issue or [developers chat](https://matrix.to/#/#cylc-general:matrix.org) if unsure about anything. +We use [towncrier](https://towncrier.readthedocs.io/en/stable/index.html) for +generating the changelog. Changelog entries are added by running +``` +towncrier create ..md --content "Short description" +``` ## Code Contributors @@ -59,7 +64,7 @@ requests_). - Prasanna Challuri - David Matthews - Tim Whitcomb - - (Scott Wales) + - Scott Wales - Tomek Trzeciak - Thomas Coleman - Bruno Kinoshita @@ -87,6 +92,7 @@ requests_). - John Haiducek - (Andrew Huang) - Cheng Da + - Mark Dawson (All contributors are identifiable with email addresses in the git version diff --git a/changes.d/5571.feat.md b/changes.d/5571.feat.md new file mode 100644 index 00000000000..4bda6c6af4b --- /dev/null +++ b/changes.d/5571.feat.md @@ -0,0 +1 @@ +Make workflow `CYLC_` variables available to the template processor during parsing. diff --git a/changes.d/5709.feat.md b/changes.d/5709.feat.md new file mode 100644 index 00000000000..11aeabcf81d --- /dev/null +++ b/changes.d/5709.feat.md @@ -0,0 +1 @@ +Forward arbitrary environment variables over SSH connections diff --git a/changes.d/5727.break.md b/changes.d/5727.break.md new file mode 100644 index 00000000000..06cb196216d --- /dev/null +++ b/changes.d/5727.break.md @@ -0,0 +1 @@ +Cylc now ignores `PYTHONPATH` to make it more robust to task environments which set this value. If you want to add to the Cylc environment itself, e.g. to install a Cylc extension, use `CYLC_PYTHONPATH`. \ No newline at end of file diff --git a/changes.d/5731.feat.md b/changes.d/5731.feat.md new file mode 100644 index 00000000000..b0c28a01ac1 --- /dev/null +++ b/changes.d/5731.feat.md @@ -0,0 +1 @@ +Major upgrade to `cylc tui` which now supports larger workflows and can browse installed workflows. diff --git a/changes.d/5772.feat.md b/changes.d/5772.feat.md new file mode 100644 index 00000000000..da0984a82ec --- /dev/null +++ b/changes.d/5772.feat.md @@ -0,0 +1 @@ +`cylc lint`: added a check for indentation being 4N spaces. diff --git a/changes.d/5789.fix.md b/changes.d/5789.fix.md new file mode 100644 index 00000000000..7eda67036e0 --- /dev/null +++ b/changes.d/5789.fix.md @@ -0,0 +1 @@ +Stop users changing run modes on restart. 
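The CONTRIBUTING.md hunk above introduces towncrier for changelog generation, with fragments added under `changes.d/` following a `<PR number>.<type>.md` naming pattern (e.g. `5789.fix.md`). As a rough sketch of that convention — assuming the types observed in this diff (`break`, `feat`, `fix`) are the full set, and using a hypothetical helper that is not part of cylc-flow — a fragment-name check might look like:

```python
import re
from pathlib import Path
from typing import List

# Fragment names observed in this diff: e.g. 5571.feat.md, 5727.break.md,
# 5789.fix.md; the valid types here are assumed from those examples.
FRAGMENT_RE = re.compile(r'^\d+\.(break|feat|fix)\.md$')


def check_fragments(changes_dir: str = 'changes.d') -> List[str]:
    """Return names under changes_dir that don't look like fragments."""
    return [
        path.name
        for path in Path(changes_dir).iterdir()
        # the towncrier template added by this diff also lives here
        if path.name != 'changelog-template.jinja'
        and not FRAGMENT_RE.match(path.name)
    ]


if __name__ == '__main__':
    for name in check_fragments():
        print(f'unexpected file in changes.d: {name}')
```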
diff --git a/changes.d/5794.break.md b/changes.d/5794.break.md new file mode 100644 index 00000000000..53c5315b013 --- /dev/null +++ b/changes.d/5794.break.md @@ -0,0 +1 @@ +Remove `cylc report-timings` from automatic installation with `pip install cylc-flow[all]`. If you now wish to install it use `pip install cylc-flow[report-timings]`. `cylc report-timings` is incompatible with Python 3.12. \ No newline at end of file diff --git a/changes.d/5801.fix.md b/changes.d/5801.fix.md new file mode 100644 index 00000000000..e7fd0584090 --- /dev/null +++ b/changes.d/5801.fix.md @@ -0,0 +1 @@ +Fix traceback when using parentheses on right hand side of graph trigger. diff --git a/changes.d/5803.feat.md b/changes.d/5803.feat.md new file mode 100644 index 00000000000..a4bc0f1b898 --- /dev/null +++ b/changes.d/5803.feat.md @@ -0,0 +1 @@ +Updated 'reinstall' functionality to support multiple workflows \ No newline at end of file diff --git a/changes.d/5821.fix.md b/changes.d/5821.fix.md new file mode 100644 index 00000000000..0c6c8b7918d --- /dev/null +++ b/changes.d/5821.fix.md @@ -0,0 +1 @@ +Fixed issue where large uncommitted changes could cause `cylc install` to hang. diff --git a/changes.d/5836.break.md b/changes.d/5836.break.md new file mode 100644 index 00000000000..8c14b101f63 --- /dev/null +++ b/changes.d/5836.break.md @@ -0,0 +1 @@ +Removed the 'CYLC_TASK_DEPENDENCIES' environment variable \ No newline at end of file diff --git a/changes.d/5838.feat.md b/changes.d/5838.feat.md new file mode 100644 index 00000000000..8e9919d3a0f --- /dev/null +++ b/changes.d/5838.feat.md @@ -0,0 +1 @@ +`cylc lint`: added rule to check for `rose date` usage (should be replaced with `isodatetime`). diff --git a/changes.d/5841.fix.md b/changes.d/5841.fix.md new file mode 100644 index 00000000000..4bc41462fca --- /dev/null +++ b/changes.d/5841.fix.md @@ -0,0 +1 @@ +`cylc lint`: improved handling of S011 to not warn if the `#` is `#$` (e.g. shell base arithmetic). diff --git a/changes.d/5872.feat.md b/changes.d/5872.feat.md new file mode 100644 index 00000000000..d88b0dd8116 --- /dev/null +++ b/changes.d/5872.feat.md @@ -0,0 +1 @@ +Improvements to `cylc clean` remote timeout handling. diff --git a/changes.d/changelog-template.jinja b/changes.d/changelog-template.jinja new file mode 100644 index 00000000000..9a96512694c --- /dev/null +++ b/changes.d/changelog-template.jinja @@ -0,0 +1,13 @@ +{% if sections[""] %} +{% for category, val in definitions.items() if category in sections[""] %} +### {{ definitions[category]['name'] }} + +{% for text, pulls in sections[""][category].items() %} +{{ pulls|join(', ') }} - {{ text }} + +{% endfor %} +{% endfor %} +{% else %} +No significant changes. 
+ +{% endif %} diff --git a/conda-environment.yml b/conda-environment.yml index 65083a25fbe..93f5cda68fc 100644 --- a/conda-environment.yml +++ b/conda-environment.yml @@ -9,14 +9,14 @@ dependencies: - graphviz # for static graphing # Note: can't pin jinja2 any higher than this until we give up on Cylc 7 back-compat - jinja2 >=3.0,<3.1 - - metomi-isodatetime >=1!3.0.0, <1!3.1.0 + - metomi-isodatetime >=1!3.0.0, <1!3.2.0 + - packaging # Constrain protobuf version for compatible Scheduler-UIS comms across hosts - - protobuf >=4.21.2,<4.22.0 + - protobuf >=4.24.4,<4.25.0 - psutil >=5.6.0 - python - pyzmq >=22 - - setuptools >=49, <67 - - importlib_metadata # [py<3.8] + - importlib_metadata >=5.0 # [py<3.12] - urwid >=2,<3 - tomli >=2 # [py<3.11] diff --git a/cylc/flow/__init__.py b/cylc/flow/__init__.py index f74e9a57cfa..d4192f72766 100644 --- a/cylc/flow/__init__.py +++ b/cylc/flow/__init__.py @@ -53,16 +53,24 @@ def environ_init(): environ_init() -__version__ = '8.2.0.dev' +__version__ = '8.3.0.dev' def iter_entry_points(entry_point_name): """Iterate over Cylc entry points.""" - import pkg_resources + import sys + if sys.version_info[:2] > (3, 11): + from importlib.metadata import entry_points + else: + # BACK COMPAT: importlib_metadata + # importlib.metadata was added in Python 3.8. The required interfaces + # were completed by 3.12. For lower versions we must use the + # importlib_metadata backport. + # FROM: Python 3.7 + # TO: Python: 3.12 + from importlib_metadata import entry_points yield from ( entry_point - for entry_point in pkg_resources.iter_entry_points(entry_point_name) - # Filter out the cylc namespace as it should be empty. - # All cylc packages should take the form cylc- - if entry_point.dist.key != 'cylc' + # for entry_point in entry_points()[entry_point_name] + for entry_point in entry_points().select(group=entry_point_name) ) diff --git a/cylc/flow/async_util.py b/cylc/flow/async_util.py index 73826ffe3ce..1e103615b33 100644 --- a/cylc/flow/async_util.py +++ b/cylc/flow/async_util.py @@ -17,6 +17,7 @@ import asyncio from functools import partial, wraps +from inspect import signature import os from pathlib import Path from typing import List, Union @@ -262,10 +263,22 @@ def __str__(self): def __repr__(self): return _AsyncPipe(self.func).__repr__() + @property + def __name__(self): + return self.func.__name__ + @property def __doc__(self): return self.func.__doc__ + @property + def __signature__(self): + return signature(self.func) + + @property + def __annotations__(self): + return self.func.__annotations__ + def pipe(func=None, preproc=None): """An asynchronous pipe implementation in pure Python. diff --git a/cylc/flow/cfgspec/globalcfg.py b/cylc/flow/cfgspec/globalcfg.py index c50845310db..2d39ad74829 100644 --- a/cylc/flow/cfgspec/globalcfg.py +++ b/cylc/flow/cfgspec/globalcfg.py @@ -22,7 +22,7 @@ from typing import List, Optional, Tuple, Any, Union from contextlib import suppress -from pkg_resources import parse_version +from packaging.version import Version from cylc.flow import LOG from cylc.flow import __version__ as CYLC_VERSION @@ -263,6 +263,13 @@ .. versionchanged:: 8.0.0 {REPLACES}``abort on inactivity``. + ''', + 'restart timeout': ''' + How long to wait for intervention on restarting a completed workflow. + The timer stops if any task is triggered. + + .. versionadded:: 8.2.0 + ''' } @@ -445,7 +452,8 @@ anywhere the scheduler environment ``$PATH``. They should return quickly. Multiple event handlers can be specified as a list of command line templates. 
-For supported template variables see :ref:`task_event_template_variables`. +For supported template variables see :ref:`user_guide.runtime.\ +event_handlers.task_event_handling.template_variables`. Python template substitution syntax is used: `String Formatting Operations in the Python documentation ][events]handler events`. + + Information about the event can be provided to the command + using :ref:`user_guide.runtime.event_handlers.\ +task_event_handling.template_variables`. + For more information, see + :ref:`user_guide.runtime.task_event_handling`. + + For workflow events, see + :ref:`user_guide.scheduler.workflow_event_handling`. + + Example:: + + echo %(event)s occurred in %(workflow)s >> my-log-file + ''', 'execution timeout': ''' If a task has not finished after the specified interval, the execution timeout event handler(s) will be called. ''', 'handler events': ''' + A list of events for which :cylc:conf:`[..]handlers` are run. + Specify the events for which the general task event handlers :cylc:conf:`flow.cylc[runtime][][events]handlers` should be invoked. + See :ref:`user_guide.runtime.task_event_handling` for more information. + Example:: submission failed, failed @@ -582,6 +610,26 @@ def default_for( Prior to Cylc 8, ``global.cylc`` was named ``global.rc``, but that name is no longer supported. ''') as SPEC: + with Conf('hub', desc=''' + Configure the public URL of Jupyter Hub. + + If configured, the ``cylc gui`` command will open a web browser at this + location rather than starting a standalone server when called. + + + .. seealso:: + + * The cylc hub :ref:`architecture-reference` for fuller details. + * :ref:`UI_Server_config` for practical details. + + '''): + Conf('url', VDR.V_STRING, '', desc=''' + .. versionadded:: 8.3.0 + + Where Jupyter Hub is used a url can be provided for routing on + execution of ``cylc gui`` command. + ''') + with Conf('scheduler', desc=( default_for(SCHEDULER_DESCR, "[scheduler]", section=True) )): @@ -839,6 +887,8 @@ def default_for( vdr_type = VDR.V_INTERVAL if item == "stall timeout": default = DurationFloat(3600) + elif item == "restart timeout": + default = DurationFloat(120) else: default = None Conf(item, vdr_type, default, desc=desc) @@ -1024,11 +1074,11 @@ def default_for( Alternative location for the run dir. If specified, the workflow run directory will - be created in ``/cylc-run/`` + be created in ``/cylc-run/`` and a symbolic link will be created from - ``$HOME/cylc-run/``. + ``$HOME/cylc-run/``. If not specified the workflow run directory will be created - in ``$HOME/cylc-run/``. + in ``$HOME/cylc-run/``. All the workflow files and the ``.service`` directory get installed into this directory. @@ -1038,11 +1088,11 @@ def default_for( Alternative location for the log dir. If specified the workflow log directory will be created in - ``/cylc-run//log`` and a + ``/cylc-run//log`` and a symbolic link will be created from - ``$HOME/cylc-run//log``. If not specified + ``$HOME/cylc-run//log``. If not specified the workflow log directory will be created in - ``$HOME/cylc-run//log``. + ``$HOME/cylc-run//log``. .. versionadded:: 8.0.0 """) @@ -1050,11 +1100,11 @@ def default_for( Alternative location for the share dir. If specified the workflow share directory will be - created in ``/cylc-run//share`` + created in ``/cylc-run//share`` and a symbolic link will be created from - ``<$HOME/cylc-run//share``. If not specified + ``<$HOME/cylc-run//share``. If not specified the workflow share directory will be created in - ``$HOME/cylc-run//share``. 
+ ``$HOME/cylc-run//share``. .. versionadded:: 8.0.0 """) @@ -1063,11 +1113,11 @@ def default_for( If specified the workflow share/cycle directory will be created in - ``/cylc-run//share/cycle`` + ``/cylc-run//share/cycle`` and a symbolic link will be created from - ``$HOME/cylc-run//share/cycle``. If not + ``$HOME/cylc-run//share/cycle``. If not specified the workflow share/cycle directory will be - created in ``$HOME/cylc-run//share/cycle``. + created in ``$HOME/cylc-run//share/cycle``. .. versionadded:: 8.0.0 """) @@ -1075,11 +1125,11 @@ def default_for( Alternative directory for the work dir. If specified the workflow work directory will be created in - ``/cylc-run//work`` and a + ``/cylc-run//work`` and a symbolic link will be created from - ``$HOME/cylc-run//work``. If not specified + ``$HOME/cylc-run//work``. If not specified the workflow work directory will be created in - ``$HOME/cylc-run//work``. + ``$HOME/cylc-run//work``. .. versionadded:: 8.0.0 """) @@ -1172,6 +1222,9 @@ def default_for( {PLATFORM_REPLACES.format("[job]batch system")} ''') + replaces = PLATFORM_REPLACES.format( + "[job]batch submit command template" + ) Conf('job runner command template', VDR.V_STRING, desc=f''' Set the command used by the chosen job runner. @@ -1180,9 +1233,7 @@ def default_for( .. versionadded:: 8.0.0 - {PLATFORM_REPLACES.format( - "[job]batch submit command template" - )} + {replaces} ''') Conf('shell', VDR.V_STRING, '/bin/bash', desc=''' @@ -1415,6 +1466,8 @@ def default_for( {REPLACES}``global.rc[hosts][]retrieve job logs command``. ''') + replaces = PLATFORM_REPLACES.format( + "[remote]retrieve job logs max size") Conf('retrieve job logs max size', VDR.V_STRING, desc=f''' {LOG_RETR_SETTINGS['retrieve job logs max size']} @@ -1422,9 +1475,10 @@ def default_for( {REPLACES}``global.rc[hosts][]retrieve job logs max size``. - {PLATFORM_REPLACES.format( - "[remote]retrieve job logs max size")} + {replaces} ''') + replaces = PLATFORM_REPLACES.format( + "[remote]retrieve job logs retry delays") Conf('retrieve job logs retry delays', VDR.V_INTERVAL_LIST, desc=f''' {LOG_RETR_SETTINGS['retrieve job logs retry delays']} @@ -1433,8 +1487,7 @@ def default_for( {REPLACES}``global.rc[hosts][]retrieve job logs retry delays``. - {PLATFORM_REPLACES.format( - "[remote]retrieve job logs retry delays")} + {replaces} ''') Conf('tail command template', VDR.V_STRING, 'tail -n +1 --follow=name %(filename)s', @@ -1602,6 +1655,14 @@ def default_for( .. versionadded:: 8.0.0 ''') + Conf('ssh forward environment variables', VDR.V_STRING_LIST, '', + desc=''' + A list containing the names of the environment variables to + forward with SSH connections to the workflow host from + the host running 'cylc play' + + .. versionadded:: 8.3.0 + ''') with Conf('selection', desc=''' How to select a host from the list of platform hosts. @@ -1808,8 +1869,7 @@ def get_version_hierarchy(version: str) -> List[str]: ['', '8', '8.0', '8.0.1', '8.0.1a2', '8.0.1a2.dev'] """ - smart_ver: Any = parse_version(version) - # (No type anno. 
yet for Version in pkg_resources.extern.packaging.version) + smart_ver = Version(version) base = [str(i) for i in smart_ver.release] hierarchy = [''] hierarchy += ['.'.join(base[:i]) for i in range(1, len(base) + 1)] diff --git a/cylc/flow/cfgspec/workflow.py b/cylc/flow/cfgspec/workflow.py index 9a94f1783d5..de919c27c0f 100644 --- a/cylc/flow/cfgspec/workflow.py +++ b/cylc/flow/cfgspec/workflow.py @@ -401,7 +401,7 @@ def get_script_common_text(this: str, example: Optional[str] = None): # differentiate between not set vs set to empty default = None elif item.endswith("handlers"): - desc = desc + '\n\n' + dedent(rf''' + desc = desc + '\n\n' + dedent(f''' Examples: .. code-block:: cylc @@ -413,9 +413,9 @@ def get_script_common_text(this: str, example: Optional[str] = None): {item} = echo %(workflow)s # configure multiple event handlers - {item} = \ - 'echo %(workflow)s, %(event)s', \ - 'my_exe %(event)s %(message)s' \ + {item} = \\ + 'echo %(workflow)s, %(event)s', \\ + 'my_exe %(event)s %(message)s' \\ 'curl -X PUT -d event=%(event)s host:port' ''') elif item.startswith("abort on"): @@ -605,7 +605,7 @@ def get_script_common_text(this: str, example: Optional[str] = None): The stop cycle point can be overridden on the command line using ``cylc play --stop-cycle-point=POINT`` - .. note: + .. note:: Not to be confused with :cylc:conf:`[..]final cycle point`: There can be more graph beyond this point, but you are @@ -747,25 +747,29 @@ def get_script_common_text(this: str, example: Optional[str] = None): ``cylc ext-trigger`` command. ''') Conf('clock-expire', VDR.V_STRING_LIST, desc=''' - Don't submit jobs if they are very late in wall clock time. + Don't submit jobs if they are too late in wall clock time. Clock-expire tasks enter the ``expired`` state and skip job submission if too far behind the wall clock when they become ready to run. - The expiry time is specified as an offset from - wall-clock time; typically it should be negative - see - :ref:`ClockExpireTasks`. - - .. note:: - The offset: + The expiry time is specified as an offset from the task's + cycle point. The offset: * May be positive or negative - * The offset may be omitted if it is zero. + * May be omitted if it is zero - Example: + .. seealso:: + + :ref:`ClockExpireTasks`. + + Examples: - ``PT1H`` - 1 hour + ``foo(PT1H)`` - expire task ``foo`` if the current wall clock + time has reached 1 hour after the task's cycle point. + + ``bar(-PT5M)`` - expire task ``bar`` if the current wall clock + time has reached 5 minutes *before* the task's cycle point. ''') Conf('sequential', VDR.V_STRING_LIST, desc=''' A list of tasks which automatically depend on their own @@ -1256,10 +1260,17 @@ def get_script_common_text(this: str, example: Optional[str] = None): - ``all`` - all instance of the task will fail - ``2017-08-12T06, 2017-08-12T18`` - these instances of the task will fail + + If you set :cylc:conf:`[..][..]execution retry delays` + the second attempt will succeed unless you set + :cylc:conf:`[..]fail try 1 only = False`. ''') Conf('fail try 1 only', VDR.V_BOOLEAN, True, desc=''' If ``True`` only the first run of the task instance will fail, otherwise retries will fail too. + + Task instances must be set to fail by + :cylc:conf:`[..]fail cycle points`. 
''') Conf('disable task event handlers', VDR.V_BOOLEAN, True, desc=''' @@ -1530,7 +1541,7 @@ def get_script_common_text(this: str, example: Optional[str] = None): The items in this section reflect options and defaults of the ``cylc workflow-state`` command, - except that the target workflow name and the + except that the target workflow ID and the ``--task``, ``--cycle``, and ``--status`` options are taken from the graph notation. @@ -1594,7 +1605,7 @@ def get_script_common_text(this: str, example: Optional[str] = None): You can also specify job environment templates here for :ref:`parameterized tasks `. '''): - Conf('', VDR.V_STRING, desc=''' + Conf('', VDR.V_STRING, desc=r''' A custom user defined variable for a task execution environment. @@ -1635,6 +1646,32 @@ def get_script_common_text(this: str, example: Optional[str] = None): MYITEM = %(item)s MYFILE = /path/to/%(i)03d/%(item)s + .. note:: + + As with other Cylc configurations, leading or trailing + whitespace will be stripped, so the following two + examples are equivalent: + + .. list-table:: + :class: grid-table + + * - .. code-block:: cylc + + [environment] + FOO = " a " + BAR = """ + $(foo bar baz) + """ + - .. code-block:: cylc + + [environment] + FOO = "a" + BAR = "$(foo bar baz)" + + If leading or trailing whitespace is required, consider + using the ``\0`` escape character, or set the variable + in :cylc:conf:`[..][..]env-script`. + .. versionchanged:: 7.8.7/7.9.2 Parameter environment templates (previously in @@ -1655,10 +1692,25 @@ def get_script_common_text(this: str, example: Optional[str] = None): this section (:ref:`MessageTriggers`) '''): Conf('', VDR.V_STRING, desc=''' - Task output messages (:ref:`MessageTriggers`). + Define custom task outputs (aka :ref:`MessageTriggers`). + + :term:`Custom outputs ` allow you to extend + the built-in task outputs e.g. ``succeeded`` and ``failed`` + in order to provide more detailed information about task + state. Custom outputs can be used to express dependencies + in the graph as with built-in outputs. + + Custom outputs are defined in the form: - The item name is used to select the custom output - message in graph trigger notation. + .. code-block:: cylc + + output = message + + Where ``output`` is the name of the output as it is used in + the graph, and ``message`` is the task message sent by + the ``cylc message`` command which tells Cylc that this + output has been completed. See :ref:`MessageTriggers` for + more details. Examples: @@ -1667,10 +1719,15 @@ def get_script_common_text(this: str, example: Optional[str] = None): out1 = "sea state products ready" out2 = "NWP restart files completed" - Task outputs are validated by - :py:class:`cylc.flow.unicode_rules.TaskOutputValidator`. + Custom outputs must satisfy these rules: .. autoclass:: cylc.flow.unicode_rules.TaskOutputValidator + :noindex: + + Task messages must satisfy these rules: + + .. 
autoclass:: cylc.flow.unicode_rules.TaskMessageValidator + :noindex: ''') with Conf('parameter environment templates', desc=''' @@ -1737,7 +1794,7 @@ def upg(cfg, descr): ['cylc', 'simulation', 'disable suite event handlers']) u.obsolete('8.0.0', ['cylc', 'simulation'], is_section=True) u.obsolete('8.0.0', ['visualization'], is_section=True) - u.obsolete('8.0.0', ['scheduling', 'spawn to max active cycle points']), + u.obsolete('8.0.0', ['scheduling', 'spawn to max active cycle points']) u.deprecate( '8.0.0', ['cylc', 'task event mail interval'], @@ -1798,7 +1855,10 @@ def upg(cfg, descr): '8.0.0', ['scheduling', 'max active cycle points'], ['scheduling', 'runahead limit'], - cvtr=converter(lambda x: f'P{x}' if x != '' else '', '"n" -> "Pn"'), + cvtr=converter( + lambda x: f'P{int(x) - 1}' if x != '' else '', + '"{old}" -> "{new}"' + ), silent=cylc.flow.flags.cylc7_back_compat, ) u.deprecate( diff --git a/cylc/flow/clean.py b/cylc/flow/clean.py index 8d980c6cfe6..c4434dc9244 100644 --- a/cylc/flow/clean.py +++ b/cylc/flow/clean.py @@ -103,15 +103,15 @@ async def get_contained_workflows(partial_id) -> List[str]: ) -def _clean_check(opts: 'Values', reg: str, run_dir: Path) -> None: +def _clean_check(opts: 'Values', id_: str, run_dir: Path) -> None: """Check whether a workflow can be cleaned. Args: - reg: Workflow name. + id_: Workflow name. run_dir: Path to the workflow run dir on the filesystem. """ - validate_workflow_name(reg) - reg = os.path.normpath(reg) + validate_workflow_name(id_) + id_ = os.path.normpath(id_) # Thing to clean must be a dir or broken symlink: if not run_dir.is_dir() and not run_dir.is_symlink(): raise FileNotFoundError(f"No directory to clean at {run_dir}") @@ -124,10 +124,10 @@ def _clean_check(opts: 'Values', reg: str, run_dir: Path) -> None: # about contact file. return try: - detect_old_contact_file(reg) + detect_old_contact_file(id_) except ContactFileExists as exc: raise ServiceFileError( - f"Cannot clean running workflow {reg}.\n\n{exc}" + f"Cannot clean running workflow {id_}.\n\n{exc}" ) @@ -187,7 +187,7 @@ def init_clean(id_: str, opts: 'Values') -> None: if platform_names and platform_names != {'localhost'}: remote_clean( - id_, platform_names, opts.rm_dirs, opts.remote_timeout + id_, platform_names, opts.remote_timeout, opts.rm_dirs ) if not opts.remote_only: @@ -336,38 +336,40 @@ def _clean_using_glob( def remote_clean( - reg: str, + id_: str, platform_names: Iterable[str], + timeout: str, rm_dirs: Optional[List[str]] = None, - timeout: str = '120' ) -> None: - """Run subprocesses to clean workflows on remote install targets + """Run subprocesses to clean a workflow on its remote install targets (skip localhost), given a set of platform names to look up. Args: - reg: Workflow name. + id_: Workflow name. platform_names: List of platform names to look up in the global config, in order to determine the install targets to clean on. + timeout: ISO 8601 duration or number of seconds to wait before + cancelling. rm_dirs: Sub dirs to remove instead of the whole run dir. - timeout: Number of seconds to wait before cancelling. 
""" try: install_targets_map = ( get_install_target_to_platforms_map(platform_names)) except PlatformLookupError as exc: raise PlatformLookupError( - f"Cannot clean {reg} on remote platforms as the workflow database " + f"Cannot clean {id_} on remote platforms as the workflow database " f"is out of date/inconsistent with the global config - {exc}") + queue: Deque[RemoteCleanQueueTuple] = deque() remote_clean_cmd = partial( - _remote_clean_cmd, reg=reg, rm_dirs=rm_dirs, timeout=timeout + _remote_clean_cmd, id_=id_, rm_dirs=rm_dirs, timeout=timeout ) for target, platforms in install_targets_map.items(): if target == get_localhost_install_target(): continue shuffle(platforms) LOG.info( - f"Cleaning {reg} on install target: " + f"Cleaning {id_} on install target: " f"{platforms[0]['install target']}" ) # Issue ssh command: @@ -376,7 +378,7 @@ def remote_clean( remote_clean_cmd(platform=platforms[0]), target, platforms ) ) - failed_targets: Dict[str, PlatformError] = {} + failed_targets: Dict[str, Union[PlatformError, str]] = {} # Handle subproc pool results almost concurrently: while queue: item = queue.popleft() @@ -387,7 +389,12 @@ def remote_clean( out, err = item.proc.communicate() if out: LOG.info(f"[{item.install_target}]\n{out}") - if ret_code: + if ret_code == 124: + failed_targets[item.install_target] = ( + f"cylc clean timed out after {timeout}s. You can increase " + "this timeout using the --timeout option." + ) + elif ret_code: this_platform = item.platforms.pop(0) excp = PlatformError( PlatformError.MSG_TIDY, @@ -415,15 +422,15 @@ def remote_clean( LOG.debug(f"[{item.install_target}]\n{err}") sleep(0.2) if failed_targets: - for target, excp in failed_targets.items(): + for target, info in failed_targets.items(): LOG.error( - f"Could not clean {reg} on install target: {target}\n{excp}" + f"Could not clean {id_} on install target: {target}\n{info}" ) - raise CylcError(f"Remote clean failed for {reg}") + raise CylcError(f"Remote clean failed for {id_}") def _remote_clean_cmd( - reg: str, + id_: str, platform: Dict[str, Any], rm_dirs: Optional[List[str]], timeout: str @@ -433,7 +440,7 @@ def _remote_clean_cmd( Call "cylc clean --local-only" over ssh and return the subprocess. Args: - reg: Workflow name. + id_: Workflow name. platform: Config for the platform on which to remove the workflow. rm_dirs: Sub dirs to remove instead of the whole run dir. timeout: Number of seconds to wait before cancelling the command. @@ -443,10 +450,10 @@ def _remote_clean_cmd( """ LOG.debug( - f"Cleaning {reg} on install target: {platform['install target']} " + f"Cleaning {id_} on install target: {platform['install target']} " f"(using platform: {platform['name']})" ) - cmd = ['clean', '--local-only', reg] + cmd = ['clean', '--local-only', '--no-scan', id_] if rm_dirs is not None: for item in rm_dirs: cmd.extend(['--rm', item]) diff --git a/cylc/flow/command_polling.py b/cylc/flow/command_polling.py index b36709e24aa..dcf186edbd9 100644 --- a/cylc/flow/command_polling.py +++ b/cylc/flow/command_polling.py @@ -28,17 +28,14 @@ def add_to_cmd_options(cls, parser, d_interval=60, d_max_polls=10): """Add command line options for commands that can do polling""" parser.add_option( "--max-polls", - help="Maximum number of polls (default " + str(d_max_polls) + ").", + help=r"Maximum number of polls (default: %default).", metavar="INT", action="store", dest="max_polls", default=d_max_polls) parser.add_option( "--interval", - help=( - "Polling interval in seconds (default " + str(d_interval) + - ")." 
- ), + help=r"Polling interval in seconds (default: %default).", metavar="SECS", action="store", dest="interval", diff --git a/cylc/flow/config.py b/cylc/flow/config.py index 6e0de54c2e6..d80456266bf 100644 --- a/cylc/flow/config.py +++ b/cylc/flow/config.py @@ -59,16 +59,16 @@ from cylc.flow.cycling.iso8601 import ingest_time, ISO8601Interval from cylc.flow.exceptions import ( CylcError, - WorkflowConfigError, + InputError, IntervalParsingError, - TaskDefError, ParamExpandError, - InputError + TaskDefError, + WorkflowConfigError, ) import cylc.flow.flags from cylc.flow.graph_parser import GraphParser from cylc.flow.listify import listify -from cylc.flow.option_parsers import verbosity_to_env +from cylc.flow.log_level import verbosity_to_env from cylc.flow.graphnode import GraphNodeParser from cylc.flow.param_expand import NameExpander from cylc.flow.parsec.exceptions import ItemNotFoundError @@ -79,8 +79,8 @@ get_cylc_run_dir, is_relative_to, ) -from cylc.flow.platforms import FORBIDDEN_WITH_PLATFORM from cylc.flow.print_tree import print_tree +from cylc.flow.simulation import configure_sim_modes from cylc.flow.subprocctx import SubFuncContext from cylc.flow.task_events_mgr import ( EventData, @@ -96,6 +96,7 @@ from cylc.flow.unicode_rules import ( TaskNameValidator, TaskOutputValidator, + TaskMessageValidator, XtriggerNameValidator, ) from cylc.flow.wallclock import ( @@ -208,6 +209,8 @@ def dequote(string): 'foo' >>> dequote('"f') '"f' + >>> dequote('f') + 'f' """ if len(string) < 2: @@ -518,7 +521,8 @@ def __init__( self.process_runahead_limit() if self.run_mode('simulation', 'dummy'): - self.configure_sim_modes() + configure_sim_modes( + self.taskdefs.values(), self.run_mode()) self.configure_workflow_state_polling_tasks() @@ -1337,68 +1341,6 @@ def configure_workflow_state_polling_tasks(self): script = "echo " + comstr + "\n" + comstr rtc['script'] = script - def configure_sim_modes(self): - """Adjust task defs for simulation and dummy mode.""" - for tdef in self.taskdefs.values(): - # Compute simulated run time by scaling the execution limit. - rtc = tdef.rtconfig - limit = rtc['execution time limit'] - speedup = rtc['simulation']['speedup factor'] - if limit and speedup: - sleep_sec = (DurationParser().parse( - str(limit)).get_seconds() / speedup) - else: - sleep_sec = DurationParser().parse( - str(rtc['simulation']['default run length']) - ).get_seconds() - rtc['execution time limit'] = ( - sleep_sec + DurationParser().parse(str( - rtc['simulation']['time limit buffer'])).get_seconds() - ) - rtc['job']['simulated run length'] = sleep_sec - - # Generate dummy scripting. - rtc['init-script'] = "" - rtc['env-script'] = "" - rtc['pre-script'] = "" - rtc['post-script'] = "" - scr = "sleep %d" % sleep_sec - # Dummy message outputs. - for msg in rtc['outputs'].values(): - scr += "\ncylc message '%s'" % msg - if rtc['simulation']['fail try 1 only']: - arg1 = "true" - else: - arg1 = "false" - arg2 = " ".join(rtc['simulation']['fail cycle points']) - scr += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2) - rtc['script'] = scr - - # Dummy mode jobs should run on platform localhost - # All Cylc 7 config items which conflict with platform are removed. - for section, keys in FORBIDDEN_WITH_PLATFORM.items(): - if section in rtc: - for key in keys: - if key in rtc[section]: - rtc[section][key] = None - - rtc['platform'] = 'localhost' - - # Disable environment, in case it depends on env-script. - rtc['environment'] = {} - - # Simulation mode tasks should fail in which cycle points? 
- f_pts = [] - f_pts_orig = rtc['simulation']['fail cycle points'] - if 'all' in f_pts_orig: - # None for "fail all points". - f_pts = None - else: - # (And [] for "fail no points".) - for point_str in f_pts_orig: - f_pts.append(get_point(point_str).standardise()) - rtc['simulation']['fail cycle points'] = f_pts - def get_parent_lists(self): return self.runtime['parents'] @@ -2271,10 +2213,17 @@ def get_taskdef( for output, message in ( self.cfg['runtime'][name]['outputs'].items() ): - valid, msg = TaskOutputValidator.validate(message) + valid, msg = TaskOutputValidator.validate(output) + if not valid: + raise WorkflowConfigError( + f'Invalid task output "' + f'[runtime][{name}][outputs]' + f'{output} = {message}" - {msg}' + ) + valid, msg = TaskMessageValidator.validate(message) if not valid: raise WorkflowConfigError( - f'Invalid message trigger "' + f'Invalid task message "' f'[runtime][{name}][outputs]' f'{output} = {message}" - {msg}' ) diff --git a/cylc/flow/cycling/__init__.py b/cylc/flow/cycling/__init__.py index 1bb60f916dc..e3d8a2fe64a 100644 --- a/cylc/flow/cycling/__init__.py +++ b/cylc/flow/cycling/__init__.py @@ -320,8 +320,8 @@ class SequenceBase(metaclass=ABCMeta): They should also provide get_async_expr, get_interval, get_offset & set_offset (deprecated), is_on_sequence, get_nearest_prev_point, get_next_point, - get_next_point_on_sequence, get_first_point, and - get_stop_point. + get_next_point_on_sequence, get_first_point + get_start_point, and get_stop_point. They should also provide a self.__eq__ implementation which should return whether a SequenceBase-derived object @@ -405,11 +405,32 @@ def get_first_point(self, point): """Return the first point >= to point, or None if out of bounds.""" pass + @abstractmethod + def get_start_point(self): + """Return the first point of this sequence.""" + pass + @abstractmethod def get_stop_point(self): - """Return the last point in this sequence, or None if unbounded.""" + """Return the last point of this sequence, or None if unbounded.""" pass + def get_first_n_points(self, n, point=None): + """Return a list of first n points of this sequence.""" + if point is None: + p1 = self.get_start_point() + else: + p1 = self.get_first_point(point) + if p1 is None: + return [] + result = [p1] + for _ in range(1, n): + p1 = self.get_next_point_on_sequence(p1) + if p1 is None: + break + result.append(p1) + return result + @abstractmethod def __eq__(self, other) -> bool: # Return True if other (sequence) is equal to self. diff --git a/cylc/flow/cycling/iso8601.py b/cylc/flow/cycling/iso8601.py index 7ceca4174b1..ed5b4f21979 100644 --- a/cylc/flow/cycling/iso8601.py +++ b/cylc/flow/cycling/iso8601.py @@ -269,8 +269,14 @@ def build_exclusions(self, excl_points): for point in excl_points: try: # Try making an ISO8601Sequence - exclusion = ISO8601Sequence(point, self.exclusion_start_point, - self.exclusion_end_point) + exclusion = ISO8601Sequence( + point, + self.exclusion_start_point, + self.exclusion_end_point, + # disable warnings which are logged when exclusion is a + # time point + zero_duration_warning=False, + ) self.exclusion_sequences.append(exclusion) except (AttributeError, TypeError, ValueError): # Try making an ISO8601Point @@ -284,7 +290,20 @@ class ISO8601Sequence(SequenceBase): """A sequence of ISO8601 date time points separated by an interval. Note that an ISO8601Sequence object (may) contain - ISO8601ExclusionSequences""" + ISO8601ExclusionSequences + + Args: + dep_section: + The full sequence expression. 
+ context_start_point: + Sequence start point from the global context. + context_end_point: + Sequence end point from the global context. + zero_duration_warning: + If `False`, then zero-duration recurrence warnings will be turned + off. This is set for exclusion parsing. + + """ TYPE = CYCLER_TYPE_ISO8601 TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601 @@ -303,8 +322,13 @@ def get_async_expr(cls, start_point=None): return "R1" return "R1/" + str(start_point) - def __init__(self, dep_section, context_start_point=None, - context_end_point=None): + def __init__( + self, + dep_section, + context_start_point=None, + context_end_point=None, + zero_duration_warning=True, + ): SequenceBase.__init__( self, dep_section, context_start_point, context_end_point) self.dep_section = dep_section @@ -344,7 +368,9 @@ def __init__(self, dep_section, context_start_point=None, # Parse_recurrence returns an isodatetime TimeRecurrence object # and a list of exclusion strings. self.recurrence, excl_points = self.abbrev_util.parse_recurrence( - dep_section) + dep_section, + zero_duration_warning=zero_duration_warning, + ) # Determine the exclusion start point and end point try: diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto index ced0cad7bef..6068bb1c5df 100644 --- a/cylc/flow/data_messages.proto +++ b/cylc/flow/data_messages.proto @@ -105,6 +105,7 @@ message PbWorkflow { optional bool pruned = 37; optional int32 is_runahead_total = 38; optional bool states_updated = 39; + optional int32 n_edge_distance = 40; } // Selected runtime fields @@ -131,6 +132,7 @@ message PbRuntime { // Nodes message PbJob { + reserved 29; /* see https://github.com/cylc/cylc-flow/pull/5672 */ optional string stamp = 1; optional string id = 2; optional int32 submit_num = 3; @@ -144,7 +146,6 @@ message PbJob { optional float execution_time_limit = 14; optional string platform = 15; optional string job_log_dir = 17; - repeated string extra_logs = 29; optional string name = 30; /* filter item */ optional string cycle_point = 31; /* filter item */ repeated string messages = 32; @@ -227,6 +228,7 @@ message PbTaskProxy { optional bool is_runahead = 26; optional bool flow_wait = 27; optional PbRuntime runtime = 28; + optional int32 graph_depth = 29; } message PbFamily { @@ -264,6 +266,7 @@ message PbFamilyProxy { optional bool is_runahead = 19; optional int32 is_runahead_total = 20; optional PbRuntime runtime = 21; + optional int32 graph_depth = 22; } message PbEdge { diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py index 437cc941fdf..82c620bcacf 100644 --- a/cylc/flow/data_messages_pb2.py +++ b/cylc/flow/data_messages_pb2.py @@ -14,26 +14,25 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 
\x03(\t\"\xa2\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updated\"\xb9\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t 
\x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputs\"\xab\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! 
\x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtime\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xe7\x07\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r 
\x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtime\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\x84\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtime\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 
\x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 
\x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\xb9\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputs\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 
\x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 
\x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r 
\x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'data_messages_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _PBWORKFLOW_STATETOTALSENTRY._options = None - _PBWORKFLOW_STATETOTALSENTRY._serialized_options = b'8\001' - _PBWORKFLOW_LATESTSTATETASKSENTRY._options = None - _PBWORKFLOW_LATESTSTATETASKSENTRY._serialized_options = b'8\001' - _PBTASKPROXY_OUTPUTSENTRY._options = None - _PBTASKPROXY_OUTPUTSENTRY._serialized_options = b'8\001' - _PBTASKPROXY_EXTERNALTRIGGERSENTRY._options = None - _PBTASKPROXY_EXTERNALTRIGGERSENTRY._serialized_options = b'8\001' - _PBTASKPROXY_XTRIGGERSENTRY._options = None - _PBTASKPROXY_XTRIGGERSENTRY._serialized_options = b'8\001' - _PBFAMILYPROXY_STATETOTALSENTRY._options = None - _PBFAMILYPROXY_STATETOTALSENTRY._serialized_options = b'8\001' + _globals['_PBWORKFLOW_STATETOTALSENTRY']._options = None + _globals['_PBWORKFLOW_STATETOTALSENTRY']._serialized_options = b'8\001' + _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._options = None + _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_options = b'8\001' + _globals['_PBTASKPROXY_OUTPUTSENTRY']._options = None + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_options = b'8\001' + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._options = None + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_options = b'8\001' + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._options = None + 
_globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_options = b'8\001' + _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._options = None + _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_options = b'8\001' _globals['_PBMETA']._serialized_start=24 _globals['_PBMETA']._serialized_end=174 _globals['_PBTIMEZONE']._serialized_start=177 @@ -41,61 +40,61 @@ _globals['_PBTASKPROXYREFS']._serialized_start=349 _globals['_PBTASKPROXYREFS']._serialized_end=388 _globals['_PBWORKFLOW']._serialized_start=391 - _globals['_PBWORKFLOW']._serialized_end=1961 - _globals['_PBWORKFLOW_STATETOTALSENTRY']._serialized_start=1411 - _globals['_PBWORKFLOW_STATETOTALSENTRY']._serialized_end=1461 - _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_start=1463 - _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_end=1536 - _globals['_PBRUNTIME']._serialized_start=1964 - _globals['_PBRUNTIME']._serialized_end=2789 - _globals['_PBJOB']._serialized_start=2792 - _globals['_PBJOB']._serialized_end=3475 - _globals['_PBTASK']._serialized_start=3478 - _globals['_PBTASK']._serialized_end=3832 - _globals['_PBPOLLTASK']._serialized_start=3835 - _globals['_PBPOLLTASK']._serialized_end=4051 - _globals['_PBCONDITION']._serialized_start=4054 - _globals['_PBCONDITION']._serialized_end=4257 - _globals['_PBPREREQUISITE']._serialized_start=4260 - _globals['_PBPREREQUISITE']._serialized_end=4410 - _globals['_PBOUTPUT']._serialized_start=4413 - _globals['_PBOUTPUT']._serialized_end=4553 - _globals['_PBTRIGGER']._serialized_start=4556 - _globals['_PBTRIGGER']._serialized_end=4721 - _globals['_PBTASKPROXY']._serialized_start=4724 - _globals['_PBTASKPROXY']._serialized_end=5723 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5349 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5406 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5408 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5475 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5477 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5537 - _globals['_PBFAMILY']._serialized_start=5726 - _globals['_PBFAMILY']._serialized_end=6054 - _globals['_PBFAMILYPROXY']._serialized_start=6057 - _globals['_PBFAMILYPROXY']._serialized_end=6829 - _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_start=1411 - _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_end=1461 - _globals['_PBEDGE']._serialized_start=6832 - _globals['_PBEDGE']._serialized_end=7020 - _globals['_PBEDGES']._serialized_start=7022 - _globals['_PBEDGES']._serialized_end=7145 - _globals['_PBENTIREWORKFLOW']._serialized_start=7148 - _globals['_PBENTIREWORKFLOW']._serialized_end=7390 - _globals['_EDELTAS']._serialized_start=7393 - _globals['_EDELTAS']._serialized_end=7568 - _globals['_FDELTAS']._serialized_start=7571 - _globals['_FDELTAS']._serialized_end=7750 - _globals['_FPDELTAS']._serialized_start=7753 - _globals['_FPDELTAS']._serialized_end=7943 - _globals['_JDELTAS']._serialized_start=7946 - _globals['_JDELTAS']._serialized_end=8119 - _globals['_TDELTAS']._serialized_start=8122 - _globals['_TDELTAS']._serialized_end=8297 - _globals['_TPDELTAS']._serialized_start=8300 - _globals['_TPDELTAS']._serialized_end=8486 - _globals['_WDELTAS']._serialized_start=8489 - _globals['_WDELTAS']._serialized_end=8684 - _globals['_ALLDELTAS']._serialized_start=8687 - _globals['_ALLDELTAS']._serialized_end=8896 + _globals['_PBWORKFLOW']._serialized_end=2011 + _globals['_PBWORKFLOW_STATETOTALSENTRY']._serialized_start=1441 + 
_globals['_PBWORKFLOW_STATETOTALSENTRY']._serialized_end=1491 + _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_start=1493 + _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_end=1566 + _globals['_PBRUNTIME']._serialized_start=2014 + _globals['_PBRUNTIME']._serialized_end=2839 + _globals['_PBJOB']._serialized_start=2842 + _globals['_PBJOB']._serialized_end=3511 + _globals['_PBTASK']._serialized_start=3514 + _globals['_PBTASK']._serialized_end=3868 + _globals['_PBPOLLTASK']._serialized_start=3871 + _globals['_PBPOLLTASK']._serialized_end=4087 + _globals['_PBCONDITION']._serialized_start=4090 + _globals['_PBCONDITION']._serialized_end=4293 + _globals['_PBPREREQUISITE']._serialized_start=4296 + _globals['_PBPREREQUISITE']._serialized_end=4446 + _globals['_PBOUTPUT']._serialized_start=4449 + _globals['_PBOUTPUT']._serialized_end=4589 + _globals['_PBTRIGGER']._serialized_start=4592 + _globals['_PBTRIGGER']._serialized_end=4757 + _globals['_PBTASKPROXY']._serialized_start=4760 + _globals['_PBTASKPROXY']._serialized_end=5801 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5411 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5468 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5470 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5537 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5539 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5599 + _globals['_PBFAMILY']._serialized_start=5804 + _globals['_PBFAMILY']._serialized_end=6132 + _globals['_PBFAMILYPROXY']._serialized_start=6135 + _globals['_PBFAMILYPROXY']._serialized_end=6949 + _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_start=1441 + _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_end=1491 + _globals['_PBEDGE']._serialized_start=6952 + _globals['_PBEDGE']._serialized_end=7140 + _globals['_PBEDGES']._serialized_start=7142 + _globals['_PBEDGES']._serialized_end=7265 + _globals['_PBENTIREWORKFLOW']._serialized_start=7268 + _globals['_PBENTIREWORKFLOW']._serialized_end=7510 + _globals['_EDELTAS']._serialized_start=7513 + _globals['_EDELTAS']._serialized_end=7688 + _globals['_FDELTAS']._serialized_start=7691 + _globals['_FDELTAS']._serialized_end=7870 + _globals['_FPDELTAS']._serialized_start=7873 + _globals['_FPDELTAS']._serialized_end=8063 + _globals['_JDELTAS']._serialized_start=8066 + _globals['_JDELTAS']._serialized_end=8239 + _globals['_TDELTAS']._serialized_start=8242 + _globals['_TDELTAS']._serialized_end=8417 + _globals['_TPDELTAS']._serialized_start=8420 + _globals['_TPDELTAS']._serialized_end=8606 + _globals['_WDELTAS']._serialized_start=8609 + _globals['_WDELTAS']._serialized_end=8804 + _globals['_ALLDELTAS']._serialized_start=8807 + _globals['_ALLDELTAS']._serialized_end=9016 # @@protoc_insertion_point(module_scope) diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py index be39f2d1a76..ef77105b3a4 100644 --- a/cylc/flow/data_store_mgr.py +++ b/cylc/flow/data_store_mgr.py @@ -33,19 +33,17 @@ includes workflow, task, and family definition objects. The cycle point nodes/edges (i.e. task/family proxies) generation is triggered -individually on transition from staging to active task pool. Each active task -is generated along with any children and parents recursively out to a -specified maximum graph distance (n_edge_distance), that can be externally -altered (via API). Collectively this forms the N-Distance-Window on the -workflow graph. 
- -Pruning of data-store elements is done using both the collection/set of nodes -generated through the associated graph paths of the active nodes and the -tracking of the boundary nodes (n_edge_distance+1) of those active nodes. -Once active, these boundary nodes act as the prune trigger for their -original/generator node(s). Set operations are used to do a diff between the -nodes of active paths (paths whose node is in the active task pool) and the -nodes of flagged paths (whose boundary node(s) have become active). +individually on transition to the active task pool. Each active task is +generated along with any children and parents via a graph walk out to a +specified maximum graph distance (n_edge_distance), which can be externally +altered (via API). Collectively this forms the N-Distance-Window on the +workflow graph. + +Pruning of data-store elements is done using the collection/set of nodes +generated at the boundary of an active node's graph walk and registering the +active node's parents against them. Once active, these boundary nodes act as +the prune triggers for the associated parent nodes. Set operations are used to +do a diff between the nodes of active paths (paths whose node is in the active +task pool) and the nodes of flagged paths (whose boundary node(s) have become +active). Updates are created by the event/task/job managers. @@ -63,7 +61,10 @@ from time import time from typing import ( Any, + Dict, Optional, + List, + Set, TYPE_CHECKING, Tuple, Union, @@ -71,6 +72,7 @@ import zlib from cylc.flow import __version__ as CYLC_VERSION, LOG +from cylc.flow.cycling.loader import get_point from cylc.flow.data_messages_pb2 import ( # type: ignore PbEdge, PbEntireWorkflow, PbFamily, PbFamilyProxy, PbJob, PbTask, PbTaskProxy, PbWorkflow, PbRuntime, AllDeltas, EDeltas, FDeltas, @@ -337,7 +339,8 @@ def apply_delta(key, delta, data): # elements and their relationships missing on reload. if key == TASK_PROXIES: # remove relationship from task - data[TASKS][data[key][del_id].task].proxies.remove(del_id) + with suppress(KeyError, ValueError): + data[TASKS][data[key][del_id].task].proxies.remove(del_id) # remove relationship from parent/family with suppress(KeyError, ValueError): data[FAMILY_PROXIES][ data[key][del_id].first_parent @@ -347,7 +350,10 @@ def apply_delta(key, delta, data): with suppress(KeyError, ValueError): getattr(data[WORKFLOW], key).remove(del_id) elif key == FAMILY_PROXIES: - data[FAMILIES][data[key][del_id].family].proxies.remove(del_id) + with suppress(KeyError, ValueError): + data[FAMILIES][ + data[key][del_id].family + ].proxies.remove(del_id) with suppress(KeyError, ValueError): data[FAMILY_PROXIES][ data[key][del_id].first_parent @@ -469,13 +475,15 @@ def __init__(self, schd): self.parents = {} self.state_update_families = set() self.updated_state_families = set() + # Update workflow state totals once more post delta application.
+ self.state_update_follow_on = False self.n_edge_distance = 1 self.next_n_edge_distance = None self.latest_state_tasks = { state: deque(maxlen=LATEST_STATE_TASKS_QUEUE_SIZE) for state in TASK_STATUSES_ORDERED } - self.xtrigger_tasks = {} + self.xtrigger_tasks: Dict[str, Set[Tuple[str, str]]] = {} # Managed data types self.data = { self.workflow_id: deepcopy(DATA_TEMPLATE) @@ -496,15 +504,20 @@ def __init__(self, schd): self.publish_deltas = [] # internal n-window self.all_task_pool = set() + self.all_n_window_nodes = set() self.n_window_nodes = {} - self.n_window_edges = {} - self.n_window_boundary_nodes = {} + self.n_window_edges = set() + self.n_window_node_walks = {} + self.n_window_completed_walks = set() + self.n_window_depths = {} + self.update_window_depths = False self.db_load_task_proxies = {} self.family_pruned_ids = set() self.prune_trigger_nodes = {} self.prune_flagged_nodes = set() self.pruned_task_proxies = set() self.updates_pending = False + self.updates_pending_follow_on = False self.publish_pending = False def initiate_data_model(self, reloaded=False): @@ -523,21 +536,23 @@ def initiate_data_model(self, reloaded=False): self.generate_definition_elements() # Update workflow statuses and totals (assume needed) - self.update_workflow() + self.update_workflow(True) # Apply current deltas self.batch_deltas() self.apply_delta_batch() + # Clear deltas after application + self.clear_delta_store() + self.clear_delta_batch() - if not reloaded: - # Gather this batch of deltas for publish - self.apply_delta_checksum() - self.publish_deltas = self.get_publish_deltas() + # Gather the store as a batch of deltas for publishing + self.batch_deltas(True) + self.apply_delta_checksum() + self.publish_deltas = self.get_publish_deltas() self.updates_pending = False - # Clear deltas after application and publishing - self.clear_delta_store() + # Clear second batch after publishing self.clear_delta_batch() def generate_definition_elements(self): @@ -553,8 +568,11 @@ def generate_definition_elements(self): families = self.added[FAMILIES] workflow = self.added[WORKFLOW] workflow.id = self.workflow_id + workflow.n_edge_distance = self.n_edge_distance workflow.last_updated = update_time workflow.stamp = f'{workflow.id}@{workflow.last_updated}' + # Treat play/restart as hard reload of definition. + workflow.reloaded = True graph = workflow.edges graph.leaves[:] = config.leaves @@ -696,32 +714,25 @@ def increment_graph_window( source_tokens: Tokens, point, flow_nums, - edge_distance=0, - active_id: Optional[str] = None, - descendant=False, - is_parent=False, is_manual_submit=False, itask=None ) -> None: """Generate graph window about active task proxy to n-edge-distance. - A recursive function, that creates a node then moves to children and - parents repeating this process out to one edge beyond the max window - size (in edges). Going out one edge further, we can trigger - pruning as new active tasks appear beyond this boundary. - + Fills in the graph walk from existing walks if possible, otherwise + expands the graph front from wherever has not yet been walked. + Walk nodes are grouped into locations which are tagged according to the + parent/child path, i.e. 'cpc' would be children-parents-children away + from the active/start task. This not only provides a way to cheaply + rewalk, but also gives the edge distance from the origin. + The furthest child boundary nodes are registered as prune triggers for + the origin's parents, so when they become active the parents are + assessed for pruning eligibility.
Args: source_tokens (cylc.flow.id.Tokens) point (PointBase) flow_nums (set) - edge_distance (int): - Graph distance from active/origin node. - active_id (str): - Active/origin node id. - descendant (bool): - Is the current node a direct descendent of the active/origin. - is_parent (bool) is_manual_submit (bool) itask (cylc.flow.task_proxy.TaskProxy): Active/Other task proxy, passed in with pool invocation. @@ -730,146 +741,343 @@ def increment_graph_window( None """ - is_active = not (descendant or is_parent) - # ID passed through recursion as reference to original/active node. - if active_id is None: - source_tokens = self.id_.duplicate(source_tokens) - active_id = source_tokens.id - - # flag manual triggers for pruning on deletion. - if is_manual_submit: - self.prune_trigger_nodes.setdefault(active_id, set()).add( - source_tokens.id - ) - # Setup and check if active node is another's boundary node - # to flag its paths for pruning. - if is_active: - self.n_window_edges[active_id] = set() - self.n_window_boundary_nodes[active_id] = {} - self.n_window_nodes[active_id] = set() - if active_id in self.prune_trigger_nodes: - self.prune_flagged_nodes.update( - self.prune_trigger_nodes[active_id]) - del self.prune_trigger_nodes[active_id] - - # This part is vital to constructing a set of boundary nodes - # associated with the current Active node. - if edge_distance > self.n_edge_distance: - if descendant and self.n_edge_distance > 0: - self.n_window_boundary_nodes[ - active_id - ].setdefault(edge_distance, set()).add(source_tokens.id) - return + # common references + active_id = source_tokens.id + all_walks = self.n_window_node_walks + taskdefs = self.schd.config.taskdefs + final_point = self.schd.config.final_point + + # walk keys/tags + # Children location tag + c_tag = 'c' + # Parents location tag + p_tag = 'p' + + # Setup walk fields: + # - locations (locs): i.e. 'cpc' children-parents-children from origin, + # with their respective node ids. + # - orphans: task no longer exists in workflow. + # - done_locs: set of locations that have been walked over. + # - done_ids: set of node ids that have been walked (from initial + # walk filling, that may not have been the entire walk). + # If walk already completed, must have gone from non-active to active + # again, so redo the walk (as walk nodes may be pruned). + if ( + active_id not in all_walks + or active_id in self.n_window_completed_walks + ): + all_walks[active_id] = { + 'locations': {}, + 'orphans': set(), + 'done_locs': set(), + 'done_ids': set(), + 'walk_ids': {active_id}, + 'depths': { + depth: set() + for depth in range(1, self.n_edge_distance + 1) + } + } + if active_id in self.n_window_completed_walks: + self.n_window_completed_walks.remove(active_id) + active_walk = all_walks[active_id] + active_locs = active_walk['locations'] + if source_tokens['task'] not in taskdefs: + active_walk['orphans'].add(active_id) # Generate task proxy node - is_orphan, graph_children = self.generate_ghost_task( + self.n_window_nodes[active_id] = set() + + self.generate_ghost_task( source_tokens, point, flow_nums, - is_parent, + False, itask ) - self.n_window_nodes[active_id].add(source_tokens.id) - - edge_distance += 1 + # Pre-populate from previous walks + # Will check all location permutations. + # There may be short cuts for parent locs, however children will more + # likely be incomplete walks with no 'done_locs' and using parents' + # children would require sifting out cousin branches.
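To make the location-tag scheme concrete, here is a hedged sketch of a tagged graph walk over a toy graph of plain dicts (window_locations and the example graph are made up; the real walk also excludes already-seen ids via walk_ids and works on taskdef graphs):

def window_locations(origin, children, parents, n_edge_distance):
    """Map location tags ('c', 'cp', 'cpc', ...) to the node ids found there.

    len(tag) is a node's edge distance from the origin, so a previous
    walk's 'c' nodes can cheaply seed a new walk's 'cc'/'cp' locations.
    """
    neighbours = {'c': children, 'p': parents}
    locations = {'': {origin}}
    frontier = ['']
    while frontier:
        loc = frontier.pop()
        if len(loc) >= n_edge_distance:
            continue  # reached the window boundary
        for tag in ('c', 'p'):
            # All children (or parents) of every node at this location.
            found = set().union(
                *(neighbours[tag].get(node, set())
                  for node in locations[loc])
            )
            if found:
                locations[loc + tag] = found
                frontier.append(loc + tag)
    return locations

children = {'a': {'b'}, 'b': {'c'}}
parents = {'b': {'a'}, 'c': {'b'}}
# From 'b' with n=2:
# {'': {'b'}, 'c': {'c'}, 'p': {'a'}, 'pc': {'b'}, 'cp': {'b'}}
print(window_locations('b', children, parents, 2))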
+ working_locs: List[str] = [] + if self.n_edge_distance > 1: + if c_tag in active_locs: + working_locs.extend(('cc', 'cp')) + if p_tag in active_locs: + working_locs.extend(('pp', 'pc')) + n_depth = 2 + while working_locs: + for w_loc in working_locs: + loc_done = True + # Most will be incomplete walks, however, we can check. + # i.e. parents of children may all exist. + if w_loc[:-1] in active_locs: + for loc_id in active_locs[w_loc[:-1]]: + if loc_id not in all_walks: + loc_done = False + break + else: + continue + # find child nodes of parent location, + # i.e. 'cpcc' = 'cpc' + 'c' + w_set = set().union(*( + all_walks[loc_id]['locations'][w_loc[-1]] + for loc_id in active_locs[w_loc[:-1]] + if ( + loc_id in all_walks + and w_loc[-1] in all_walks[loc_id]['locations'] + ) + )) + w_set.difference_update(active_walk['walk_ids']) + if w_set: + active_locs[w_loc] = w_set + active_walk['walk_ids'].update(w_set) + active_walk['depths'][n_depth].update(w_set) + # If child/parent nodes have been pruned we will need + # to regenerate them. + if ( + loc_done + and not w_set.difference(self.all_n_window_nodes) + ): + active_walk['done_locs'].add(w_loc[:-1]) + active_walk['done_ids'].update( + active_locs[w_loc[:-1]] + ) + working_locs = [ + new_loc + for loc in working_locs + if loc in active_locs and len(loc) < self.n_edge_distance + for new_loc in (loc + c_tag, loc + p_tag) + ] + n_depth += 1 - # Don't expand window about orphan task. + # Graph walk + node_tokens: Tokens child_tokens: Tokens parent_tokens: Tokens - if not is_orphan: - tdef = self.schd.config.taskdefs[source_tokens['task']] - # TODO: xtrigger is workflow_state edges too - # Reference set for workflow relations - final_point = self.schd.config.final_point - if descendant or is_active: - if graph_children is None: - graph_children = generate_graph_children(tdef, point) - if not any(graph_children.values()): - self.n_window_boundary_nodes[active_id].setdefault( - edge_distance - 1, - set() - ).add(source_tokens.id) - - # Children/downstream nodes - for items in graph_children.values(): - for child_name, child_point, _ in items: - if child_point > final_point: - continue - child_tokens = self.id_.duplicate( - cycle=str(child_point), - task=child_name, - ) - # We still increment the graph one further to find - # boundary nodes, but don't create elements. 
- if edge_distance <= self.n_edge_distance: - self.generate_edge( - source_tokens, - child_tokens, - active_id - ) - if child_tokens.id in self.n_window_nodes[active_id]: - continue - self.increment_graph_window( - child_tokens, - child_point, - flow_nums, - edge_distance, - active_id, - True, - False - ) + walk_incomplete = True + while walk_incomplete: + walk_incomplete = False + # Only walk locations not fully explored + locations = [ + loc + for loc in active_locs + if ( - # Parents/upstream nodes - if is_parent or is_active: - for items in generate_graph_parents( - tdef, - point, - self.schd.config.taskdefs - ).values(): - for parent_name, parent_point, _ in items: - if parent_point > final_point: + len(loc) < self.n_edge_distance + and loc not in active_walk['done_locs'] + ) + ] + # Origin/Active usually first or isolate nodes + if ( + not active_walk['done_ids'] + and not locations + and active_id not in active_walk['orphans'] + and self.n_edge_distance != 0 + ): + locations = [''] + # Explore/walk locations + for location in locations: + walk_incomplete = True + if not location: + loc_nodes = {active_id} + else: + loc_nodes = active_locs[location] + active_walk['done_locs'].add(location) + c_loc = location + c_tag + p_loc = location + p_tag + c_ids = set() + p_ids = set() + n_depth = len(location) + 1 + # Exclude walked nodes at this location. + # This also helps avoid walking in a circle. + for node_id in loc_nodes.difference(active_walk['done_ids']): + active_walk['done_ids'].add(node_id) + node_tokens = Tokens(node_id) + # Don't expand window about orphan task. + try: + tdef = taskdefs[node_tokens['task']] + except KeyError: + active_walk['orphans'].add(node_id) + continue + # Use existing children/parents from other walks. + # (note: nodes/edges should already be generated) + c_done = False + p_done = False + if node_id in all_walks and node_id is not active_id: + with suppress(KeyError): + # If children have been pruned, don't skip, + # re-generate them (uncommon or impossible?). + if not all_walks[node_id]['locations'][ + c_tag + ].difference(self.all_n_window_nodes): + c_ids.update( + all_walks[node_id]['locations'][c_tag] + ) + c_done = True + with suppress(KeyError): + # If parent have been pruned, don't skip, + # re-generate them (more common case). 
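
# A minimal sketch of the cache-reuse test above, assuming bare string
# ids: a neighbour's cached children are only trusted when none of them
# are missing from the live n-window (i.e. none have been pruned).

cached_children = {'1/a', '1/b'}
live_window_nodes = {'1/a', '1/b', '1/c'}
# Empty difference: every cached child still exists, so reuse the cache.
assert not cached_children.difference(live_window_nodes)
# If '1/b' had been pruned the difference would be non-empty, so the
# children would be regenerated rather than reused.
assert cached_children.difference({'1/a', '1/c'}) == {'1/b'}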
+ if not all_walks[node_id]['locations'][ + p_tag + ].difference(self.all_n_window_nodes): + p_ids.update( + all_walks[node_id]['locations'][p_tag] + ) + p_done = True + if p_done and c_done: continue - parent_tokens = self.id_.duplicate( - cycle=str(parent_point), - task=parent_name, - ) - if edge_distance <= self.n_edge_distance: - # reverse for parent - self.generate_edge( - parent_tokens, - source_tokens, - active_id + + # Children/downstream nodes + # TODO: xtrigger is workflow_state edges too + # see: https://github.com/cylc/cylc-flow/issues/4582 + # Reference set for workflow relations + nc_ids = set() + if not c_done: + if itask is not None and n_depth == 1: + graph_children = itask.graph_children + else: + graph_children = generate_graph_children( + tdef, + get_point(node_tokens['cycle']) ) - if parent_tokens.id in self.n_window_nodes[active_id]: - continue - self.increment_graph_window( - parent_tokens, - parent_point, - flow_nums, - edge_distance, - active_id, - False, - True - ) + for items in graph_children.values(): + for child_name, child_point, _ in items: + if child_point > final_point: + continue + child_tokens = self.id_.duplicate( + cycle=str(child_point), + task=child_name, + ) + self.generate_ghost_task( + child_tokens, + child_point, + flow_nums, + False, + None, + n_depth + ) + self.generate_edge( + node_tokens, + child_tokens, + active_id + ) + nc_ids.add(child_tokens.id) + + # Parents/upstream nodes + np_ids = set() + if not p_done: + for items in generate_graph_parents( + tdef, + get_point(node_tokens['cycle']), + taskdefs + ).values(): + for parent_name, parent_point, _ in items: + if parent_point > final_point: + continue + parent_tokens = self.id_.duplicate( + cycle=str(parent_point), + task=parent_name, + ) + self.generate_ghost_task( + parent_tokens, + parent_point, + flow_nums, + True, + None, + n_depth + ) + # reverse for parent + self.generate_edge( + parent_tokens, + node_tokens, + active_id + ) + np_ids.add(parent_tokens.id) + + # Register new walk + if node_id not in all_walks: + all_walks[node_id] = { + 'locations': {}, + 'done_ids': set(), + 'done_locs': set(), + 'orphans': set(), + 'walk_ids': {node_id} | nc_ids | np_ids, + 'depths': { + depth: set() + for depth in range(1, self.n_edge_distance + 1) + } + } + if nc_ids: + all_walks[node_id]['locations'][c_tag] = nc_ids + all_walks[node_id]['depths'][1].update(nc_ids) + c_ids.update(nc_ids) + if np_ids: + all_walks[node_id]['locations'][p_tag] = np_ids + all_walks[node_id]['depths'][1].update(np_ids) + p_ids.update(np_ids) + + # Create location association + c_ids.difference_update(active_walk['walk_ids']) + if c_ids: + active_locs.setdefault(c_loc, set()).update(c_ids) + p_ids.difference_update(active_walk['walk_ids']) + if p_ids: + active_locs.setdefault(p_loc, set()).update(p_ids) + active_walk['walk_ids'].update(c_ids, p_ids) + active_walk['depths'][n_depth].update(c_ids, p_ids) + + self.n_window_completed_walks.add(active_id) + self.n_window_nodes[active_id].update(active_walk['walk_ids']) - # If this is the active task (edge_distance has been incremented), - # then add the most distant child as a trigger to prune it. - if is_active: - levels = self.n_window_boundary_nodes[active_id].keys() + # This part is vital to constructing a set of boundary nodes + # associated with the n=0 window of current active node. 
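
# A minimal sketch of the furthest-boundary selection performed in the
# next hunk (locations are made up): among location tags containing no
# parent steps, the longest ones hold the prune-boundary nodes.

locations = {
    'c': {'1/b'},
    'cc': {'2/c'},
    'cp': {'1/a2'},  # contains a parent step, never a prune boundary
}
max_level = max(len(loc) for loc in locations if 'p' not in loc)
boundary = set().union(*(
    ids for loc, ids in locations.items()
    if 'p' not in loc and len(loc) >= max_level
))
assert max_level == 2 and boundary == {'2/c'}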
+ # Only trigger pruning for furthest set of boundary nodes
+ boundary_nodes: Set[str] = set()
+ max_level: int = 0
+ with suppress(ValueError):
+ max_level = max(
+ len(loc)
+ for loc in active_locs
+ if p_tag not in loc
+ )
+ # add the most distant child as a trigger to prune it.
+ boundary_nodes.update(*(
+ active_locs[loc]
+ for loc in active_locs
+ if p_tag not in loc and len(loc) >= max_level
+ ))
+ if not boundary_nodes and not max_level:
# Could be self-reference node foo:failed => foo
- if not levels:
- self.n_window_boundary_nodes[active_id][0] = {active_id}
- levels = (0,)
- # Only trigger pruning for furthest set of boundary nodes
- for tp_id in self.n_window_boundary_nodes[active_id][max(levels)]:
- self.prune_trigger_nodes.setdefault(
- tp_id, set()).add(active_id)
- del self.n_window_boundary_nodes[active_id]
- if self.n_window_edges[active_id]:
- getattr(self.updated[WORKFLOW], EDGES).edges.extend(
- self.n_window_edges[active_id])
+ boundary_nodes = {active_id}
+ # Associate the walk's nodes with its boundary prune triggers.
+ for tp_id in boundary_nodes:
+ try:
+ self.prune_trigger_nodes.setdefault(tp_id, set()).update(
+ active_walk['walk_ids']
+ )
+ self.prune_trigger_nodes[tp_id].discard(tp_id)
+ except KeyError:
+ self.prune_trigger_nodes.setdefault(tp_id, set()).add(
+ active_id
+ )
+ # flag manual triggers for pruning on deletion.
+ if is_manual_submit:
+ self.prune_trigger_nodes.setdefault(active_id, set()).add(
+ active_id
+ )
+ if active_walk['orphans']:
+ self.prune_trigger_nodes.setdefault(active_id, set()).update(
+ active_walk['orphans']
+ )
+ # Check if active node is another's boundary node
+ # to flag its paths for pruning.
+ if active_id in self.prune_trigger_nodes:
+ self.prune_flagged_nodes.update(
+ self.prune_trigger_nodes[active_id])
+ del self.prune_trigger_nodes[active_id]

def generate_edge(
self,
@@ -880,7 +1088,7 @@ def generate_edge(
"""Construct edge of child and parent task proxy node."""
# Initiate edge element.
e_id = self.edge_id(parent_tokens, child_tokens)
- if e_id in self.n_window_edges[active_id]:
+ if e_id in self.n_window_edges:
return
if (
e_id not in self.data[self.workflow_id][EDGES]
@@ -898,7 +1106,8 @@ def generate_edge(
self.updated[TASK_PROXIES].setdefault(
parent_tokens.id,
PbTaskProxy(id=parent_tokens.id)).edges.append(e_id)
- self.n_window_edges[active_id].add(e_id)
+ getattr(self.updated[WORKFLOW], EDGES).edges.append(e_id)
+ self.n_window_edges.add(e_id)

def remove_pool_node(self, name, point):
"""Remove ID reference and flag isolate node/branch for pruning."""
@@ -916,13 +1125,16 @@ def remove_pool_node(self, name, point):
):
self.prune_flagged_nodes.update(self.prune_trigger_nodes[tp_id])
del self.prune_trigger_nodes[tp_id]
- self.updates_pending = True
elif (
tp_id in self.n_window_nodes
and self.n_window_nodes[tp_id].isdisjoint(self.all_task_pool)
):
self.prune_flagged_nodes.add(tp_id)
- self.updates_pending = True
+ elif tp_id in self.n_window_node_walks:
+ self.prune_flagged_nodes.update(
+ self.n_window_node_walks[tp_id]['walk_ids']
+ )
+ self.updates_pending = True

def add_pool_node(self, name, point):
"""Add external ID reference for internal task pool node."""
@@ -931,6 +1143,7 @@ def add_pool_node(self, name, point):
task=name,
).id
self.all_task_pool.add(tp_id)
+ self.update_window_depths = True

def generate_ghost_task(
self,
@@ -938,8 +1151,9 @@ def generate_ghost_task(
point,
flow_nums,
is_parent=False,
- itask=None
- ) -> Tuple[bool, Optional[dict]]:
+ itask=None,
+ n_depth=0,
+ ):
"""Create task-point element populated with static data.
Args: @@ -950,29 +1164,26 @@ def generate_ghost_task( Used to determine whether to load DB state. itask (cylc.flow.task_proxy.TaskProxy): Update task-node from corresponding task proxy object. + n_depth (int): n-window graph edge distance. Returns: - (is_orphan, graph_children) - Orphan tasks with no children return (True, None) respectively. + None """ + tp_id = tokens.id + if ( + tp_id in self.data[self.workflow_id][TASK_PROXIES] + or tp_id in self.added[TASK_PROXIES] + ): + return + name = tokens['task'] point_string = tokens['cycle'] t_id = self.definition_id(name) - tp_id = tokens.id - task_proxies = self.data[self.workflow_id][TASK_PROXIES] - - is_orphan = False - if name not in self.schd.config.taskdefs: - is_orphan = True if itask is None: itask = self.schd.pool.get_task(point_string, name) - if tp_id in task_proxies or tp_id in self.added[TASK_PROXIES]: - if itask is None: - return is_orphan, None - return is_orphan, itask.graph_children if itask is None: itask = TaskProxy( @@ -984,7 +1195,9 @@ def generate_ghost_task( data_mode=True ) - if is_orphan: + is_orphan = False + if name not in self.schd.config.taskdefs: + is_orphan = True self.generate_orphan_task(itask) # Most of the time the definition node will be in the store. @@ -995,7 +1208,7 @@ def generate_ghost_task( task_def = self.added[TASKS][t_id] except KeyError: # Task removed from workflow definition. - return False, itask.graph_children + return update_time = time() tp_stamp = f'{tp_id}@{update_time}' @@ -1009,8 +1222,11 @@ def generate_ghost_task( in self.schd.pool.tasks_to_hold ), depth=task_def.depth, + graph_depth=n_depth, name=name, ) + self.all_n_window_nodes.add(tp_id) + self.n_window_depths.setdefault(n_depth, set()).add(tp_id) tproxy.namespace[:] = task_def.namespace if is_orphan: @@ -1063,7 +1279,7 @@ def generate_ghost_task( self.updates_pending = True - return is_orphan, itask.graph_children + return def generate_orphan_task(self, itask): """Generate orphan task definition.""" @@ -1182,7 +1398,6 @@ def generate_ghost_family(self, fp_id, child_fam=None, child_task=None): def apply_task_proxy_db_history(self): """Extract and apply DB history on given task proxies.""" - if not self.db_load_task_proxies: return @@ -1303,7 +1518,7 @@ def _process_internal_task_proxy(self, itask, tproxy): xtrig.id = sig xtrig.label = label xtrig.satisfied = satisfied - self.xtrigger_tasks.setdefault(sig, set()).add(tproxy.id) + self.xtrigger_tasks.setdefault(sig, set()).add((tproxy.id, label)) if tproxy.state in self.latest_state_tasks: tp_ref = itask.identity @@ -1372,7 +1587,7 @@ def insert_job(self, name, cycle_point, status, job_conf): name=tproxy.name, cycle_point=tproxy.cycle_point, execution_time_limit=job_conf.get('execution_time_limit'), - platform=job_conf.get('platform')['name'], + platform=job_conf['platform']['name'], job_runner_name=job_conf.get('job_runner_name'), ) # Not all fields are populated with some submit-failures, @@ -1388,7 +1603,6 @@ def insert_job(self, name, cycle_point, status, job_conf): # Add in log files. 
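
# A minimal sketch of the xtrigger index populated above (the signature
# string is made up): each signature maps to (task proxy id, label)
# pairs, so a satisfied xtrigger can update every dependent task while
# preserving each task's own label for it.

from typing import Dict, Set, Tuple

xtrigger_tasks: Dict[str, Set[Tuple[str, str]]] = {}
xtrigger_tasks.setdefault('myxtrig(cycle=1)', set()).add(('1/foo', 'x1'))
xtrigger_tasks.setdefault('myxtrig(cycle=1)', set()).add(('1/bar', 'x'))

for tp_id, label in xtrigger_tasks.get('myxtrig(cycle=1)', set()):
    # here the real code creates a task proxy delta carrying the
    # signature, the label, and the satisfied flag
    pass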
j_buf.job_log_dir = get_task_job_log( self.schd.workflow, tproxy.cycle_point, tproxy.name, sub_num) - j_buf.extra_logs.extend(job_conf.get('logfiles', [])) self.added[JOBS][j_id] = j_buf getattr(self.updated[WORKFLOW], JOBS).append(j_id) @@ -1486,50 +1700,140 @@ def insert_db_job(self, row_idx, row): tp_delta.jobs.append(j_id) self.updates_pending = True - def update_data_structure(self, reloaded=False): + def update_data_structure(self): """Workflow batch updates in the data structure.""" - # load database history for flagged nodes - self.apply_task_proxy_db_history() - # Avoids changing window edge distance during edge/node creation if self.next_n_edge_distance is not None: self.n_edge_distance = self.next_n_edge_distance + self.window_resize_rewalk() self.next_n_edge_distance = None + # load database history for flagged nodes + self.apply_task_proxy_db_history() + + self.updates_pending_follow_on = False self.prune_data_store() - if self.state_update_families: - self.update_family_proxies() - next_update_pending = False + # Find depth changes and create deltas + if self.update_window_depths: + self.window_depth_finder() + if self.updates_pending: + # update + self.update_family_proxies() + # Update workflow statuses and totals if needed self.update_workflow() # Don't process updated deltas of pruned nodes - if self.pruned_task_proxies: - next_update_pending = True self.prune_pruned_updated_nodes() - # Apply current deltas + # Gather deltas self.batch_deltas() + # Apply all deltas self.apply_delta_batch() - if reloaded: - self.clear_delta_batch() - self.batch_deltas(reloaded=True) - - if self.updates_pending or reloaded: + if self.updates_pending: self.apply_delta_checksum() # Gather this batch of deltas for publish self.publish_deltas = self.get_publish_deltas() - self.updates_pending = next_update_pending + self.updates_pending = self.updates_pending_follow_on # Clear deltas self.clear_delta_batch() self.clear_delta_store() + def update_workflow_states(self): + """Batch workflow state updates.""" + + # update the workflow state in the data store + self.update_workflow() + + # push out update deltas + self.batch_deltas() + self.apply_delta_batch() + self.apply_delta_checksum() + self.publish_deltas = self.get_publish_deltas() + + def window_resize_rewalk(self): + """Re-create data-store n-window on resize.""" + tokens: Tokens + # Gather pre-resize window nodes + if not self.all_n_window_nodes: + self.all_n_window_nodes = set().union(*( + v + for k, v in self.n_window_nodes.items() + if k in self.all_task_pool + )) + + # Clear window walks, and walk from scratch. + self.prune_flagged_nodes.clear() + self.n_window_node_walks.clear() + for tp_id in self.all_task_pool: + tokens = Tokens(tp_id) + tp_id, tproxy = self.store_node_fetcher(tokens) + self.increment_graph_window( + tokens, + get_point(tokens['cycle']), + tproxy.flow_nums + ) + # Flag difference between old and new window for pruning. + self.prune_flagged_nodes.update( + self.all_n_window_nodes.difference(*( + v + for k, v in self.n_window_nodes.items() + if k in self.all_task_pool + )) + ) + self.update_window_depths = True + + def window_depth_finder(self): + """Recalculate window depths, creating depth deltas.""" + # Setup new window depths + n_window_depths: Dict[int, Set[str]] = { + 0: self.all_task_pool.copy() + } + + depth = 1 + # Since starting from smaller depth, exclude those whose depth has + # already been found. 
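
# A worked example of the depth de-duplication in the loop that follows
# (walks and ids are made up): a node reached by two walks at different
# depths keeps the smallest depth at which any walk reached it.

walk_depths = {
    '1/a': {1: {'1/b'}, 2: {'1/c'}},
    '1/x': {1: {'1/c'}, 2: {'1/b'}},
}
found = set()  # depth 0 (the task pool itself) is already assigned
n_window_depths = {}
for depth in (1, 2):
    ids = set().union(*(w[depth] for w in walk_depths.values()))
    n_window_depths[depth] = ids - found
    found |= ids
assert n_window_depths == {1: {'1/b', '1/c'}, 2: set()}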
+ depth_found_tasks: Set[str] = self.all_task_pool.copy()
+ while depth <= self.n_edge_distance:
+ n_window_depths[depth] = set().union(*(
+ self.n_window_node_walks[n_id]['depths'][depth]
+ for n_id in self.all_task_pool
+ if (
+ n_id in self.n_window_node_walks
+ and depth in self.n_window_node_walks[n_id]['depths']
+ )
+ )).difference(depth_found_tasks)
+ depth_found_tasks.update(n_window_depths[depth])
+ # Calculate next depth parameters.
+ depth += 1
+
+ # Create deltas for nodes whose depth has changed. A node should
+ # appear only once across all depths, so the diff will contain it
+ # at a single depth, and only if it wasn't at that depth previously.
+ update_time = time()
+ for depth, node_set in n_window_depths.items():
+ node_set_diff = node_set.difference(
+ self.n_window_depths.setdefault(depth, set())
+ )
+ if not self.updates_pending and node_set_diff:
+ self.updates_pending = True
+ for tp_id in node_set_diff:
+ tp_delta = self.updated[TASK_PROXIES].setdefault(
+ tp_id, PbTaskProxy(id=tp_id)
+ )
+ tp_delta.stamp = f'{tp_id}@{update_time}'
+ tp_delta.graph_depth = depth
+ # Set old to new.
+ self.n_window_depths = n_window_depths
+ self.update_window_depths = False

def prune_data_store(self):
"""Remove flagged nodes and edges not in the set of active paths."""
@@ -1539,21 +1843,21 @@ def prune_data_store(self):
return

# Keep all nodes in the path of active tasks.
- in_paths_nodes = set().union(*[
+ self.all_n_window_nodes = set().union(*(
v
for k, v in self.n_window_nodes.items()
if k in self.all_task_pool
- ])
+ ))
# Gather all nodes in the paths of tasks flagged for pruning.
- out_paths_nodes = self.prune_flagged_nodes.union(*[
+ out_paths_nodes = self.prune_flagged_nodes.union(*(
v
for k, v in self.n_window_nodes.items()
if k in self.prune_flagged_nodes
- ])
+ ))
# Trim out any nodes in the runahead pool
out_paths_nodes.difference(self.all_task_pool)
# Prune only nodes not in the paths of active nodes
- node_ids = out_paths_nodes.difference(in_paths_nodes)
+ node_ids = out_paths_nodes.difference(self.all_n_window_nodes)
# Absolute triggers may be present in task pool, so recheck.
# Clear the rest.
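
# The pruning candidates above reduce to set algebra over node paths;
# a minimal sketch with made-up ids:

n_window_nodes = {
    '1/a': {'1/a', '1/b'},  # path of an active (pool) task
    '0/z': {'0/z', '1/b'},  # path of a task flagged for pruning
}
all_task_pool = {'1/a'}
prune_flagged = {'0/z'}

in_paths = set().union(*(
    v for k, v in n_window_nodes.items() if k in all_task_pool
))
out_paths = prune_flagged.union(*(
    v for k, v in n_window_nodes.items() if k in prune_flagged
))
# Only prune what no active path still needs ('1/b' survives):
assert out_paths.difference(in_paths) == {'0/z'}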
self.prune_flagged_nodes.intersection_update(self.all_task_pool) @@ -1564,8 +1868,6 @@ def prune_data_store(self): for tp_id in list(node_ids): if tp_id in self.n_window_nodes: del self.n_window_nodes[tp_id] - if tp_id in self.n_window_edges: - del self.n_window_edges[tp_id] if tp_id in tp_data: node = tp_data[tp_id] elif tp_id in tp_added: @@ -1573,8 +1875,15 @@ def prune_data_store(self): else: node_ids.remove(tp_id) continue + self.n_window_edges.difference_update(node.edges) + if tp_id in self.n_window_node_walks: + del self.n_window_node_walks[tp_id] + if tp_id in self.n_window_completed_walks: + self.n_window_completed_walks.remove(tp_id) for sig in node.xtriggers: - self.xtrigger_tasks[sig].remove(tp_id) + self.xtrigger_tasks[sig].remove( + (tp_id, node.xtriggers[sig].label) + ) if not self.xtrigger_tasks[sig]: del self.xtrigger_tasks[sig] @@ -1593,6 +1902,7 @@ def prune_data_store(self): if node_ids: self.pruned_task_proxies.update(node_ids) self.updates_pending = True + self.updates_pending_follow_on = True def _family_ascent_point_prune( self, fp_id, node_ids, parent_ids, checked_ids, prune_ids): @@ -1607,12 +1917,9 @@ def _family_ascent_point_prune( if fp_id in fp_data: fam_node = fp_data[fp_id] # Gather child families, then check/update recursively - child_fam_nodes = [ - n_id - for n_id in fam_node.child_families - if n_id not in checked_ids - ] - for child_id in child_fam_nodes: + for child_id in fam_node.child_families: + if child_id in checked_ids: + continue self._family_ascent_point_prune( child_id, node_ids, parent_ids, checked_ids, prune_ids) child_tasks = set(fam_node.child_tasks) @@ -1623,12 +1930,15 @@ def _family_ascent_point_prune( child_tasks.update(fp_updated[fp_id].child_tasks) if fp_updated[fp_id].child_families: child_families.update(fp_updated[fp_id].child_families) - # if any child tasks or families are active, don't prune. + # if any child tasks or families are in window, don't prune. if ( - child_tasks.difference(node_ids) - or child_families.difference(prune_ids) + child_tasks.difference(node_ids) + or child_families.difference(prune_ids) ): - if fp_id in prune_ids: + if ( + child_tasks.intersection(node_ids) + or child_families.intersection(prune_ids) + ): self.state_update_families.add(fp_id) else: if fam_node.first_parent: @@ -1684,7 +1994,7 @@ def update_family_proxies(self): self._family_ascent_point_update( next(iter(self.state_update_families))) if self.updated_state_families: - self.updates_pending = True + self.state_update_follow_on = True def _family_ascent_point_update(self, fp_id): """Updates the given family and children recursively. @@ -1695,6 +2005,7 @@ def _family_ascent_point_update(self, fp_id): ancestor to the set of families flagged for update. 
""" + all_nodes = self.all_n_window_nodes fp_added = self.added[FAMILY_PROXIES] fp_data = self.data[self.workflow_id][FAMILY_PROXIES] if fp_id in fp_data: @@ -1709,12 +2020,9 @@ def _family_ascent_point_update(self, fp_id): self.state_update_families.remove(fp_id) return # Gather child families, then check/update recursively - child_fam_nodes = [ - n_id - for n_id in fam_node.child_families - if n_id not in self.updated_state_families - ] - for child_fam_id in child_fam_nodes: + for child_fam_id in fam_node.child_families: + if child_fam_id in self.updated_state_families: + continue self._family_ascent_point_update(child_fam_id) if fp_id in self.state_update_families: fp_updated = self.updated[FAMILY_PROXIES] @@ -1726,6 +2034,7 @@ def _family_ascent_point_update(self, fp_id): is_held_total = 0 is_queued_total = 0 is_runahead_total = 0 + graph_depth = self.n_edge_distance for child_id in fam_node.child_families: child_node = fp_updated.get(child_id, fp_data.get(child_id)) if child_node is not None: @@ -1733,9 +2042,13 @@ def _family_ascent_point_update(self, fp_id): is_queued_total += child_node.is_queued_total is_runahead_total += child_node.is_runahead_total state_counter += Counter(dict(child_node.state_totals)) + if child_node.graph_depth < graph_depth: + graph_depth = child_node.graph_depth # Gather all child task states task_states = [] for tp_id in fam_node.child_tasks: + if all_nodes and tp_id not in all_nodes: + continue tp_delta = tp_updated.get(tp_id) tp_node = tp_added.get(tp_id, tp_data.get(tp_id)) @@ -1765,6 +2078,12 @@ def _family_ascent_point_update(self, fp_id): if tp_runahead.is_runahead: is_runahead_total += 1 + tp_depth = tp_delta + if tp_depth is None or not tp_depth.HasField('graph_depth'): + tp_depth = tp_node + if tp_depth.graph_depth < graph_depth: + graph_depth = tp_depth.graph_depth + state_counter += Counter(task_states) # created delta data element fp_delta = PbFamilyProxy( @@ -1776,7 +2095,8 @@ def _family_ascent_point_update(self, fp_id): is_queued=(is_queued_total > 0), is_queued_total=is_queued_total, is_runahead=(is_runahead_total > 0), - is_runahead_total=is_runahead_total + is_runahead_total=is_runahead_total, + graph_depth=graph_depth ) fp_delta.states[:] = state_counter.keys() # Use all states to clean up pruned counts @@ -1798,10 +2118,11 @@ def set_graph_window_extent(self, n_edge_distance): Maximum edge distance from active node. """ - self.next_n_edge_distance = n_edge_distance - self.updates_pending = True + if n_edge_distance != self.n_edge_distance: + self.next_n_edge_distance = n_edge_distance + self.updates_pending = True - def update_workflow(self): + def update_workflow(self, reloaded=False): """Update workflow element status and state totals.""" # Create new message and copy existing message content data = self.data[self.workflow_id] @@ -1811,7 +2132,9 @@ def update_workflow(self): # new updates/deltas not applied yet # so need to search/use updated states if available. 
- if self.updated_state_families:
+ if self.updated_state_families or self.state_update_follow_on:
+ if not self.updated_state_families:
+ self.state_update_follow_on = False
state_counter = Counter({})
is_held_total = 0
is_queued_total = 0
@@ -1855,6 +2178,13 @@ def update_workflow(self):
w_delta.status_msg = status_msg
delta_set = True

+ if reloaded is not w_data.reloaded:
+ w_delta.reloaded = reloaded
+
+ if w_data.n_edge_distance != self.n_edge_distance:
+ w_delta.n_edge_distance = self.n_edge_distance
+ delta_set = True
+
if self.schd.pool.main_pool:
pool_points = set(self.schd.pool.main_pool)
oldest_point = str(min(pool_points))
@@ -2152,6 +2482,7 @@ def delta_task_ext_trigger(
tp_id, PbTaskProxy(id=tp_id))
tp_delta.stamp = f'{tp_id}@{update_time}'
ext_trigger = tp_delta.external_triggers[trig]
+ ext_trigger.id = tproxy.external_triggers[trig].id
ext_trigger.message = message
ext_trigger.satisfied = satisfied
ext_trigger.time = update_time
@@ -2169,12 +2500,14 @@ def delta_task_xtrigger(self, sig, satisfied):
"""
update_time = time()
- for tp_id in self.xtrigger_tasks.get(sig, set()):
+ for tp_id, label in self.xtrigger_tasks.get(sig, set()):
# update task instance
tp_delta = self.updated[TASK_PROXIES].setdefault(
tp_id, PbTaskProxy(id=tp_id))
tp_delta.stamp = f'{tp_id}@{update_time}'
xtrigger = tp_delta.xtriggers[sig]
+ xtrigger.id = sig
+ xtrigger.label = label
xtrigger.satisfied = satisfied
xtrigger.time = update_time
self.updates_pending = True
diff --git a/cylc/flow/dbstatecheck.py b/cylc/flow/dbstatecheck.py
index e5a85805efe..ca45b5deba6 100644
--- a/cylc/flow/dbstatecheck.py
+++ b/cylc/flow/dbstatecheck.py
@@ -56,10 +56,13 @@ class CylcWorkflowDBChecker:
],
}

- def __init__(self, rund, workflow):
- db_path = expand_path(
- rund, workflow, "log", CylcWorkflowDAO.DB_FILE_BASE_NAME
- )
+ def __init__(self, rund, workflow, db_path=None):
+ # (Explicit db_path arg is to make testing easier).
+ if db_path is None:
+ # Infer DB path from workflow name and run dir.
+ db_path = expand_path(
+ rund, workflow, "log", CylcWorkflowDAO.DB_FILE_BASE_NAME
+ )
if not os.path.exists(db_path):
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), db_path)
self.conn = sqlite3.connect(db_path, timeout=10.0)
@@ -73,7 +76,7 @@ def display_maps(res):
sys.stdout.write((", ").join(row) + "\n")

def get_remote_point_format(self):
- """Query a remote workflow database for a 'cycle point format' entry"""
+ """Query a workflow database for a 'cycle point format' entry"""
for row in self.conn.execute(
rf'''
SELECT
@@ -87,6 +90,24 @@ def get_remote_point_format(self):
):
return row[0]

+ def get_remote_point_format_compat(self):
+ """Query a Cylc 7 suite database for a 'cycle point format' entry.
+
+ Back compat for Cylc 8 workflow state triggers targeting Cylc 7 DBs.
+ """
+ for row in self.conn.execute(
+ rf'''
+ SELECT
+ value
+ FROM
+ {CylcWorkflowDAO.TABLE_SUITE_PARAMS}
+ WHERE
+ key==?
+ ''', # nosec (table name is code constant) + ['cycle_point_format'] + ): + return row[0] + def state_lookup(self, state): """allows for multiple states to be searched via a status alias""" if state in self.STATE_ALIASES: diff --git a/cylc/flow/etc/tutorial/cylc-forecasting-workflow/etc/python-job.settings b/cylc/flow/etc/tutorial/cylc-forecasting-workflow/etc/python-job.settings index 15de1f8ea13..53b4aa2c17a 100644 --- a/cylc/flow/etc/tutorial/cylc-forecasting-workflow/etc/python-job.settings +++ b/cylc/flow/etc/tutorial/cylc-forecasting-workflow/etc/python-job.settings @@ -7,6 +7,5 @@ [[[environment]]] # These environment variables ensure that tasks can # run in the same environment as the workflow: - {% from "sys" import path, executable %} - PYTHONPATH = {{':'.join(path)}} + {% from "sys" import executable %} PATH = $(dirname {{executable}}):$PATH diff --git a/cylc/flow/etc/tutorial/runtime-tutorial/flow.cylc b/cylc/flow/etc/tutorial/runtime-tutorial/flow.cylc index 4593c75a68e..5356e113973 100644 --- a/cylc/flow/etc/tutorial/runtime-tutorial/flow.cylc +++ b/cylc/flow/etc/tutorial/runtime-tutorial/flow.cylc @@ -3,12 +3,8 @@ UTC mode = True [scheduling] - # Start the workflow 7 hours before now ignoring minutes and seconds - # * previous(T-00) takes the current time ignoring minutes and seconds. - # * - PT7H subtracts 7 hours from the time. - initial cycle point = previous(T-00) - PT7H - # Stop the workflow 6 hours after the initial cycle point. - final cycle point = +PT6H + # TODO: Set initial cycle point + # TODO: Set final cycle point [[graph]] # Repeat every three hours starting at the initial cycle point. PT3H = """ @@ -34,13 +30,5 @@ """ [runtime] - [[get_observations_camborne]] - [[get_observations_heathrow]] - [[get_observations_aldergrove]] - [[get_observations_shetland]] - [[consolidate_observations]] - [[forecast]] - [[get_rainfall]] - [[post_process_exeter]] -{% include 'etc/python-job.settings' %} +%include 'etc/python-job.settings' diff --git a/cylc/flow/exceptions.py b/cylc/flow/exceptions.py index 79a726d7bbe..d1a459f996b 100644 --- a/cylc/flow/exceptions.py +++ b/cylc/flow/exceptions.py @@ -26,11 +26,14 @@ Tuple, Type, Union, + TYPE_CHECKING, ) -from cylc.flow.subprocctx import SubFuncContext from cylc.flow.util import format_cmd +if TYPE_CHECKING: + from cylc.flow.subprocctx import SubFuncContext + class CylcError(Exception): """Generic exception for Cylc errors. 
@@ -198,7 +201,7 @@ def __init__( message: str, platform_name: str, *, - ctx: Optional[SubFuncContext] = None, + ctx: 'Optional[SubFuncContext]' = None, cmd: Optional[Union[str, Iterable]] = None, ret_code: Optional[int] = None, out: Optional[str] = None, diff --git a/cylc/flow/flow_mgr.py b/cylc/flow/flow_mgr.py index 148adb8213f..41aec947a80 100644 --- a/cylc/flow/flow_mgr.py +++ b/cylc/flow/flow_mgr.py @@ -16,11 +16,14 @@ """Manage flow counter and flow metadata.""" -from typing import Dict, Set, Optional +from typing import Dict, Set, Optional, TYPE_CHECKING import datetime from cylc.flow import LOG -from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + + +if TYPE_CHECKING: + from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager FlowNums = Set[int] diff --git a/cylc/flow/graph_parser.py b/cylc/flow/graph_parser.py index e09d3d8d319..ad0ec280a3d 100644 --- a/cylc/flow/graph_parser.py +++ b/cylc/flow/graph_parser.py @@ -23,7 +23,8 @@ Dict, List, Tuple, - Optional + Optional, + Union ) import cylc.flow.flags @@ -39,6 +40,20 @@ TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED ) +from cylc.flow.task_qualifiers import ( + QUAL_FAM_SUCCEED_ALL, + QUAL_FAM_SUCCEED_ANY, + QUAL_FAM_FAIL_ALL, + QUAL_FAM_FAIL_ANY, + QUAL_FAM_FINISH_ALL, + QUAL_FAM_FINISH_ANY, + QUAL_FAM_START_ALL, + QUAL_FAM_START_ANY, + QUAL_FAM_SUBMIT_ALL, + QUAL_FAM_SUBMIT_ANY, + QUAL_FAM_SUBMIT_FAIL_ALL, + QUAL_FAM_SUBMIT_FAIL_ANY, +) class Replacement: @@ -71,10 +86,10 @@ class GraphParser: store dependencies for the whole workflow (call parse_graph multiple times and key results by graph section). - The general form of a dependency is "EXPRESSION => NODE", where: - * On the right, NODE is a task or family name + The general form of a dependency is "LHS => RHS", where: * On the left, an EXPRESSION of nodes involving parentheses, and logical operators '&' (AND), and '|' (OR). + * On the right, an EXPRESSION of nodes NOT involving '|' * Node names may be parameterized (any number of parameters): NODE NODE # specific parameter value @@ -102,19 +117,6 @@ class GraphParser: CONTINUATION_STRS = (ARROW, OP_AND, OP_OR) BAD_STRS = (OP_AND_ERR, OP_OR_ERR) - QUAL_FAM_SUCCEED_ALL = "succeed-all" - QUAL_FAM_SUCCEED_ANY = "succeed-any" - QUAL_FAM_FAIL_ALL = "fail-all" - QUAL_FAM_FAIL_ANY = "fail-any" - QUAL_FAM_FINISH_ALL = "finish-all" - QUAL_FAM_FINISH_ANY = "finish-any" - QUAL_FAM_START_ALL = "start-all" - QUAL_FAM_START_ANY = "start-any" - QUAL_FAM_SUBMIT_ALL = "submit-all" - QUAL_FAM_SUBMIT_ANY = "submit-any" - QUAL_FAM_SUBMIT_FAIL_ALL = "submit-fail-all" - QUAL_FAM_SUBMIT_FAIL_ANY = "submit-fail-any" - # Map family trigger type to (member-trigger, any/all), for use in # expanding family trigger expressions to member trigger expressions. # - "FAM:succeed-all => g" means "f1:succeed & f2:succeed => g" @@ -516,32 +518,33 @@ def _proc_dep_pair( "Suicide markers must be" f" on the right of a trigger: {left}") + # Check that parentheses match. + mismatch_msg = 'Mismatched parentheses in: "{}"' + if left and left.count("(") != left.count(")"): + raise GraphParseError(mismatch_msg.format(left)) + if right.count("(") != right.count(")"): + raise GraphParseError(mismatch_msg.format(right)) + # Ignore cycle point offsets on the right side. # (Note we can't ban this; all nodes get process as left and right.) if '[' in right: return - # Check that parentheses match. - if left and left.count("(") != left.count(")"): - raise GraphParseError( - "Mismatched parentheses in: \"" + left + "\"") - # Split right side on AND. 
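
# A minimal sketch of the parenthesis check added above (the error type
# is simplified to ValueError): it is a count comparison only, which is
# enough here because the expression is fully parsed later.

def check_parens(expr: str) -> None:
    if expr and expr.count("(") != expr.count(")"):
        raise ValueError(f'Mismatched parentheses in: "{expr}"')

check_parens("(a & b) | c")  # balanced, no error
try:
    check_parens("(a & b | c")
except ValueError as exc:
    assert "Mismatched parentheses" in str(exc)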
rights = right.split(self.__class__.OP_AND) if '' in rights or right and not all(rights): raise GraphParseError( f"Null task name in graph: {left} => {right}") + lefts: Union[List[str], List[Optional[str]]] if not left or (self.__class__.OP_OR in left or '(' in left): - # Treat conditional or bracketed expressions as a single entity. + # Treat conditional or parenthesised expressions as a single entity # Can get [None] or [""] here - lefts: List[Optional[str]] = [left] + lefts = [left] else: # Split non-conditional left-side expressions on AND. # Can get [""] here too - # TODO figure out how to handle this wih mypy: - # assign List[str] to List[Optional[str]] - lefts = left.split(self.__class__.OP_AND) # type: ignore + lefts = left.split(self.__class__.OP_AND) if '' in lefts or left and not all(lefts): raise GraphParseError( f"Null task name in graph: {left} => {right}") @@ -846,9 +849,14 @@ def _compute_triggers( trigs += [f"{name}{offset}:{trigger}"] for right in rights: + right = right.strip('()') # parentheses don't matter m = self.__class__.REC_RHS_NODE.match(right) - # This will match, bad nodes are detected earlier (type ignore): - suicide_char, name, output, opt_char = m.groups() # type: ignore + if not m: + # Bad nodes should have been detected earlier; fail loudly + raise ValueError( # pragma: no cover + f"Unexpected graph expression: '{right}'" + ) + suicide_char, name, output, opt_char = m.groups() suicide = (suicide_char == self.__class__.SUICIDE) optional = (opt_char == self.__class__.OPTIONAL) if output: @@ -860,7 +868,7 @@ def _compute_triggers( if not output: # (Plain family name on RHS). # Make implicit success explicit. - output = self.__class__.QUAL_FAM_SUCCEED_ALL + output = QUAL_FAM_SUCCEED_ALL elif output.startswith("finish"): if optional: raise GraphParseError( diff --git a/cylc/flow/id.py b/cylc/flow/id.py index b8f34fb217f..222d16f82b6 100644 --- a/cylc/flow/id.py +++ b/cylc/flow/id.py @@ -497,7 +497,7 @@ def duplicate( )? (?: # cycle/task/job - { RELATIVE_PATTERN } + {RELATIVE_PATTERN} )? )? )? diff --git a/cylc/flow/id_cli.py b/cylc/flow/id_cli.py index 96cb438fded..803c8506439 100644 --- a/cylc/flow/id_cli.py +++ b/cylc/flow/id_cli.py @@ -31,11 +31,6 @@ upgrade_legacy_ids, ) from cylc.flow.pathutil import EXPLICIT_RELATIVE_PATH_REGEX -from cylc.flow.network.scan import ( - filter_name, - is_active, - scan, -) from cylc.flow.workflow_files import ( check_flow_file, detect_both_flow_and_suite, @@ -487,6 +482,12 @@ async def _expand_workflow_tokens_impl(tokens, match_active=True): 'currently supported.' ) + # import only when needed to avoid slowing CLI unnecessarily + from cylc.flow.network.scan import ( + filter_name, + is_active, + scan, + ) # construct the pipe pipe = scan | filter_name(fnmatch.translate(tokens['workflow'])) if match_active is not None: diff --git a/cylc/flow/install.py b/cylc/flow/install.py index 7db6eacb939..27810f72e97 100644 --- a/cylc/flow/install.py +++ b/cylc/flow/install.py @@ -547,7 +547,7 @@ def detect_flow_exists( """Returns True if installed flow already exists. Args: - run_path_base: Absolute path of workflow directory, + run_path_base: Absolute path of the parent of the workflow's run dir, i.e ~/cylc-run/ numbered: If True, will detect if numbered runs exist. If False, will detect if non-numbered runs exist, i.e. runs installed @@ -583,8 +583,8 @@ def check_nested_dirs( install dirs. Raises: - WorkflowFilesError if reg dir is nested inside a run dir, or an - install dirs are nested. 
+ WorkflowFilesError if run_dir is nested inside an existing run dir, + or install dirs are nested. """ if install_dir is not None: install_dir = Path(os.path.normpath(install_dir)) diff --git a/cylc/flow/install_plugins/log_vc_info.py b/cylc/flow/install_plugins/log_vc_info.py index d2379c5cc0d..29d861f7654 100644 --- a/cylc/flow/install_plugins/log_vc_info.py +++ b/cylc/flow/install_plugins/log_vc_info.py @@ -63,12 +63,22 @@ from pathlib import Path from subprocess import Popen, DEVNULL, PIPE from typing import ( - Any, Dict, Iterable, List, Optional, TYPE_CHECKING, TextIO, Union, overload + Any, + Dict, + Iterable, + List, + Optional, + TYPE_CHECKING, + TextIO, + Union, + overload, ) from cylc.flow import LOG as _LOG, LoggerAdaptor from cylc.flow.exceptions import CylcError import cylc.flow.flags +from cylc.flow.pipe_poller import pipe_poller +from cylc.flow.util import format_cmd from cylc.flow.workflow_files import WorkflowFiles if TYPE_CHECKING: @@ -171,7 +181,7 @@ def get_vc_info(path: Union[Path, str]) -> Optional[Dict[str, Any]]: ): LOG.debug(f"Source dir {path} is not a {vcs} repository") elif cylc.flow.flags.verbosity > -1: - LOG.warning(f"$ {vcs} {' '.join(args)}\n{exc}") + LOG.warning(f"$ {vcs} {format_cmd(args)}\n{exc}") continue info['version control system'] = vcs @@ -217,9 +227,7 @@ def _run_cmd( args: The args to pass to the version control command. cwd: Directory to run the command in. stdout: Where to redirect output (either PIPE or a - text stream/file object). Note: only use PIPE for - commands that will not generate a large output, otherwise - the pipe might get blocked. + text stream/file object). Returns: Stdout output if stdout=PIPE, else None as the output has been @@ -231,6 +239,7 @@ def _run_cmd( OSError: Non-zero return code for VCS command. 
""" cmd = [vcs, *args] + LOG.debug(f'$ {format_cmd(cmd)}') try: proc = Popen( # nosec cmd, @@ -245,13 +254,15 @@ def _run_cmd( # This will only be raised if the VCS command is not installed, # otherwise Popen() will succeed with a non-zero return code raise VCSNotInstalledError(vcs, exc) - ret_code = proc.wait() - out, err = proc.communicate() - if ret_code: + if stdout == PIPE: + out, err = pipe_poller(proc, proc.stdout, proc.stderr) + else: + out, err = proc.communicate() + if proc.returncode: if any(err.lower().startswith(msg) for msg in NO_BASE_ERRS[vcs]): # No base commit in repo raise VCSMissingBaseError(vcs, cwd) - raise OSError(ret_code, err) + raise OSError(proc.returncode, err) return out diff --git a/cylc/flow/job_file.py b/cylc/flow/job_file.py index cc8576dad37..930331dc5a4 100644 --- a/cylc/flow/job_file.py +++ b/cylc/flow/job_file.py @@ -25,16 +25,9 @@ from cylc.flow import __version__ as CYLC_VERSION from cylc.flow.job_runner_mgr import JobRunnerManager import cylc.flow.flags -from cylc.flow.option_parsers import verbosity_to_env +from cylc.flow.log_level import verbosity_to_env from cylc.flow.config import interpolate_template, ParamExpandError -# the maximum number of task dependencies which Cylc will list before -# omitting the CYLC_TASK_DEPENDENCIES environment variable -# see: https://github.com/cylc/cylc-flow/issues/5551 -# NOTE: please update `src/reference/job-script-vars/var-list.txt` -# in cylc-doc if changing this value -MAX_CYLC_TASK_DEPENDENCIES_LEN = 50 - class JobFileWriter: @@ -227,18 +220,6 @@ def _write_task_environment(self, handle, job_conf): handle.write( '\n export CYLC_TASK_NAMESPACE_HIERARCHY="%s"' % ' '.join(job_conf['namespace_hierarchy'])) - if len(job_conf['dependencies']) <= MAX_CYLC_TASK_DEPENDENCIES_LEN: - handle.write( - '\n export CYLC_TASK_DEPENDENCIES="%s"' % - ' '.join(job_conf['dependencies'])) - else: - # redact the CYLC_TASK_DEPENDENCIES variable but leave a note - # explaining why - # see: https://github.com/cylc/cylc-flow/issues/5551 - handle.write( - '\n # CYLC_TASK_DEPENDENCIES=disabled' - f' (more than {MAX_CYLC_TASK_DEPENDENCIES_LEN} dependencies)' - ) handle.write( '\n export CYLC_TASK_TRY_NUMBER=%s' % job_conf['try_num']) handle.write( diff --git a/cylc/flow/job_runner_handlers/documentation.py b/cylc/flow/job_runner_handlers/documentation.py index 735b1801b16..a6f6af983be 100644 --- a/cylc/flow/job_runner_handlers/documentation.py +++ b/cylc/flow/job_runner_handlers/documentation.py @@ -22,13 +22,16 @@ not intended to be subclassed. """ -import re from typing import ( Iterable, List, Tuple, + TYPE_CHECKING, ) +if TYPE_CHECKING: + import re + class ExampleHandler(): """Documentation for writing job runner handlers. @@ -110,7 +113,6 @@ class MyHandler(): * ``job_file_path`` * ``job_runner_command_template`` * ``job_runner_name`` - * ``logfiles`` * ``namespace_hierarchy`` * ``param_var`` * ``platform`` @@ -258,7 +260,7 @@ class QSUBHandler(PBSHandler): """ - REC_ID_FROM_SUBMIT_OUT: re.Pattern + REC_ID_FROM_SUBMIT_OUT: 're.Pattern' """Regular expression to extract job IDs from submission stderr. A regular expression (compiled) to extract the job "id" from the standard @@ -266,7 +268,7 @@ class QSUBHandler(PBSHandler): """ - REC_ID_FROM_SUBMIT_ERR: re.Pattern + REC_ID_FROM_SUBMIT_ERR: 're.Pattern' """Regular expression to extract job IDs from submission stderr. See :py:attr:`ExampleHandler.REC_ID_FROM_SUBMIT_OUT`. 
diff --git a/cylc/flow/job_runner_handlers/pbs.py b/cylc/flow/job_runner_handlers/pbs.py
index c28ce3899ff..aa264311fc4 100644
--- a/cylc/flow/job_runner_handlers/pbs.py
+++ b/cylc/flow/job_runner_handlers/pbs.py
@@ -82,7 +82,7 @@ class PBSHandler:
# system, so there is no need to filter its output.
POLL_CMD = "qstat"
POLL_CANT_CONNECT_ERR = "Connection refused"
- REC_ID_FROM_SUBMIT_OUT = re.compile(r"""\A\s*(?P<id>\S+)\s*\Z""")
+ REC_ID_FROM_SUBMIT_OUT = re.compile(r"^\s*(?P<id>\d+)", re.M)
SUBMIT_CMD_TMPL = "qsub '%(job)s'"

def format_directives(self, job_conf):
@@ -123,5 +123,10 @@ def format_directives(self, job_conf):
lines.append(self.DIRECTIVE_PREFIX + key)
return lines

+ @classmethod
+ def filter_poll_many_output(cls, out):
+ """Strip trailing stuff from the job ID."""
+ return cls.REC_ID_FROM_SUBMIT_OUT.findall(out)
+

JOB_RUNNER_HANDLER = PBSHandler()
diff --git a/cylc/flow/job_runner_handlers/slurm.py b/cylc/flow/job_runner_handlers/slurm.py
index 21c50b0a357..4ec6be20471 100644
--- a/cylc/flow/job_runner_handlers/slurm.py
+++ b/cylc/flow/job_runner_handlers/slurm.py
@@ -40,8 +40,11 @@

.. note::

- Since not all SLURM commands have a short form, cylc requires
- the long form directives.
+ * Cylc requires long form directives (e.g. ``--begin`` not ``-b``).
+ * Cylc requires an ``=`` even if the directive does not have a value
+ (e.g. ``--hold=`` not ``--hold``).
+ * If a directive does not have a value you may use the short form
+ (e.g. ``-H=``). But the directive must still be suffixed with an ``=``.

These are written to the top of the job script like this:

diff --git a/cylc/flow/job_runner_mgr.py b/cylc/flow/job_runner_mgr.py
index 60c68e7875c..f23c6c70104 100644
--- a/cylc/flow/job_runner_mgr.py
+++ b/cylc/flow/job_runner_mgr.py
@@ -403,7 +403,8 @@ def _create_nn(cls, job_file_path):
rmtree(
os.path.join(task_log_dir, name), ignore_errors=True)

- def _filter_submit_output(self, st_file_path, job_runner, out, err):
+ @classmethod
+ def _filter_submit_output(cls, st_file_path, job_runner, out, err):
"""Filter submit command output, if relevant."""
job_id = None
if hasattr(job_runner, "REC_ID_FROM_SUBMIT_ERR"):
@@ -421,9 +422,9 @@ def _filter_submit_output(self, st_file_path, job_runner, out, err):
job_id = job_runner.manip_job_id(job_id)
with open(st_file_path, "a") as job_status_file:
job_status_file.write("{0}={1}\n".format(
- self.CYLC_JOB_ID, job_id))
+ cls.CYLC_JOB_ID, job_id))
job_status_file.write("{0}={1}\n".format(
- self.CYLC_JOB_RUNNER_SUBMIT_TIME,
+ cls.CYLC_JOB_RUNNER_SUBMIT_TIME,
get_current_time_string()))
break
if hasattr(job_runner, "filter_submit_output"):
diff --git a/cylc/flow/log_level.py b/cylc/flow/log_level.py
new file mode 100644
index 00000000000..4b091154893
--- /dev/null
+++ b/cylc/flow/log_level.py
@@ -0,0 +1,116 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
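
# How the qstat filter added in the pbs.py hunk above behaves, assuming
# typical tabular qstat output (job ids made up): with re.M the pattern
# matches the leading digits of each job line and skips the header.

import re

REC_ID = re.compile(r"^\s*(?P<id>\d+)", re.M)
qstat_out = (
    "Job id    Name  User\n"
    "--------  ----  ----\n"
    "1234.pbs  j1    me\n"
    "5678.pbs  j2    me\n"
)
assert REC_ID.findall(qstat_out) == ["1234", "5678"]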
+ +"""Utilities for configuring logging level via the CLI.""" + +import logging +from typing import List, Dict, Union, TYPE_CHECKING + +if TYPE_CHECKING: + import os + + +def verbosity_to_log_level(verb: int) -> int: + """Convert Cylc verbosity to log severity level.""" + if verb < 0: + return logging.WARNING + if verb > 0: + return logging.DEBUG + return logging.INFO + + +def log_level_to_verbosity(lvl: int) -> int: + """Convert log severity level to Cylc verbosity. + + Examples: + >>> log_level_to_verbosity(logging.NOTSET) + 2 + >>> log_level_to_verbosity(logging.DEBUG) + 1 + >>> log_level_to_verbosity(logging.INFO) + 0 + >>> log_level_to_verbosity(logging.WARNING) + -1 + >>> log_level_to_verbosity(logging.ERROR) + -1 + """ + if lvl < logging.DEBUG: + return 2 + if lvl < logging.INFO: + return 1 + if lvl == logging.INFO: + return 0 + return -1 + + +def verbosity_to_opts(verb: int) -> List[str]: + """Convert Cylc verbosity to the CLI opts required to replicate it. + + Examples: + >>> verbosity_to_opts(0) + [] + >>> verbosity_to_opts(-2) + ['-q', '-q'] + >>> verbosity_to_opts(2) + ['-v', '-v'] + + """ + return [ + '-q' + for _ in range(verb, 0) + ] + [ + '-v' + for _ in range(0, verb) + ] + + +def verbosity_to_env(verb: int) -> Dict[str, str]: + """Convert Cylc verbosity to the env vars required to replicate it. + + Examples: + >>> verbosity_to_env(0) + {'CYLC_VERBOSE': 'false', 'CYLC_DEBUG': 'false'} + >>> verbosity_to_env(1) + {'CYLC_VERBOSE': 'true', 'CYLC_DEBUG': 'false'} + >>> verbosity_to_env(2) + {'CYLC_VERBOSE': 'true', 'CYLC_DEBUG': 'true'} + + """ + return { + 'CYLC_VERBOSE': str((verb > 0)).lower(), + 'CYLC_DEBUG': str((verb > 1)).lower(), + } + + +def env_to_verbosity(env: 'Union[Dict, os._Environ]') -> int: + """Extract verbosity from environment variables. + + Examples: + >>> env_to_verbosity({}) + 0 + >>> env_to_verbosity({'CYLC_VERBOSE': 'true'}) + 1 + >>> env_to_verbosity({'CYLC_DEBUG': 'true'}) + 2 + >>> env_to_verbosity({'CYLC_DEBUG': 'TRUE'}) + 2 + + """ + return ( + 2 if env.get('CYLC_DEBUG', '').lower() == 'true' + else 1 if env.get('CYLC_VERBOSE', '').lower() == 'true' + else 0 + ) diff --git a/cylc/flow/network/__init__.py b/cylc/flow/network/__init__.py index 448b35fb65d..916b129e244 100644 --- a/cylc/flow/network/__init__.py +++ b/cylc/flow/network/__init__.py @@ -67,7 +67,7 @@ def get_location(workflow: str) -> Tuple[str, int, int]: NB: if it fails to load the workflow contact file, it will exit. Args: - workflow: workflow name + workflow: workflow ID Returns: Tuple (host name, port number, publish port number) Raises: diff --git a/cylc/flow/network/authentication.py b/cylc/flow/network/authentication.py index b30392da84b..d9c571b8bf1 100644 --- a/cylc/flow/network/authentication.py +++ b/cylc/flow/network/authentication.py @@ -24,12 +24,12 @@ remove_keys_on_server) -def key_housekeeping(reg, platform=None, create=True): +def key_housekeeping(id_, platform=None, create=True): """Clean any existing authentication keys and create new ones. 
If create is set to false, keys will only be cleaned from server.""" - workflow_srv_dir = get_workflow_srv_dir(reg) + workflow_srv_dir = get_workflow_srv_dir(id_) keys = { "client_public_key": KeyInfo( KeyType.PUBLIC, diff --git a/cylc/flow/network/client.py b/cylc/flow/network/client.py index f8b499332e9..b3955d3b058 100644 --- a/cylc/flow/network/client.py +++ b/cylc/flow/network/client.py @@ -143,6 +143,10 @@ def timeout_handler(self) -> None: f'It has moved to {contact_host}:{contact_port}' ) + if os.getenv('CYLC_TASK_COMMS_METHOD'): + # don't attempt to clean up old contact files in task messages + return + # Cannot connect, perhaps workflow is no longer running and is leaving # behind a contact file? try: @@ -299,7 +303,9 @@ async def async_request( raise ClientTimeout( 'Timeout waiting for server response.' ' This could be due to network or server issues.' - ' Check the workflow log.' + '\n* You might want to increase the timeout using the' + ' --comms-timeout option;' + '\n* or check the workflow log.' ) if msg['command'] in PB_METHOD_MAP: diff --git a/cylc/flow/network/client_factory.py b/cylc/flow/network/client_factory.py index 161bbcbbc82..c387f3d1699 100644 --- a/cylc/flow/network/client_factory.py +++ b/cylc/flow/network/client_factory.py @@ -46,7 +46,7 @@ def get_runtime_client( Args: comm_method: communication method - workflow: workflow name + workflow: workflow ID """ if comms_method == CommsMeth.SSH: from cylc.flow.network.ssh_client import WorkflowRuntimeClient diff --git a/cylc/flow/network/multi.py b/cylc/flow/network/multi.py index 86c89c5f71d..a11842d6391 100644 --- a/cylc/flow/network/multi.py +++ b/cylc/flow/network/multi.py @@ -107,4 +107,4 @@ def _report_single(report, workflow, result): def _report(_): - print('Done') + print('Command submitted; the scheduler will log any problems.') diff --git a/cylc/flow/network/resolvers.py b/cylc/flow/network/resolvers.py index b6effe143c6..c51946ff188 100644 --- a/cylc/flow/network/resolvers.py +++ b/cylc/flow/network/resolvers.py @@ -267,6 +267,10 @@ def node_filter(node, node_type, args, state): args.get('maxdepth', -1) < 0 or node.depth <= args['maxdepth'] ) + and ( + args.get('graph_depth', -1) < 0 + or node.graph_depth <= args['graph_depth'] + ) # Now filter node against id arg lists and ( not args.get('ids') @@ -568,6 +572,9 @@ async def subscribe_delta(self, root, info, args): workflow_id=w_id) delta_store[DELTA_ADDED] = ( self.data_store_mgr.data[w_id]) + delta_store[DELTA_ADDED][ + WORKFLOW + ].reloaded = True deltas_queue.put( (w_id, 'initial_burst', delta_store)) elif w_id in self.delta_store[sub_id]: @@ -682,23 +689,35 @@ async def mutator( result = (True, 'Command queued') return [{'id': w_id, 'response': result}] + def _log_command(self, command: str, user: str) -> None: + """Log receipt of command, with user name if not owner.""" + is_owner = user == self.schd.owner + if command == 'put_messages' and is_owner: + # Logging put_messages is overkill. 
+ return + log_msg = f"[command] {command}" + if not is_owner: + log_msg += (f" (issued by {user})") + LOG.info(log_msg) + async def _mutation_mapper( self, command: str, kwargs: Dict[str, Any], meta: Dict[str, Any] ) -> Optional[Tuple[bool, str]]: """Map between GraphQL resolvers and internal command interface.""" + + self._log_command( + command, + meta.get('auth_user', self.schd.owner) + ) method = getattr(self, command, None) if method is not None: return method(**kwargs) + try: self.schd.get_command_method(command) except AttributeError: raise ValueError(f"Command '{command}' not found") - if command != "put_messages": - log_msg = f"[command] {command}" - user = meta.get('auth_user', self.schd.owner) - if user != self.schd.owner: - log_msg += (f" (issued by {user})") - LOG.info(log_msg) + self.schd.queue_command( command, kwargs diff --git a/cylc/flow/network/scan.py b/cylc/flow/network/scan.py index 4dfe5510848..c2202f3f31e 100644 --- a/cylc/flow/network/scan.py +++ b/cylc/flow/network/scan.py @@ -50,10 +50,8 @@ import re from typing import AsyncGenerator, Dict, Iterable, List, Optional, Tuple, Union -from pkg_resources import ( - parse_requirements, - parse_version -) +from packaging.version import parse as parse_version +from packaging.specifiers import SpecifierSet from cylc.flow import LOG from cylc.flow.async_util import ( @@ -354,11 +352,7 @@ async def validate_contact_info(flow): def parse_requirement(requirement_string): """Parse a requirement from a requirement string.""" - # we have to give the requirement a name but what we call it doesn't - # actually matter - for req in parse_requirements(f'x {requirement_string}'): - # there should only be one requirement - return (req,), {} + return (SpecifierSet(requirement_string),), {} @pipe(preproc=parse_requirement) @@ -373,7 +367,7 @@ async def cylc_version(flow, requirement): flow (dict): Flow information dictionary, provided by scan through the pipe. requirement (str): - Requirement specifier in pkg_resources format e.g. ``> 8, < 9`` + Requirement specifier in PEP 440 format e.g. ``> 8, < 9`` """ return parse_version(flow[ContactFileFields.VERSION]) in requirement @@ -391,7 +385,7 @@ async def api_version(flow, requirement): flow (dict): Flow information dictionary, provided by scan through the pipe. requirement (str): - Requirement specifier in pkg_resources format e.g. ``> 8, < 9`` + Requirement specifier in PEP 440 format e.g. 
``> 8, < 9`` """ return parse_version(flow[ContactFileFields.API]) in requirement @@ -536,13 +530,6 @@ async def workflow_params(flow): Requires: * is_active(True) """ - params = {} - - def _callback(_, entry): - nonlocal params - key, value = entry - params[key] = value - # NOTE: use the public DB for reading # (only the scheduler process/thread should access the private database) db_file = Path(get_workflow_run_dir( @@ -550,7 +537,6 @@ def _callback(_, entry): )) if db_file.exists(): with CylcWorkflowDAO(db_file, is_public=True) as dao: - dao.select_workflow_params(_callback) - flow['workflow_params'] = params + flow['workflow_params'] = dict(dao.select_workflow_params()) return flow diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py index 7de2d88b17b..f9ed95c7158 100644 --- a/cylc/flow/network/schema.py +++ b/cylc/flow/network/schema.py @@ -203,6 +203,7 @@ class SortArgs(InputObjectType): 'is_runahead': Boolean(), 'mindepth': Int(default_value=-1), 'maxdepth': Int(default_value=-1), + 'graph_depth': Int(default_value=-1), 'sort': SortArgs(default_value=None), } @@ -218,6 +219,7 @@ class SortArgs(InputObjectType): 'is_runahead': Boolean(), 'mindepth': Int(default_value=-1), 'maxdepth': Int(default_value=-1), + 'graph_depth': Int(default_value=-1), 'sort': SortArgs(default_value=None), } @@ -226,8 +228,6 @@ class SortArgs(InputObjectType): 'exids': graphene.List(ID, default_value=[]), 'states': graphene.List(String, default_value=[]), 'exstates': graphene.List(String, default_value=[]), - 'mindepth': Int(default_value=-1), - 'maxdepth': Int(default_value=-1), 'sort': SortArgs(default_value=None), } @@ -606,91 +606,172 @@ class Workflow(ObjectType): class Meta: description = """Global workflow info.""" id = ID() # noqa: A003 (required for definition) - name = String() - status = String() - status_msg = String() - host = String() - port = Int() - pub_port = Int() - owner = String() + name = String( + description='The workflow ID with the ~user/ prefix removed.', + ) + status = String( + description='The workflow status e.g. `running`.', + ) + status_msg = String( + description='A description of the workflow status.', + ) + host = String( + description='The host where the scheduler process is running.', + ) + port = Int( + description='The port for sending ZMQ requests to the scheduler.', + ) + pub_port = Int( + description=sstrip(''' + The port for subscribing to ZMQ updates from the scheduler. 
+        '''),
+    )
+    owner = String(
+        description='The user account that the workflow is running under.',
+    )
     tasks = graphene.List(
         lambda: Task,
-        description="""Task definitions.""",
+        description="Task definitions.",
         args=DEF_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     families = graphene.List(
         lambda: Family,
-        description="""Family definitions.""",
+        description="Family definitions.",
         args=DEF_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     task_proxies = graphene.List(
         lambda: TaskProxy,
-        description="""Task cycle instances.""",
+        description="Task instances.",
         args=PROXY_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     family_proxies = graphene.List(
         lambda: FamilyProxy,
-        description="""Family cycle instances.""",
+        description="Family instances.",
         args=PROXY_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     jobs = graphene.List(
         lambda: Job,
-        description="""Jobs.""",
+        description="Jobs.",
         args=JOB_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     edges = Field(
         lambda: Edges,
         args=EDGE_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        description="""Graph edges""")
+        description="Graph edges.",
+    )
     nodes_edges = Field(
         lambda: NodesEdges,
         args=NODES_EDGES_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_edges)
-    api_version = Int()
-    cylc_version = String()
-    last_updated = Float()
-    meta = Field(NodeMeta)
-    newest_active_cycle_point = String()
-    oldest_active_cycle_point = String()
-    reloaded = Boolean()
-    run_mode = String()
-    is_held_total = Int()
-    is_queued_total = Int()
-    is_runahead_total = Int()
-    state_totals = GenericScalar(resolver=resolve_state_totals)
+        resolver=get_nodes_edges,
+        description='Graph nodes and edges.'
+    )
+    api_version = Int(
+        description='The Cylc scheduler communication protocol version number.'
+    )
+    cylc_version = String(
+        description='The Cylc version this workflow is running under.',
+    )
+    last_updated = Float(
+        description='The time of the most recent state change in the workflow.'
+    )
+    meta = Field(
+        NodeMeta,
+        description="The workflow's `[meta]` section.",
+    )
+    newest_active_cycle_point = String(
+        description='The newest cycle point which has active tasks.'
+    )
+    oldest_active_cycle_point = String(
+        description='The oldest cycle point which has active tasks.'
+    )
+    reloaded = Boolean(
+        description=sstrip('''
+            When subscribing to workflow updates, this field is `True` if the
+            update relates to a workflow reload.
+        '''),
+    )
+    run_mode = String(
+        description="The scheduler's run-mode e.g. `live`.",
+    )
+    is_held_total = Int(
+        description='The number of "held" tasks.',
+    )
+    is_queued_total = Int(
+        description='The number of queued tasks.',
+    )
+    is_runahead_total = Int(
+        description=sstrip('''
+            The number of tasks which are held back by the runahead limit.
+        ''')
+    )
+    state_totals = GenericScalar(
+        resolver=resolve_state_totals,
+        description='The number of tasks in each state as a JSON object.',
+    )
     latest_state_tasks = GenericScalar(
         states=graphene.List(
             String,
             description="List of task states to show",
             default_value=TASK_STATUSES_ORDERED),
-        resolver=resolve_state_tasks)
-    workflow_log_dir = String()
-    time_zone_info = Field(TimeZone)
-    tree_depth = Int()
-    ns_def_order = graphene.List(String)
-    job_log_names = graphene.List(String)
-    states = graphene.List(String)
+        resolver=resolve_state_tasks,
+        description='The latest tasks to have entered each task state.',
+    )
+    workflow_log_dir = String(
+        description="The path to the workflow's run directory.",
+    )
+    time_zone_info = Field(
+        TimeZone,
+        description='The scheduler time zone.',
+    )
+    tree_depth = Int()  # TODO: what is this? write description
+    ns_def_order = graphene.List(
+        String,
+        description=sstrip('''
+            Namespace definition order.
+
+            The order in which tasks were defined in the workflow
+            configuration.
+        '''),
+    )
+    job_log_names = graphene.List(
+        # TODO: remove, see https://github.com/cylc/cylc-flow/issues/5610
+        String,
+        description='Deprecated, do not use this.',
+    )
+    states = graphene.List(
+        String,
+        description=sstrip('''
+            The task states present in the workflow.
+
+            Similar to stateTotals.
+        '''),
+    )
     broadcasts = GenericScalar(
         ids=graphene.List(
             ID,
@@ -698,9 +779,18 @@ class Meta:
                 Node IDs, cycle point and/or-just family/task namespace:
                    ["1234/foo", "1234/FAM", "*/FAM"]
             '''),
-            default_value=[]),
-        resolver=resolve_broadcasts)
-    pruned = Boolean()
+            default_value=[]
+        ),
+        resolver=resolve_broadcasts,
+        description='Any active workflow broadcasts.'
+    )
+    pruned = Boolean()  # TODO: what is this? write description
+    n_edge_distance = Int(
+        description=sstrip('''
+            The maximum graph distance (n) from an active node
+            of the data-store graph window.
+        '''),
+    )
 
 
 class RuntimeSetting(ObjectType):
@@ -757,45 +847,95 @@ class Meta:
 
 class Job(ObjectType):
     class Meta:
-        description = """Jobs."""
+        description = "Jobs."
+
     id = ID()  # noqa: A003 (required for definition)
-    submit_num = Int()
-    state = String()
-    # name and cycle_point for filtering/sorting
-    name = String()
-    cycle_point = String()
+    submit_num = Int(
+        description='The submission number for this job, starts at 1.',
+    )
+    state = String(
+        description='The job state e.g. `running` or `succeeded`.',
+    )
+    name = String(
+        description='The name of the task which submitted this job.',
+    )
+    cycle_point = String(
+        description='The cycle of the task which submitted this job.',
+    )
     task_proxy = Field(
         lambda: TaskProxy,
-        description="""Associated Task Proxy""",
+        description="The TaskProxy of the task which submitted this job",
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
         resolver=get_node_by_id)
-    submitted_time = String()
-    started_time = String()
-    finished_time = String()
-    job_id = ID()
-    job_runner_name = String()
-    execution_time_limit = Float()
-    platform = String()
-    job_log_dir = String()
-    extra_logs = graphene.List(String)
-    messages = graphene.List(String)
-    runtime = Field(Runtime)
+    submitted_time = String(
+        description='The time this job was submitted to the job runner.',
+    )
+    started_time = String(
+        description='The time this job started running (if it has yet).',
+    )
+    finished_time = String(
+        description='The time this job finished running (if it has yet).',
+    )
+    job_id = ID(
+        description='The ID of this job in the job runner it was submitted to.'
+    )
+    job_runner_name = String(
+        description='The job runner this job was submitted to.',
+    )
+    execution_time_limit = Float(
+        description='The time limit for this job if configured.',
+    )
+    platform = String(
+        description='The Cylc platform this job was submitted to.',
+    )
+    job_log_dir = String(
+        description="The path to the job's log directory.",
+    )
+    messages = graphene.List(
+        String,
+        description='The list of task messages generated by this job.',
+    )
+    runtime = Field(
+        Runtime,
+        description=sstrip('''
+            The `[runtime]` configuration of the task which submitted this job.
+        '''),
+    )
 
 
 class Task(ObjectType):
     class Meta:
-        description = """Task definition, static fields"""
+        description = sstrip("""
+            Task definitions.
+
+            These are the task "definitions" as they appear in the
+            configuration as opposed to task "instances" which you will find
+            in the `TaskProxies` field.
+        """)
+
     id = ID()  # noqa: A003 (required for definition)
-    name = String()
-    meta = Field(NodeMeta)
-    runtime = Field(Runtime)
-    mean_elapsed_time = Float()
-    depth = Int()
+    name = String(
+        description='The task name.'
+    )
+    meta = Field(
+        NodeMeta,
+        description="The task's `[meta]` section.",
+    )
+    runtime = Field(
+        Runtime,
+        description="The task's `[runtime]` section.",
+    )
+    mean_elapsed_time = Float(
+        description="The task's average runtime."
+    )
+    depth = Int(
+        description='The family inheritance depth.'
+    )
     proxies = graphene.List(
         lambda: TaskProxy,
-        description="""Associated cycle point proxies""",
+        description="Associated cycle point proxies",
         args=PROXY_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
@@ -803,7 +943,7 @@ class Meta:
         resolver=get_nodes_by_ids)
     parents = graphene.List(
         lambda: Family,
-        description="""Family definition parent.""",
+        description="Family definition parent.",
         args=DEF_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
@@ -812,7 +952,7 @@ class Meta:
     namespace = graphene.List(String)
     first_parent = Field(
         lambda: Family,
-        description="""Task first parent.""",
+        description="Task first parent.",
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
@@ -885,24 +1025,65 @@ class Meta:
     id = ID()  # noqa: A003 (required for schema definition)
     task = Field(
         Task,
-        description="""Task definition""",
+        description="The task definition relating to this task instance.",
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
         resolver=get_node_by_id)
-    runtime = Field(Runtime)
-    state = String()
-    cycle_point = String()
-    is_held = Boolean()
-    is_queued = Boolean()
-    is_runahead = Boolean()
-    flow_nums = String()
-    flow_wait = Boolean()
-    depth = Int()
-    job_submits = Int()
+    runtime = Field(
+        Runtime,
+        description="This task's `[runtime]` section.",
+    )
+    state = String(
+        description='The task state e.g. `running`.',
+    )
+    name = String(
+        description="The task's name.",
+    )
+    cycle_point = String(
+        description="The task's cycle point.",
+    )
+    namespace = graphene.List(
+        String,
+        description='The inheritance order for this task.',
+    )
+    is_held = Boolean(
+        description='True if this task is "held".',
+    )
+    is_queued = Boolean(
+        description=sstrip('''
+            True if this task is "queued".
+
+            This relates to Cylc's internal task queues, not a job runner
+            queue.
+        '''),
+    )
+    is_runahead = Boolean(
+        description='True if this task is held back by the "runahead limit".',
+    )
+    flow_nums = String(
+        description='The flows this task instance belongs to.',
+    )
+    flow_wait = Boolean(
+        description=sstrip('''
+            True if this task will wait for an approaching flow before spawning
+            outputs downstream.
+        '''),
+    )
+    depth = Int(
+        description='The family inheritance depth',
+    )
+    graph_depth = Int(
+        description=sstrip('''
+            The n-window graph edge depth from closest active task(s).
+        '''),
+    )
+    job_submits = Int(
+        description='The number of job submissions for this task instance.',
+    )
     outputs = graphene.List(
         Output,
-        description="""Task outputs.""",
+        description="Outputs this task instance has generated.",
         sort=SortArgs(default_value=None),
         sort_order=graphene.List(
             String,
@@ -913,7 +1094,7 @@ class Meta:
         resolver=resolve_mapping_to_list)
     external_triggers = graphene.List(
         XTrigger,
-        description="""Task external trigger prerequisites.""",
+        description="Task external trigger prerequisites.",
         sort=SortArgs(default_value=None),
         sort_order=graphene.List(String),
         limit=Int(default_value=0),
@@ -921,49 +1102,56 @@ class Meta:
         resolver=resolve_mapping_to_list)
     xtriggers = graphene.List(
         XTrigger,
-        description="""Task xtrigger prerequisites.""",
+        description="Task xtrigger prerequisites.",
         sort=SortArgs(default_value=None),
         sort_order=graphene.List(String),
         limit=Int(default_value=0),
         satisfied=Boolean(),
         resolver=resolve_mapping_to_list)
-    extras = GenericScalar(resolver=resolve_json_dump)
-    # name & namespace for filtering/sorting
-    name = String()
-    namespace = graphene.List(String)
-    prerequisites = graphene.List(Prerequisite)
+    extras = GenericScalar(
+        # TODO: what is this? write description
+        resolver=resolve_json_dump,
+    )
+    prerequisites = graphene.List(
+        Prerequisite,
+        description='The prerequisites of this task.',
+    )
     jobs = graphene.List(
         Job,
-        description="""Jobs.""",
+        description="Jobs.",
         args=JOB_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     parents = graphene.List(
         lambda: FamilyProxy,
-        description="""Task parents.""",
+        description="Task parents (i.e. families).",
         args=PROXY_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
     first_parent = Field(
         lambda: FamilyProxy,
-        description="""Task first parent.""",
+        description="The task's first parent (i.e. its containing family).",
        args=PROXY_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_node_by_id)
+        resolver=get_node_by_id,
+    )
     ancestors = graphene.List(
         lambda: FamilyProxy,
-        description="""First parent ancestors.""",
+        description="First parent ancestors (i.e. inheritance hierarchy).",
         args=PROXY_ARGS,
         strip_null=STRIP_NULL_DEFAULT,
         delta_store=DELTA_STORE_DEFAULT,
         delta_type=DELTA_TYPE_DEFAULT,
-        resolver=get_nodes_by_ids)
+        resolver=get_nodes_by_ids,
+    )
 
 
 class Family(ObjectType):
@@ -1040,6 +1228,12 @@ class Meta:
     is_runahead = Boolean()
     is_runahead_total = Int()
     depth = Int()
+    graph_depth = Int(
+        description=sstrip('''
+            The n-window graph edge smallest child task/family depth
+            from closest active task(s).
+        '''),
+    )
     child_tasks = graphene.List(
         TaskProxy,
         description="""Descendant task proxies.""",
diff --git a/cylc/flow/network/server.py b/cylc/flow/network/server.py
index f6847dcef59..5c070472025 100644
--- a/cylc/flow/network/server.py
+++ b/cylc/flow/network/server.py
@@ -21,7 +21,6 @@
 from time import sleep
 from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
 
-from graphql.execution import ExecutionResult
 from graphql.execution.executors.asyncio import AsyncioExecutor
 import zmq
 from zmq.auth.thread import ThreadAuthenticator
@@ -41,6 +40,7 @@
 if TYPE_CHECKING:
     from cylc.flow.scheduler import Scheduler
+    from graphql.execution import ExecutionResult
 
 
 # maps server methods to the protobuf message (for client/UIS import)
@@ -368,7 +368,7 @@ def graphql(
             object: Execution result, or a list with errors.
 
         """
         try:
-            executed: ExecutionResult = schema.execute(
+            executed: 'ExecutionResult' = schema.execute(
                 request_string,
                 variable_values=variables,
                 context_value={
diff --git a/cylc/flow/option_parsers.py b/cylc/flow/option_parsers.py
index cb21f5e8bcf..b83ff45aab9 100644
--- a/cylc/flow/option_parsers.py
+++ b/cylc/flow/option_parsers.py
@@ -33,7 +33,7 @@
 import sys
 from textwrap import dedent
-from typing import Any, Dict, Optional, List, Tuple, Union
+from typing import Any, Dict, Optional, List, Tuple
 
 from cylc.flow import LOG
 from cylc.flow.terminal import supports_color, DIM
@@ -42,8 +42,13 @@
     CylcLogFormatter,
     setup_segregated_log_streams,
 )
+from cylc.flow.log_level import (
+    env_to_verbosity,
+    verbosity_to_log_level
+)
 
 WORKFLOW_ID_ARG_DOC = ('WORKFLOW', 'Workflow ID')
+OPT_WORKFLOW_ID_ARG_DOC = ('[WORKFLOW]', 'Workflow ID')
 WORKFLOW_ID_MULTI_ARG_DOC = ('WORKFLOW ...', 'Workflow ID(s)')
 WORKFLOW_ID_OR_PATH_ARG_DOC = ('WORKFLOW | PATH', 'Workflow ID or path')
 ID_MULTI_ARG_DOC = ('ID ...', 'Workflow/Cycle/Family/Task ID(s)')
@@ -177,99 +182,6 @@ def format_help_headings(string):
-def verbosity_to_log_level(verb: int) -> int:
-    """Convert Cylc verbosity to log severity level."""
-    if verb < 0:
-        return logging.WARNING
-    if verb > 0:
-        return logging.DEBUG
-    return logging.INFO
-
-
-def log_level_to_verbosity(lvl: int) -> int:
-    """Convert log severity level to Cylc verbosity.
-
-    Examples:
-        >>> log_level_to_verbosity(logging.NOTSET)
-        2
-        >>> log_level_to_verbosity(logging.DEBUG)
-        1
-        >>> log_level_to_verbosity(logging.INFO)
-        0
-        >>> log_level_to_verbosity(logging.WARNING)
-        -1
-        >>> log_level_to_verbosity(logging.ERROR)
-        -1
-    """
-    if lvl < logging.DEBUG:
-        return 2
-    if lvl < logging.INFO:
-        return 1
-    if lvl == logging.INFO:
-        return 0
-    return -1
-
-
-def verbosity_to_opts(verb: int) -> List[str]:
-    """Convert Cylc verbosity to the CLI opts required to replicate it.
-
-    Examples:
-        >>> verbosity_to_opts(0)
-        []
-        >>> verbosity_to_opts(-2)
-        ['-q', '-q']
-        >>> verbosity_to_opts(2)
-        ['-v', '-v']
-
-    """
-    return [
-        '-q'
-        for _ in range(verb, 0)
-    ] + [
-        '-v'
-        for _ in range(0, verb)
-    ]
-
-
-def verbosity_to_env(verb: int) -> Dict[str, str]:
-    """Convert Cylc verbosity to the env vars required to replicate it.
- - Examples: - >>> verbosity_to_env(0) - {'CYLC_VERBOSE': 'false', 'CYLC_DEBUG': 'false'} - >>> verbosity_to_env(1) - {'CYLC_VERBOSE': 'true', 'CYLC_DEBUG': 'false'} - >>> verbosity_to_env(2) - {'CYLC_VERBOSE': 'true', 'CYLC_DEBUG': 'true'} - - """ - return { - 'CYLC_VERBOSE': str((verb > 0)).lower(), - 'CYLC_DEBUG': str((verb > 1)).lower(), - } - - -def env_to_verbosity(env: Union[Dict, os._Environ]) -> int: - """Extract verbosity from environment variables. - - Examples: - >>> env_to_verbosity({}) - 0 - >>> env_to_verbosity({'CYLC_VERBOSE': 'true'}) - 1 - >>> env_to_verbosity({'CYLC_DEBUG': 'true'}) - 2 - >>> env_to_verbosity({'CYLC_DEBUG': 'TRUE'}) - 2 - - """ - return ( - 2 if env.get('CYLC_DEBUG', '').lower() == 'true' - else 1 if env.get('CYLC_VERBOSE', '').lower() == 'true' - else 0 - ) - - class CylcOption(Option): """Optparse option which adds a decrement action.""" diff --git a/cylc/flow/parsec/empysupport.py b/cylc/flow/parsec/empysupport.py index 7bc8a69e3c4..b4164894e0f 100644 --- a/cylc/flow/parsec/empysupport.py +++ b/cylc/flow/parsec/empysupport.py @@ -24,6 +24,7 @@ import typing as t from cylc.flow.parsec.exceptions import EmPyError +from cylc.flow.parsec.fileparse import get_cylc_env_vars def empyprocess( @@ -52,6 +53,12 @@ def empyprocess( ftempl = StringIO('\n'.join(flines)) xtempl = StringIO() interpreter = em.Interpreter(output=em.UncloseableFile(xtempl)) + + # Add `CYLC_` environment variables to the global namespace. + interpreter.updateGlobals( + get_cylc_env_vars() + ) + try: interpreter.file(ftempl, '